/*
 * Copyright 2011 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrInOrderDrawBuffer_DEFINED
#define GrInOrderDrawBuffer_DEFINED

#include "GrDrawTarget.h"
#include "GrAllocPool.h"
#include "GrAllocator.h"
#include "GrPath.h"

#include "SkClipStack.h"
#include "SkTemplates.h"
#include "SkTypes.h"

class GrGpu;
class GrIndexBufferAllocPool;
class GrVertexBufferAllocPool;

/**
 * GrInOrderDrawBuffer is an implementation of GrDrawTarget that queues up draws for eventual
 * playback into a GrGpu. In theory one draw buffer could play back into another. When index or
 * vertex buffers are used as geometry sources, the draw buffer only holds references to the
 * buffers. It is the caller's responsibility to ensure that the data is still valid when the
 * draw buffer is played back into a GrGpu. Similarly, it is the caller's responsibility to
 * ensure that all referenced textures, buffers, and render targets are associated with the
 * GrGpu object that the buffer is played back into. The buffer requires VB and IB pools to
 * store geometry.
 */
class GrInOrderDrawBuffer : public GrDrawTarget {
public:

    /**
     * Creates a GrInOrderDrawBuffer.
     *
     * @param gpu        the gpu object that this draw buffer flushes to.
     * @param vertexPool pool where vertices for queued draws will be saved when
     *                   the vertex source is either reserved or array.
     * @param indexPool  pool where indices for queued draws will be saved when
     *                   the index source is either reserved or array.
     */
    GrInOrderDrawBuffer(GrGpu* gpu,
                        GrVertexBufferAllocPool* vertexPool,
                        GrIndexBufferAllocPool* indexPool);

    virtual ~GrInOrderDrawBuffer();

    /**
     * Empties the draw buffer of any queued-up draws. This must not be called while inside an
     * unbalanced pushGeometrySource(). The current draw state and clip are preserved.
     */
    void reset();

    /**
     * This plays the queued-up draws to its GrGpu target. It also resets this object (i.e.
     * flushing is destructive). This buffer must not have an active reserved vertex or index
     * source. Any reserved geometry on the target will be finalized because its geometry source
     * will be pushed before flushing and popped afterwards.
     */
    void flush();
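
    // A minimal usage sketch (illustrative only; 'gpu', 'vertexPool', and 'indexPool' are
    // assumed to have been created and kept alive by the caller, as described above):
    //
    //   GrInOrderDrawBuffer drawBuffer(gpu, vertexPool, indexPool);
    //   // ... record draws through the GrDrawTarget interface ...
    //   drawBuffer.flush();  // replays the queued commands into the GrGpu and resets the buffer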

    // tracking for draws
    virtual DrawToken getCurrentDrawToken() { return DrawToken(this, fDrawID); }

    // overrides from GrDrawTarget
    virtual bool geometryHints(int* vertexCount,
                               int* indexCount) const SK_OVERRIDE;
    virtual void clear(const SkIRect* rect,
                       GrColor color,
                       bool canIgnoreRect,
                       GrRenderTarget* renderTarget = NULL) SK_OVERRIDE;

    virtual void initCopySurfaceDstDesc(const GrSurface* src, GrTextureDesc* desc) SK_OVERRIDE;

protected:
    virtual void clipWillBeSet(const GrClipData* newClip) SK_OVERRIDE;

private:
    enum Cmd {
        kDraw_Cmd        = 1,
        kStencilPath_Cmd = 2,
        kSetState_Cmd    = 3,
        kSetClip_Cmd     = 4,
        kClear_Cmd       = 5,
        kCopySurface_Cmd = 6,
        kDrawPath_Cmd    = 7,
    };

    class DrawRecord : public DrawInfo {
    public:
        DrawRecord(const DrawInfo& info) : DrawInfo(info) {}
        const GrVertexBuffer* fVertexBuffer;
        const GrIndexBuffer*  fIndexBuffer;
    };

    struct StencilPath : public ::SkNoncopyable {
        StencilPath();

        SkAutoTUnref<const GrPath> fPath;
        SkPath::FillType           fFill;
    };

    struct DrawPath : public ::SkNoncopyable {
        DrawPath();

        SkAutoTUnref<const GrPath> fPath;
        SkPath::FillType           fFill;
        GrDeviceCoordTexture       fDstCopy;
    };

    struct Clear : public ::SkNoncopyable {
        Clear() : fRenderTarget(NULL) {}
        ~Clear() { SkSafeUnref(fRenderTarget); }

        SkIRect         fRect;
        GrColor         fColor;
        bool            fCanIgnoreRect;
        GrRenderTarget* fRenderTarget;
    };

    struct CopySurface : public ::SkNoncopyable {
        SkAutoTUnref<GrSurface> fDst;
        SkAutoTUnref<GrSurface> fSrc;
        SkIRect                 fSrcRect;
        SkIPoint                fDstPoint;
    };

    // overrides from GrDrawTarget
    virtual void onDraw(const DrawInfo&) SK_OVERRIDE;
    virtual void onDrawRect(const SkRect& rect,
                            const SkMatrix* matrix,
                            const SkRect* localRect,
                            const SkMatrix* localMatrix) SK_OVERRIDE;

    virtual void onStencilPath(const GrPath*, SkPath::FillType) SK_OVERRIDE;
    virtual void onDrawPath(const GrPath*, SkPath::FillType,
                            const GrDeviceCoordTexture* dstCopy) SK_OVERRIDE;

    virtual bool onReserveVertexSpace(size_t vertexSize,
                                      int vertexCount,
                                      void** vertices) SK_OVERRIDE;
    virtual bool onReserveIndexSpace(int indexCount,
                                     void** indices) SK_OVERRIDE;
    virtual void releaseReservedVertexSpace() SK_OVERRIDE;
    virtual void releaseReservedIndexSpace() SK_OVERRIDE;
    virtual void onSetVertexSourceToArray(const void* vertexArray,
                                          int vertexCount) SK_OVERRIDE;
    virtual void onSetIndexSourceToArray(const void* indexArray,
                                         int indexCount) SK_OVERRIDE;
    virtual void releaseVertexArray() SK_OVERRIDE;
    virtual void releaseIndexArray() SK_OVERRIDE;
    virtual void geometrySourceWillPush() SK_OVERRIDE;
    virtual void geometrySourceWillPop(const GeometrySrcState& restoredState) SK_OVERRIDE;
    virtual void willReserveVertexAndIndexSpace(int vertexCount,
                                                int indexCount) SK_OVERRIDE;
    virtual bool onCopySurface(GrSurface* dst,
                               GrSurface* src,
                               const SkIRect& srcRect,
                               const SkIPoint& dstPoint) SK_OVERRIDE;
    virtual bool onCanCopySurface(GrSurface* dst,
                                  GrSurface* src,
                                  const SkIRect& srcRect,
                                  const SkIPoint& dstPoint) SK_OVERRIDE;

    bool quickInsideClip(const SkRect& devBounds);

    virtual void onInstantGpuTraceEvent(const char* marker) SK_OVERRIDE;
    virtual void onPushGpuTraceEvent(const char* marker) SK_OVERRIDE;
    virtual void onPopGpuTraceEvent() SK_OVERRIDE;

    // Attempts to concat instances from info onto the previous draw. info must represent an
    // instanced draw. The caller must have already recorded a new draw state and clip if
    // necessary.
    int concatInstancedDraw(const DrawInfo& info);

    // we lazily record state and clip changes in order to skip clips and states that have no
    // effect.
    bool needsNewState() const;
    bool needsNewClip() const;

    // these functions record a command
    void         recordState();
    void         recordClip();
    DrawRecord*  recordDraw(const DrawInfo&);
    StencilPath* recordStencilPath();
    DrawPath*    recordDrawPath();
    Clear*       recordClear();
    CopySurface* recordCopySurface();

    // TODO: Use a single allocator for commands and records
    enum {
        kCmdPreallocCnt          = 32,
        kDrawPreallocCnt         = 8,
        kStencilPathPreallocCnt  = 8,
        kDrawPathPreallocCnt     = 8,
        kStatePreallocCnt        = 8,
        kClipPreallocCnt         = 8,
        kClearPreallocCnt        = 4,
        kGeoPoolStatePreAllocCnt = 4,
        kCopySurfacePreallocCnt  = 4,
    };

    SkSTArray<kCmdPreallocCnt, uint8_t, true>                     fCmds;
    GrSTAllocator<kDrawPreallocCnt, DrawRecord>                   fDraws;
    GrSTAllocator<kStencilPathPreallocCnt, StencilPath>           fStencilPaths;
    GrSTAllocator<kDrawPathPreallocCnt, DrawPath>                 fDrawPaths;
    GrSTAllocator<kStatePreallocCnt, GrDrawState::DeferredState>  fStates;
    GrSTAllocator<kClearPreallocCnt, Clear>                       fClears;
    GrSTAllocator<kCopySurfacePreallocCnt, CopySurface>           fCopySurfaces;
    GrSTAllocator<kClipPreallocCnt, SkClipStack>                  fClips;
    GrSTAllocator<kClipPreallocCnt, SkIPoint>                     fClipOrigins;
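
    // Playback sketch (illustrative only): each record* call above appends a Cmd id to fCmds
    // and a payload to the matching typed allocator, so playback can walk the commands in order
    // and consume the next payload of the corresponding type. Roughly (the actual logic lives in
    // flush()'s implementation, which also handles state and clip setup):
    //
    //   for (int i = 0; i < fCmds.count(); ++i) {
    //       switch (fCmds[i]) {
    //           case kDraw_Cmd:     /* issue the next fDraws record on fDstGpu   */ break;
    //           case kSetState_Cmd: /* restore the next fStates entry            */ break;
    //           case kSetClip_Cmd:  /* install the next fClips/fClipOrigins pair */ break;
    //           // ... kStencilPath_Cmd, kClear_Cmd, kCopySurface_Cmd, kDrawPath_Cmd likewise
    //       }
    //   }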

    GrDrawTarget*            fDstGpu;

    bool                     fClipSet;

    enum ClipProxyState {
        kUnknown_ClipProxyState,
        kValid_ClipProxyState,
        kInvalid_ClipProxyState
    };
    ClipProxyState           fClipProxyState;
    SkRect                   fClipProxy;

    GrVertexBufferAllocPool& fVertexPool;

    GrIndexBufferAllocPool&  fIndexPool;

    struct GeometryPoolState {
        const GrVertexBuffer* fPoolVertexBuffer;
        int                   fPoolStartVertex;
        const GrIndexBuffer*  fPoolIndexBuffer;
        int                   fPoolStartIndex;
        // the caller may conservatively over-reserve vertices / indices.
        // we release unused space back to the allocator if possible.
        // this can only be done if there isn't an intervening pushGeometrySource().
        size_t                fUsedPoolVertexBytes;
        size_t                fUsedPoolIndexBytes;
    };
    SkSTArray<kGeoPoolStatePreAllocCnt, GeometryPoolState> fGeoPoolStateStack;
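
    // Worked example of the bookkeeping above (a sketch of the intent, not the exact
    // implementation): if a caller reserves 100 vertices of 16 bytes each but its draws only
    // reference the first 60, fUsedPoolVertexBytes for the top of this stack ends up at
    // 60 * 16 = 960, and the remaining 640 reserved bytes can be returned to fVertexPool when
    // the reserved source is released, provided no pushGeometrySource() intervened.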

    virtual bool isIssued(uint32_t drawID) { return drawID != fDrawID; }

    bool     fFlushing;
    uint32_t fDrawID;

    typedef GrDrawTarget INHERITED;
};

#endif