/*
 * Copyright 2011 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrGpuGL.h"
#include "GrGLStencilBuffer.h"
#include "GrGLPath.h"
#include "GrGLShaderBuilder.h"
#include "GrTemplates.h"
#include "GrTypes.h"
#include "SkStrokeRec.h"
#include "SkTemplates.h"

#define GL_CALL(X) GR_GL_CALL(this->glInterface(), X)
#define GL_CALL_RET(RET, X) GR_GL_CALL_RET(this->glInterface(), RET, X)

#define SKIP_CACHE_CHECK    true

#if GR_GL_CHECK_ALLOC_WITH_GET_ERROR
    #define CLEAR_ERROR_BEFORE_ALLOC(iface)   GrGLClearErr(iface)
    #define GL_ALLOC_CALL(iface, call)        GR_GL_CALL_NOERRCHECK(iface, call)
    #define CHECK_ALLOC_ERROR(iface)          GR_GL_GET_ERROR(iface)
#else
    #define CLEAR_ERROR_BEFORE_ALLOC(iface)
    #define GL_ALLOC_CALL(iface, call)        GR_GL_CALL(iface, call)
    #define CHECK_ALLOC_ERROR(iface)          GR_GL_NO_ERROR
#endif

///////////////////////////////////////////////////////////////////////////////

static const GrGLenum gXfermodeCoeff2Blend[] = {
    GR_GL_ZERO,
    GR_GL_ONE,
    GR_GL_SRC_COLOR,
    GR_GL_ONE_MINUS_SRC_COLOR,
    GR_GL_DST_COLOR,
    GR_GL_ONE_MINUS_DST_COLOR,
    GR_GL_SRC_ALPHA,
    GR_GL_ONE_MINUS_SRC_ALPHA,
    GR_GL_DST_ALPHA,
    GR_GL_ONE_MINUS_DST_ALPHA,
    GR_GL_CONSTANT_COLOR,
    GR_GL_ONE_MINUS_CONSTANT_COLOR,
    GR_GL_CONSTANT_ALPHA,
    GR_GL_ONE_MINUS_CONSTANT_ALPHA,

    // extended blend coeffs
    GR_GL_SRC1_COLOR,
    GR_GL_ONE_MINUS_SRC1_COLOR,
    GR_GL_SRC1_ALPHA,
    GR_GL_ONE_MINUS_SRC1_ALPHA,
};

bool GrGpuGL::BlendCoeffReferencesConstant(GrBlendCoeff coeff) {
    static const bool gCoeffReferencesBlendConst[] = {
        false,
        false,
        false,
        false,
        false,
        false,
        false,
        false,
        false,
        false,
        true,
        true,
        true,
        true,

        // extended blend coeffs
        false,
        false,
        false,
        false,
    };
    return gCoeffReferencesBlendConst[coeff];
    GR_STATIC_ASSERT(kTotalGrBlendCoeffCount ==
                     GR_ARRAY_COUNT(gCoeffReferencesBlendConst));

    GR_STATIC_ASSERT(0 == kZero_GrBlendCoeff);
    GR_STATIC_ASSERT(1 == kOne_GrBlendCoeff);
    GR_STATIC_ASSERT(2 == kSC_GrBlendCoeff);
    GR_STATIC_ASSERT(3 == kISC_GrBlendCoeff);
    GR_STATIC_ASSERT(4 == kDC_GrBlendCoeff);
    GR_STATIC_ASSERT(5 == kIDC_GrBlendCoeff);
    GR_STATIC_ASSERT(6 == kSA_GrBlendCoeff);
    GR_STATIC_ASSERT(7 == kISA_GrBlendCoeff);
    GR_STATIC_ASSERT(8 == kDA_GrBlendCoeff);
    GR_STATIC_ASSERT(9 == kIDA_GrBlendCoeff);
    GR_STATIC_ASSERT(10 == kConstC_GrBlendCoeff);
    GR_STATIC_ASSERT(11 == kIConstC_GrBlendCoeff);
    GR_STATIC_ASSERT(12 == kConstA_GrBlendCoeff);
    GR_STATIC_ASSERT(13 == kIConstA_GrBlendCoeff);

    GR_STATIC_ASSERT(14 == kS2C_GrBlendCoeff);
    GR_STATIC_ASSERT(15 == kIS2C_GrBlendCoeff);
    GR_STATIC_ASSERT(16 == kS2A_GrBlendCoeff);
    GR_STATIC_ASSERT(17 == kIS2A_GrBlendCoeff);

    // The assertion for gXfermodeCoeff2Blend has to be in GrGpu scope
    GR_STATIC_ASSERT(kTotalGrBlendCoeffCount ==
                     GR_ARRAY_COUNT(gXfermodeCoeff2Blend));
}
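
// The two tables above are indexed by the same GrBlendCoeff values, which the
// static asserts above pin down. A minimal sketch of how a caller might use
// them together (hypothetical usage, not a call site from this file):
#if 0
    GrBlendCoeff coeff = kConstC_GrBlendCoeff;
    GrGLenum glCoeff = gXfermodeCoeff2Blend[coeff];   // GR_GL_CONSTANT_COLOR
    if (GrGpuGL::BlendCoeffReferencesConstant(coeff)) {
        // the GL blend color must be flushed before drawing with this coeff
    }
#endif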

///////////////////////////////////////////////////////////////////////////////

static bool gPrintStartupSpew;

GrGpuGL::GrGpuGL(const GrGLContext& ctx, GrContext* context)
    : GrGpu(context)
    , fGLContext(ctx) {

    SkASSERT(ctx.isInitialized());
    fCaps.reset(SkRef(ctx.caps()));

    fHWBoundTextures.reset(this->glCaps().maxFragmentTextureUnits());
    fHWTexGenSettings.reset(this->glCaps().maxFixedFunctionTextureCoords());

    GrGLClearErr(fGLContext.interface());
    if (gPrintStartupSpew) {
        const GrGLubyte* vendor;
        const GrGLubyte* renderer;
        const GrGLubyte* version;
        GL_CALL_RET(vendor, GetString(GR_GL_VENDOR));
        GL_CALL_RET(renderer, GetString(GR_GL_RENDERER));
        GL_CALL_RET(version, GetString(GR_GL_VERSION));
        GrPrintf("------------------------- create GrGpuGL %p --------------\n",
                 this);
        GrPrintf("------ VENDOR %s\n", vendor);
        GrPrintf("------ RENDERER %s\n", renderer);
        GrPrintf("------ VERSION %s\n", version);
        GrPrintf("------ EXTENSIONS\n");
#if 0   // TODO: Reenable this after GrGLInterface's extensions can be accessed safely.
        ctx.extensions().print();
#endif
        GrPrintf("\n");
        GrPrintf(this->glCaps().dump().c_str());
    }

    fProgramCache = SkNEW_ARGS(ProgramCache, (this));

    SkASSERT(this->glCaps().maxVertexAttributes() >= GrDrawState::kMaxVertexAttribCnt);

    fLastSuccessfulStencilFmtIdx = 0;
    fHWProgramID = 0;
}

GrGpuGL::~GrGpuGL() {
    if (0 != fHWProgramID) {
        // detach the current program so there is no confusion on OpenGL's part
        // that we want it to be deleted
        SkASSERT(fHWProgramID == fCurrentProgram->programID());
        GL_CALL(UseProgram(0));
    }

    delete fProgramCache;

    // This must be called before the GrDrawTarget destructor.
    this->releaseGeometry();
    // This subclass must do this before the base class destructor runs
    // since we will unref the GrGLInterface.
    this->releaseResources();
}

///////////////////////////////////////////////////////////////////////////////

GrPixelConfig GrGpuGL::preferredReadPixelsConfig(GrPixelConfig readConfig,
                                                 GrPixelConfig surfaceConfig) const {
    if (GR_GL_RGBA_8888_PIXEL_OPS_SLOW && kRGBA_8888_GrPixelConfig == readConfig) {
        return kBGRA_8888_GrPixelConfig;
    } else if (this->glContext().isMesa() &&
               GrBytesPerPixel(readConfig) == 4 &&
               GrPixelConfigSwapRAndB(readConfig) == surfaceConfig) {
        // Mesa 3D takes a slow path when reading back BGRA from an RGBA surface and vice-versa.
        // Perhaps this should be guarded by some compile-time or runtime check.
        return surfaceConfig;
    } else if (readConfig == kBGRA_8888_GrPixelConfig &&
               !this->glCaps().readPixelsSupported(this->glInterface(),
                                                   GR_GL_BGRA, GR_GL_UNSIGNED_BYTE)) {
        return kRGBA_8888_GrPixelConfig;
    } else {
        return readConfig;
    }
}

GrPixelConfig GrGpuGL::preferredWritePixelsConfig(GrPixelConfig writeConfig,
                                                  GrPixelConfig surfaceConfig) const {
    if (GR_GL_RGBA_8888_PIXEL_OPS_SLOW && kRGBA_8888_GrPixelConfig == writeConfig) {
        return kBGRA_8888_GrPixelConfig;
    } else {
        return writeConfig;
    }
}

bool GrGpuGL::canWriteTexturePixels(const GrTexture* texture, GrPixelConfig srcConfig) const {
    if (kIndex_8_GrPixelConfig == srcConfig || kIndex_8_GrPixelConfig == texture->config()) {
        return false;
    }
    if (srcConfig != texture->config() && kGLES_GrGLStandard == this->glStandard()) {
        // In general ES2 requires the internal format of the texture and the format of the src
        // pixels to match. However, it may or may not be possible to upload BGRA data to a RGBA
        // texture. It depends upon which extension added BGRA. The Apple extension allows it
        // (BGRA's internal format is RGBA) while the EXT extension does not (BGRA is its own
        // internal format).
        if (this->glCaps().bgraFormatSupport() &&
            !this->glCaps().bgraIsInternalFormat() &&
            kBGRA_8888_GrPixelConfig == srcConfig &&
            kRGBA_8888_GrPixelConfig == texture->config()) {
            return true;
        } else {
            return false;
        }
    } else {
        return true;
    }
}

bool GrGpuGL::fullReadPixelsIsFasterThanPartial() const {
    return SkToBool(GR_GL_FULL_READPIXELS_FASTER_THAN_PARTIAL);
}

void GrGpuGL::onResetContext(uint32_t resetBits) {
    // we don't use the z-buffer (depth buffer) at all
    if (resetBits & kMisc_GrGLBackendState) {
        GL_CALL(Disable(GR_GL_DEPTH_TEST));
        GL_CALL(DepthMask(GR_GL_FALSE));

        fHWDrawFace = GrDrawState::kInvalid_DrawFace;
        fHWDitherEnabled = kUnknown_TriState;

        if (kGL_GrGLStandard == this->glStandard()) {
            // Desktop-only state that we never change
            if (!this->glCaps().isCoreProfile()) {
                GL_CALL(Disable(GR_GL_POINT_SMOOTH));
                GL_CALL(Disable(GR_GL_LINE_SMOOTH));
                GL_CALL(Disable(GR_GL_POLYGON_SMOOTH));
                GL_CALL(Disable(GR_GL_POLYGON_STIPPLE));
                GL_CALL(Disable(GR_GL_COLOR_LOGIC_OP));
                GL_CALL(Disable(GR_GL_INDEX_LOGIC_OP));
            }
            // The windows NVIDIA driver has GL_ARB_imaging in the extension string when using a
            // core profile. This seems like a bug since the core spec removes any mention of
            // GL_ARB_imaging.
            if (this->glCaps().imagingSupport() && !this->glCaps().isCoreProfile()) {
                GL_CALL(Disable(GR_GL_COLOR_TABLE));
            }
            GL_CALL(Disable(GR_GL_POLYGON_OFFSET_FILL));
            // Since ES doesn't support glPointSize at all we always use the VS to
            // set the point size
            GL_CALL(Enable(GR_GL_VERTEX_PROGRAM_POINT_SIZE));

            // We should set glPolygonMode(FRONT_AND_BACK,FILL) here, too. It isn't
            // currently part of our gl interface. There are probably others as
            // well.
        }
        fHWWriteToColor = kUnknown_TriState;
        // we only ever use lines in hairline mode
        GL_CALL(LineWidth(1));
    }

    if (resetBits & kAA_GrGLBackendState) {
        fHWAAState.invalidate();
    }

    fHWActiveTextureUnitIdx = -1; // invalid

    if (resetBits & kTextureBinding_GrGLBackendState) {
        for (int s = 0; s < fHWBoundTextures.count(); ++s) {
            fHWBoundTextures[s] = NULL;
        }
    }

    if (resetBits & kBlend_GrGLBackendState) {
        fHWBlendState.invalidate();
    }

    if (resetBits & kView_GrGLBackendState) {
        fHWScissorSettings.invalidate();
        fHWViewport.invalidate();
    }

    if (resetBits & kStencil_GrGLBackendState) {
        fHWStencilSettings.invalidate();
        fHWStencilTestEnabled = kUnknown_TriState;
    }

    // Vertex
    if (resetBits & kVertex_GrGLBackendState) {
        fHWGeometryState.invalidate();
    }

    if (resetBits & kRenderTarget_GrGLBackendState) {
        fHWBoundRenderTarget = NULL;
    }

    if (resetBits & (kFixedFunction_GrGLBackendState | kPathRendering_GrGLBackendState)) {
        if (this->glCaps().fixedFunctionSupport()) {
            fHWProjectionMatrixState.invalidate();
            // we don't use the model view matrix.
            GL_CALL(MatrixMode(GR_GL_MODELVIEW));
            GL_CALL(LoadIdentity());

            for (int i = 0; i < this->glCaps().maxFixedFunctionTextureCoords(); ++i) {
                GL_CALL(ActiveTexture(GR_GL_TEXTURE0 + i));
                GL_CALL(Disable(GR_GL_TEXTURE_GEN_S));
                GL_CALL(Disable(GR_GL_TEXTURE_GEN_T));
                GL_CALL(Disable(GR_GL_TEXTURE_GEN_Q));
                GL_CALL(Disable(GR_GL_TEXTURE_GEN_R));
                if (this->caps()->pathRenderingSupport()) {
                    GL_CALL(PathTexGen(GR_GL_TEXTURE0 + i, GR_GL_NONE, 0, NULL));
                }
                fHWTexGenSettings[i].fMode = GR_GL_NONE;
                fHWTexGenSettings[i].fNumComponents = 0;
            }
            fHWActiveTexGenSets = 0;
        }
        if (this->caps()->pathRenderingSupport()) {
            fHWPathStencilSettings.invalidate();
        }
    }

    // we assume these values
    if (resetBits & kPixelStore_GrGLBackendState) {
        if (this->glCaps().unpackRowLengthSupport()) {
            GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH, 0));
        }
        if (this->glCaps().packRowLengthSupport()) {
            GL_CALL(PixelStorei(GR_GL_PACK_ROW_LENGTH, 0));
        }
        if (this->glCaps().unpackFlipYSupport()) {
            GL_CALL(PixelStorei(GR_GL_UNPACK_FLIP_Y, GR_GL_FALSE));
        }
        if (this->glCaps().packFlipYSupport()) {
            GL_CALL(PixelStorei(GR_GL_PACK_REVERSE_ROW_ORDER, GR_GL_FALSE));
        }
    }

    if (resetBits & kProgram_GrGLBackendState) {
        fHWProgramID = 0;
        fSharedGLProgramState.invalidate();
    }
}

namespace {

GrSurfaceOrigin resolve_origin(GrSurfaceOrigin origin, bool renderTarget) {
    // By default, GrRenderTargets are GL's normal orientation so that they
    // can be drawn to by the outside world without the client having
    // to render upside down.
    if (kDefault_GrSurfaceOrigin == origin) {
        return renderTarget ? kBottomLeft_GrSurfaceOrigin : kTopLeft_GrSurfaceOrigin;
    } else {
        return origin;
    }
}

}

GrTexture* GrGpuGL::onWrapBackendTexture(const GrBackendTextureDesc& desc) {
    if (!this->configToGLFormats(desc.fConfig, false, NULL, NULL, NULL)) {
        return NULL;
    }

    if (0 == desc.fTextureHandle) {
        return NULL;
    }

    int maxSize = this->caps()->maxTextureSize();
    if (desc.fWidth > maxSize || desc.fHeight > maxSize) {
        return NULL;
    }

    GrGLTexture::Desc glTexDesc;
    // next line relies on GrBackendTextureDesc's flags matching GrTexture's
    glTexDesc.fFlags = (GrTextureFlags) desc.fFlags;
    glTexDesc.fWidth = desc.fWidth;
    glTexDesc.fHeight = desc.fHeight;
    glTexDesc.fConfig = desc.fConfig;
    glTexDesc.fSampleCnt = desc.fSampleCnt;
    glTexDesc.fTextureID = static_cast<GrGLuint>(desc.fTextureHandle);
    glTexDesc.fIsWrapped = true;
    bool renderTarget = SkToBool(desc.fFlags & kRenderTarget_GrBackendTextureFlag);
    // FIXME: this should be calling resolve_origin(), but Chrome code is currently
    // assuming the old behaviour, which is that backend textures are always
    // BottomLeft, even for non-RT's. Once Chrome is fixed, change this to:
    // glTexDesc.fOrigin = resolve_origin(desc.fOrigin, renderTarget);
    if (kDefault_GrSurfaceOrigin == desc.fOrigin) {
        glTexDesc.fOrigin = kBottomLeft_GrSurfaceOrigin;
    } else {
        glTexDesc.fOrigin = desc.fOrigin;
    }

    GrGLTexture* texture = NULL;
    if (renderTarget) {
        GrGLRenderTarget::Desc glRTDesc;
        glRTDesc.fRTFBOID = 0;
        glRTDesc.fTexFBOID = 0;
        glRTDesc.fMSColorRenderbufferID = 0;
        glRTDesc.fConfig = desc.fConfig;
        glRTDesc.fSampleCnt = desc.fSampleCnt;
        glRTDesc.fOrigin = glTexDesc.fOrigin;
        glRTDesc.fCheckAllocation = false;
        if (!this->createRenderTargetObjects(glTexDesc.fWidth,
                                             glTexDesc.fHeight,
                                             glTexDesc.fTextureID,
                                             &glRTDesc)) {
            return NULL;
        }
        texture = SkNEW_ARGS(GrGLTexture, (this, glTexDesc, glRTDesc));
    } else {
        texture = SkNEW_ARGS(GrGLTexture, (this, glTexDesc));
    }
    if (NULL == texture) {
        return NULL;
    }

    return texture;
}

GrRenderTarget* GrGpuGL::onWrapBackendRenderTarget(const GrBackendRenderTargetDesc& desc) {
    GrGLRenderTarget::Desc glDesc;
    glDesc.fConfig = desc.fConfig;
    glDesc.fRTFBOID = static_cast<GrGLuint>(desc.fRenderTargetHandle);
    glDesc.fMSColorRenderbufferID = 0;
    glDesc.fTexFBOID = GrGLRenderTarget::kUnresolvableFBOID;
    glDesc.fSampleCnt = desc.fSampleCnt;
    glDesc.fIsWrapped = true;
    glDesc.fCheckAllocation = false;

    glDesc.fOrigin = resolve_origin(desc.fOrigin, true);
    GrGLIRect viewport;
    viewport.fLeft = 0;
    viewport.fBottom = 0;
    viewport.fWidth = desc.fWidth;
    viewport.fHeight = desc.fHeight;

    GrRenderTarget* tgt = SkNEW_ARGS(GrGLRenderTarget,
                                     (this, glDesc, viewport));
    if (desc.fStencilBits) {
        GrGLStencilBuffer::Format format;
        format.fInternalFormat = GrGLStencilBuffer::kUnknownInternalFormat;
        format.fPacked = false;
        format.fStencilBits = desc.fStencilBits;
        format.fTotalBits = desc.fStencilBits;
        static const bool kIsSBWrapped = false;
        GrGLStencilBuffer* sb = SkNEW_ARGS(GrGLStencilBuffer,
                                           (this,
                                            kIsSBWrapped,
                                            0,
                                            desc.fWidth,
                                            desc.fHeight,
                                            desc.fSampleCnt,
                                            format));
        tgt->setStencilBuffer(sb);
        sb->unref();
    }
    return tgt;
}

////////////////////////////////////////////////////////////////////////////////

bool GrGpuGL::onWriteTexturePixels(GrTexture* texture,
                                   int left, int top, int width, int height,
                                   GrPixelConfig config, const void* buffer,
                                   size_t rowBytes) {
    if (NULL == buffer) {
        return false;
    }
    GrGLTexture* glTex = static_cast<GrGLTexture*>(texture);

    this->setScratchTextureUnit();
    GL_CALL(BindTexture(GR_GL_TEXTURE_2D, glTex->textureID()));
    GrGLTexture::Desc desc;
    desc.fFlags = glTex->desc().fFlags;
    desc.fWidth = glTex->width();
    desc.fHeight = glTex->height();
    desc.fConfig = glTex->config();
    desc.fSampleCnt = glTex->desc().fSampleCnt;
    desc.fTextureID = glTex->textureID();
    desc.fOrigin = glTex->origin();

    if (this->uploadTexData(desc, false,
                            left, top, width, height,
                            config, buffer, rowBytes)) {
        texture->dirtyMipMaps(true);
        return true;
    } else {
        return false;
    }
}

namespace {
bool adjust_pixel_ops_params(int surfaceWidth,
                             int surfaceHeight,
                             size_t bpp,
                             int* left, int* top, int* width, int* height,
                             const void** data,
                             size_t* rowBytes) {
    if (!*rowBytes) {
        *rowBytes = *width * bpp;
    }

    SkIRect subRect = SkIRect::MakeXYWH(*left, *top, *width, *height);
    SkIRect bounds = SkIRect::MakeWH(surfaceWidth, surfaceHeight);

    if (!subRect.intersect(bounds)) {
        return false;
    }
    *data = reinterpret_cast<const void*>(reinterpret_cast<intptr_t>(*data) +
            (subRect.fTop - *top) * *rowBytes + (subRect.fLeft - *left) * bpp);

    *left = subRect.fLeft;
    *top = subRect.fTop;
    *width = subRect.width();
    *height = subRect.height();
    return true;
}
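
// Worked example of the pointer adjustment above (illustrative numbers): with
// bpp = 4, *left = -2, *top = -1, *width = 8, *height = 8, *rowBytes = 32 and
// a large enough surface, the subrect clips to (0, 0, 6, 7). The data pointer
// then advances by (0 - (-1)) * 32 + (0 - (-2)) * 4 = 40 bytes, i.e. one full
// row plus two pixels, so it lands on the first pixel that survives clipping.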

GrGLenum check_alloc_error(const GrTextureDesc& desc, const GrGLInterface* interface) {
    if (SkToBool(desc.fFlags & kCheckAllocation_GrTextureFlagBit)) {
        return GR_GL_GET_ERROR(interface);
    } else {
        return CHECK_ALLOC_ERROR(interface);
    }
}

}

bool GrGpuGL::uploadTexData(const GrGLTexture::Desc& desc,
                            bool isNewTexture,
                            int left, int top, int width, int height,
                            GrPixelConfig dataConfig,
                            const void* data,
                            size_t rowBytes) {
    SkASSERT(NULL != data || isNewTexture);

    size_t bpp = GrBytesPerPixel(dataConfig);
    if (!adjust_pixel_ops_params(desc.fWidth, desc.fHeight, bpp, &left, &top,
                                 &width, &height, &data, &rowBytes)) {
        return false;
    }
    size_t trimRowBytes = width * bpp;

    // in case we need a temporary, trimmed copy of the src pixels
    SkAutoSMalloc<128 * 128> tempStorage;

    // Paletted textures cannot be partially updated.
    // We currently lazily create MIPMAPs when we see a draw with
    // GrTextureParams::kMipMap_FilterMode. Using texture storage requires that the
    // MIP levels are all created when the texture is created. So for now we don't use
    // texture storage.
    bool useTexStorage = false &&
                         isNewTexture &&
                         desc.fConfig != kIndex_8_GrPixelConfig &&
                         this->glCaps().texStorageSupport();

    if (useTexStorage && kGL_GrGLStandard == this->glStandard()) {
        // 565 is not a sized internal format on desktop GL. So on desktop with
        // 565 we always use an unsized internal format to let the system pick
        // the best sized format to convert the 565 data to. Since TexStorage
        // only allows sized internal formats we will instead use TexImage2D.
        useTexStorage = desc.fConfig != kRGB_565_GrPixelConfig;
    }

    GrGLenum internalFormat;
    GrGLenum externalFormat;
    GrGLenum externalType;
    // glTexStorage requires sized internal formats on both desktop and ES. ES2 requires an unsized
    // format for glTexImage, unlike ES3 and desktop. However, we allow the driver to decide the
    // size of the internal format whenever possible and so only use a sized internal format when
    // using texture storage.
    if (!this->configToGLFormats(dataConfig, useTexStorage, &internalFormat,
                                 &externalFormat, &externalType)) {
        return false;
    }

    if (!isNewTexture && GR_GL_PALETTE8_RGBA8 == internalFormat) {
        // paletted textures cannot be updated
        return false;
    }

    /*
     *  check whether to allocate a temporary buffer for flipping y or
     *  because our srcData has extra bytes past each row. If so, we need
     *  to trim those off here, since GL ES may not let us specify
     *  GL_UNPACK_ROW_LENGTH.
     */
    bool restoreGLRowLength = false;
    bool swFlipY = false;
    bool glFlipY = false;
    if (NULL != data) {
        if (kBottomLeft_GrSurfaceOrigin == desc.fOrigin) {
            if (this->glCaps().unpackFlipYSupport()) {
                glFlipY = true;
            } else {
                swFlipY = true;
            }
        }
        if (this->glCaps().unpackRowLengthSupport() && !swFlipY) {
            // can't use this for flipping, only non-neg values allowed. :(
            if (rowBytes != trimRowBytes) {
                GrGLint rowLength = static_cast<GrGLint>(rowBytes / bpp);
                GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH, rowLength));
                restoreGLRowLength = true;
            }
        } else {
            if (trimRowBytes != rowBytes || swFlipY) {
                // copy data into our new storage, skipping the trailing bytes
                size_t trimSize = height * trimRowBytes;
                const char* src = (const char*)data;
                if (swFlipY) {
                    src += (height - 1) * rowBytes;
                }
                char* dst = (char*)tempStorage.reset(trimSize);
                for (int y = 0; y < height; y++) {
                    memcpy(dst, src, trimRowBytes);
                    if (swFlipY) {
                        src -= rowBytes;
                    } else {
                        src += rowBytes;
                    }
                    dst += trimRowBytes;
                }
                // now point data to our copied version
                data = tempStorage.get();
            }
        }
        if (glFlipY) {
            GL_CALL(PixelStorei(GR_GL_UNPACK_FLIP_Y, GR_GL_TRUE));
        }
        GL_CALL(PixelStorei(GR_GL_UNPACK_ALIGNMENT, static_cast<GrGLint>(bpp)));
    }
    bool succeeded = true;
    if (isNewTexture &&
        0 == left && 0 == top &&
        desc.fWidth == width && desc.fHeight == height) {
        CLEAR_ERROR_BEFORE_ALLOC(this->glInterface());
        if (useTexStorage) {
            // We never resize or change formats of textures.
            GL_ALLOC_CALL(this->glInterface(),
                          TexStorage2D(GR_GL_TEXTURE_2D,
                                       1, // levels
                                       internalFormat,
                                       desc.fWidth, desc.fHeight));
        } else {
            if (GR_GL_PALETTE8_RGBA8 == internalFormat) {
                GrGLsizei imageSize = desc.fWidth * desc.fHeight +
                                      kGrColorTableSize;
                GL_ALLOC_CALL(this->glInterface(),
                              CompressedTexImage2D(GR_GL_TEXTURE_2D,
                                                   0, // level
                                                   internalFormat,
                                                   desc.fWidth, desc.fHeight,
                                                   0, // border
                                                   imageSize,
                                                   data));
            } else {
                GL_ALLOC_CALL(this->glInterface(),
                              TexImage2D(GR_GL_TEXTURE_2D,
                                         0, // level
                                         internalFormat,
                                         desc.fWidth, desc.fHeight,
                                         0, // border
                                         externalFormat, externalType,
                                         data));
            }
        }
        GrGLenum error = check_alloc_error(desc, this->glInterface());
        if (error != GR_GL_NO_ERROR) {
            succeeded = false;
        } else {
            // if we have data and we used TexStorage to create the texture, we
            // now upload with TexSubImage.
            if (NULL != data && useTexStorage) {
                GL_CALL(TexSubImage2D(GR_GL_TEXTURE_2D,
                                      0, // level
                                      left, top,
                                      width, height,
                                      externalFormat, externalType,
                                      data));
            }
        }
    } else {
        if (swFlipY || glFlipY) {
            top = desc.fHeight - (top + height);
        }
        GL_CALL(TexSubImage2D(GR_GL_TEXTURE_2D,
                              0, // level
                              left, top,
                              width, height,
                              externalFormat, externalType, data));
    }

    if (restoreGLRowLength) {
        SkASSERT(this->glCaps().unpackRowLengthSupport());
        GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH, 0));
    }
    if (glFlipY) {
        GL_CALL(PixelStorei(GR_GL_UNPACK_FLIP_Y, GR_GL_FALSE));
    }
    return succeeded;
}
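
// The row-trimming loop in uploadTexData() is the portable fallback when
// GL_UNPACK_ROW_LENGTH is unavailable. A self-contained sketch of that copy,
// with hypothetical sizes and a hypothetical srcPixels buffer:
#if 0
    const int width = 4, height = 3;
    const size_t bpp = 4;
    const size_t rowBytes = 32;                  // padded source pitch
    const size_t trimRowBytes = width * bpp;     // 16 bytes of real data per row
    SkAutoSMalloc<128> tmp(height * trimRowBytes);
    const char* src = srcPixels;                 // assumed: height * rowBytes bytes
    char* dst = (char*)tmp.get();
    for (int y = 0; y < height; ++y) {
        memcpy(dst, src, trimRowBytes);          // drop the per-row padding
        src += rowBytes;
        dst += trimRowBytes;
    }
#endif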

static bool renderbuffer_storage_msaa(GrGLContext& ctx,
                                      int sampleCount,
                                      GrGLenum format,
                                      int width, int height) {
    CLEAR_ERROR_BEFORE_ALLOC(ctx.interface());
    SkASSERT(GrGLCaps::kNone_MSFBOType != ctx.caps()->msFBOType());
    switch (ctx.caps()->msFBOType()) {
        case GrGLCaps::kDesktop_ARB_MSFBOType:
        case GrGLCaps::kDesktop_EXT_MSFBOType:
        case GrGLCaps::kES_3_0_MSFBOType:
            GL_ALLOC_CALL(ctx.interface(),
                          RenderbufferStorageMultisample(GR_GL_RENDERBUFFER,
                                                         sampleCount,
                                                         format,
                                                         width, height));
            break;
        case GrGLCaps::kES_Apple_MSFBOType:
            GL_ALLOC_CALL(ctx.interface(),
                          RenderbufferStorageMultisampleES2APPLE(GR_GL_RENDERBUFFER,
                                                                 sampleCount,
                                                                 format,
                                                                 width, height));
            break;
        case GrGLCaps::kES_EXT_MsToTexture_MSFBOType:
        case GrGLCaps::kES_IMG_MsToTexture_MSFBOType:
            GL_ALLOC_CALL(ctx.interface(),
                          RenderbufferStorageMultisampleES2EXT(GR_GL_RENDERBUFFER,
                                                               sampleCount,
                                                               format,
                                                               width, height));
            break;
        case GrGLCaps::kNone_MSFBOType:
            GrCrash("Shouldn't be here if we don't support multisampled renderbuffers.");
            break;
    }
    return (GR_GL_NO_ERROR == CHECK_ALLOC_ERROR(ctx.interface()));
}

bool GrGpuGL::createRenderTargetObjects(int width, int height,
                                        GrGLuint texID,
                                        GrGLRenderTarget::Desc* desc) {
    desc->fMSColorRenderbufferID = 0;
    desc->fRTFBOID = 0;
    desc->fTexFBOID = 0;
    desc->fIsWrapped = false;

    GrGLenum status;

    GrGLenum msColorFormat = 0; // suppress warning

    if (desc->fSampleCnt > 0 && GrGLCaps::kNone_MSFBOType == this->glCaps().msFBOType()) {
        goto FAILED;
    }

    GL_CALL(GenFramebuffers(1, &desc->fTexFBOID));
    if (!desc->fTexFBOID) {
        goto FAILED;
    }

    // If we are using multisampling we will create two FBOs. We render to one and then resolve to
    // the texture bound to the other. The exception is the IMG multisample extension. With this
    // extension the texture is multisampled when rendered to and then auto-resolves when it is
    // rendered from.
    if (desc->fSampleCnt > 0 && this->glCaps().usesMSAARenderBuffers()) {
        GL_CALL(GenFramebuffers(1, &desc->fRTFBOID));
        GL_CALL(GenRenderbuffers(1, &desc->fMSColorRenderbufferID));
        if (!desc->fRTFBOID ||
            !desc->fMSColorRenderbufferID ||
            !this->configToGLFormats(desc->fConfig,
                                     // ES2 and ES3 require sized internal formats for rb storage.
                                     kGLES_GrGLStandard == this->glStandard(),
                                     &msColorFormat,
                                     NULL,
                                     NULL)) {
            goto FAILED;
        }
    } else {
        desc->fRTFBOID = desc->fTexFBOID;
    }

    // below here we may bind the FBO
    fHWBoundRenderTarget = NULL;
    if (desc->fRTFBOID != desc->fTexFBOID) {
        SkASSERT(desc->fSampleCnt > 0);
        GL_CALL(BindRenderbuffer(GR_GL_RENDERBUFFER,
                                 desc->fMSColorRenderbufferID));
        if (!renderbuffer_storage_msaa(fGLContext,
                                       desc->fSampleCnt,
                                       msColorFormat,
                                       width, height)) {
            goto FAILED;
        }
        GL_CALL(BindFramebuffer(GR_GL_FRAMEBUFFER, desc->fRTFBOID));
        GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
                                        GR_GL_COLOR_ATTACHMENT0,
                                        GR_GL_RENDERBUFFER,
                                        desc->fMSColorRenderbufferID));
        if (desc->fCheckAllocation ||
            !this->glCaps().isConfigVerifiedColorAttachment(desc->fConfig)) {
            GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER));
            if (status != GR_GL_FRAMEBUFFER_COMPLETE) {
                goto FAILED;
            }
            fGLContext.caps()->markConfigAsValidColorAttachment(desc->fConfig);
        }
    }
    GL_CALL(BindFramebuffer(GR_GL_FRAMEBUFFER, desc->fTexFBOID));

    if (this->glCaps().usesImplicitMSAAResolve() && desc->fSampleCnt > 0) {
        GL_CALL(FramebufferTexture2DMultisample(GR_GL_FRAMEBUFFER,
                                                GR_GL_COLOR_ATTACHMENT0,
                                                GR_GL_TEXTURE_2D,
                                                texID, 0, desc->fSampleCnt));
    } else {
        GL_CALL(FramebufferTexture2D(GR_GL_FRAMEBUFFER,
                                     GR_GL_COLOR_ATTACHMENT0,
                                     GR_GL_TEXTURE_2D,
                                     texID, 0));
    }
    if (desc->fCheckAllocation ||
        !this->glCaps().isConfigVerifiedColorAttachment(desc->fConfig)) {
        GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER));
        if (status != GR_GL_FRAMEBUFFER_COMPLETE) {
            goto FAILED;
        }
        fGLContext.caps()->markConfigAsValidColorAttachment(desc->fConfig);
    }

    return true;

FAILED:
    if (desc->fMSColorRenderbufferID) {
        GL_CALL(DeleteRenderbuffers(1, &desc->fMSColorRenderbufferID));
    }
    if (desc->fRTFBOID != desc->fTexFBOID) {
        GL_CALL(DeleteFramebuffers(1, &desc->fRTFBOID));
    }
    if (desc->fTexFBOID) {
        GL_CALL(DeleteFramebuffers(1, &desc->fTexFBOID));
    }
    return false;
}
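
// A minimal sketch of the resolve step implied by the two-FBO setup above,
// written with raw GL names rather than this file's GR_GL_* wrappers (the
// FBO IDs are hypothetical locals):
#if 0
    glBindFramebuffer(GL_READ_FRAMEBUFFER, rtFBOID);   // multisampled source
    glBindFramebuffer(GL_DRAW_FRAMEBUFFER, texFBOID);  // single-sample texture
    glBlitFramebuffer(0, 0, width, height,
                      0, 0, width, height,
                      GL_COLOR_BUFFER_BIT, GL_NEAREST);
#endif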

// good to set a break-point here to know when createTexture fails
static GrTexture* return_null_texture() {
//    SkDEBUGFAIL("null texture");
    return NULL;
}

#if 0 && defined(SK_DEBUG)
static size_t as_size_t(int x) {
    return x;
}
#endif

GrTexture* GrGpuGL::onCreateTexture(const GrTextureDesc& desc,
                                    const void* srcData,
                                    size_t rowBytes) {

    GrGLTexture::Desc glTexDesc;
    GrGLRenderTarget::Desc glRTDesc;

    // Attempt to catch un- or wrongly initialized sample counts.
    SkASSERT(desc.fSampleCnt >= 0 && desc.fSampleCnt <= 64);
    // We fail if the MSAA was requested and is not available.
    if (GrGLCaps::kNone_MSFBOType == this->glCaps().msFBOType() && desc.fSampleCnt) {
        //GrPrintf("MSAA RT requested but not supported on this platform.");
        return return_null_texture();
    }
    // If the sample count exceeds the max then we clamp it.
    glTexDesc.fSampleCnt = GrMin(desc.fSampleCnt, this->caps()->maxSampleCount());

    glTexDesc.fFlags = desc.fFlags;
    glTexDesc.fWidth = desc.fWidth;
    glTexDesc.fHeight = desc.fHeight;
    glTexDesc.fConfig = desc.fConfig;
    glTexDesc.fIsWrapped = false;

    glRTDesc.fMSColorRenderbufferID = 0;
    glRTDesc.fRTFBOID = 0;
    glRTDesc.fTexFBOID = 0;
    glRTDesc.fIsWrapped = false;
    glRTDesc.fConfig = glTexDesc.fConfig;
    glRTDesc.fCheckAllocation = SkToBool(desc.fFlags & kCheckAllocation_GrTextureFlagBit);

    bool renderTarget = SkToBool(desc.fFlags & kRenderTarget_GrTextureFlagBit);

    glTexDesc.fOrigin = resolve_origin(desc.fOrigin, renderTarget);
    glRTDesc.fOrigin = glTexDesc.fOrigin;

    glRTDesc.fSampleCnt = glTexDesc.fSampleCnt;

    if (renderTarget) {
        int maxRTSize = this->caps()->maxRenderTargetSize();
        if (glTexDesc.fWidth > maxRTSize || glTexDesc.fHeight > maxRTSize) {
            return return_null_texture();
        }
    } else {
        int maxSize = this->caps()->maxTextureSize();
        if (glTexDesc.fWidth > maxSize || glTexDesc.fHeight > maxSize) {
            return return_null_texture();
        }
    }

    GL_CALL(GenTextures(1, &glTexDesc.fTextureID));

    if (!glTexDesc.fTextureID) {
        return return_null_texture();
    }

    this->setScratchTextureUnit();
    GL_CALL(BindTexture(GR_GL_TEXTURE_2D, glTexDesc.fTextureID));

    if (renderTarget && this->glCaps().textureUsageSupport()) {
        // provides a hint about how this texture will be used
        GL_CALL(TexParameteri(GR_GL_TEXTURE_2D,
                              GR_GL_TEXTURE_USAGE,
                              GR_GL_FRAMEBUFFER_ATTACHMENT));
    }

    // Some drivers like to know filter/wrap before seeing glTexImage2D. Some
    // drivers have a bug where an FBO won't be complete if it includes a
    // texture that is not mipmap complete (considering the filter in use).
    GrGLTexture::TexParams initialTexParams;
    // we only set a subset here so invalidate first
    initialTexParams.invalidate();
    initialTexParams.fMinFilter = GR_GL_NEAREST;
    initialTexParams.fMagFilter = GR_GL_NEAREST;
    initialTexParams.fWrapS = GR_GL_CLAMP_TO_EDGE;
    initialTexParams.fWrapT = GR_GL_CLAMP_TO_EDGE;
    GL_CALL(TexParameteri(GR_GL_TEXTURE_2D,
                          GR_GL_TEXTURE_MAG_FILTER,
                          initialTexParams.fMagFilter));
    GL_CALL(TexParameteri(GR_GL_TEXTURE_2D,
                          GR_GL_TEXTURE_MIN_FILTER,
                          initialTexParams.fMinFilter));
    GL_CALL(TexParameteri(GR_GL_TEXTURE_2D,
                          GR_GL_TEXTURE_WRAP_S,
                          initialTexParams.fWrapS));
    GL_CALL(TexParameteri(GR_GL_TEXTURE_2D,
                          GR_GL_TEXTURE_WRAP_T,
                          initialTexParams.fWrapT));
    if (!this->uploadTexData(glTexDesc, true, 0, 0,
                             glTexDesc.fWidth, glTexDesc.fHeight,
                             desc.fConfig, srcData, rowBytes)) {
        GL_CALL(DeleteTextures(1, &glTexDesc.fTextureID));
        return return_null_texture();
    }

    GrGLTexture* tex;
    if (renderTarget) {
        // unbind the texture from the texture unit before binding it to the frame buffer
        GL_CALL(BindTexture(GR_GL_TEXTURE_2D, 0));

        if (!this->createRenderTargetObjects(glTexDesc.fWidth,
                                             glTexDesc.fHeight,
                                             glTexDesc.fTextureID,
                                             &glRTDesc)) {
            GL_CALL(DeleteTextures(1, &glTexDesc.fTextureID));
            return return_null_texture();
        }
        tex = SkNEW_ARGS(GrGLTexture, (this, glTexDesc, glRTDesc));
    } else {
        tex = SkNEW_ARGS(GrGLTexture, (this, glTexDesc));
    }
    tex->setCachedTexParams(initialTexParams, this->getResetTimestamp());
#ifdef TRACE_TEXTURE_CREATION
    GrPrintf("--- new texture [%d] size=(%d %d) config=%d\n",
             glTexDesc.fTextureID, desc.fWidth, desc.fHeight, desc.fConfig);
#endif
    return tex;
}

namespace {

const GrGLuint kUnknownBitCount = GrGLStencilBuffer::kUnknownBitCount;

void inline get_stencil_rb_sizes(const GrGLInterface* gl,
                                 GrGLStencilBuffer::Format* format) {

    // we shouldn't ever know one size and not the other
    SkASSERT((kUnknownBitCount == format->fStencilBits) ==
             (kUnknownBitCount == format->fTotalBits));
    if (kUnknownBitCount == format->fStencilBits) {
        GR_GL_GetRenderbufferParameteriv(gl, GR_GL_RENDERBUFFER,
                                         GR_GL_RENDERBUFFER_STENCIL_SIZE,
                                         (GrGLint*)&format->fStencilBits);
        if (format->fPacked) {
            GR_GL_GetRenderbufferParameteriv(gl, GR_GL_RENDERBUFFER,
                                             GR_GL_RENDERBUFFER_DEPTH_SIZE,
                                             (GrGLint*)&format->fTotalBits);
            format->fTotalBits += format->fStencilBits;
        } else {
            format->fTotalBits = format->fStencilBits;
        }
    }
}
}

bool GrGpuGL::createStencilBufferForRenderTarget(GrRenderTarget* rt,
                                                 int width, int height) {

    // All internally created RTs are also textures. We don't create
    // SBs for a client's standalone RT (that is, an RT that isn't also a texture).
    SkASSERT(rt->asTexture());
    SkASSERT(width >= rt->width());
    SkASSERT(height >= rt->height());

    int samples = rt->numSamples();
    GrGLuint sbID;
    GL_CALL(GenRenderbuffers(1, &sbID));
    if (!sbID) {
        return false;
    }

    int stencilFmtCnt = this->glCaps().stencilFormats().count();
    for (int i = 0; i < stencilFmtCnt; ++i) {
        GL_CALL(BindRenderbuffer(GR_GL_RENDERBUFFER, sbID));
        // we start with the last stencil format that succeeded in hopes
        // that we won't go through this loop more than once after the
        // first (painful) stencil creation.
        int sIdx = (i + fLastSuccessfulStencilFmtIdx) % stencilFmtCnt;
        const GrGLCaps::StencilFormat& sFmt =
                this->glCaps().stencilFormats()[sIdx];
        CLEAR_ERROR_BEFORE_ALLOC(this->glInterface());
        // we do this "if" so that we don't call the multisample
        // version on a GL that doesn't have an MSAA extension.
        bool created;
        if (samples > 0) {
            created = renderbuffer_storage_msaa(fGLContext,
                                                samples,
                                                sFmt.fInternalFormat,
                                                width, height);
        } else {
            GL_ALLOC_CALL(this->glInterface(),
                          RenderbufferStorage(GR_GL_RENDERBUFFER,
                                              sFmt.fInternalFormat,
                                              width, height));
            created =
                (GR_GL_NO_ERROR == check_alloc_error(rt->desc(), this->glInterface()));
        }
        if (created) {
            // After sized formats we attempt an unsized format and take
            // whatever sizes GL gives us. In that case we query for the size.
            GrGLStencilBuffer::Format format = sFmt;
            get_stencil_rb_sizes(this->glInterface(), &format);
            static const bool kIsWrapped = false;
            SkAutoTUnref<GrStencilBuffer> sb(SkNEW_ARGS(GrGLStencilBuffer,
                                             (this, kIsWrapped, sbID, width, height,
                                              samples, format)));
            if (this->attachStencilBufferToRenderTarget(sb, rt)) {
                fLastSuccessfulStencilFmtIdx = sIdx;
                sb->transferToCache();
                rt->setStencilBuffer(sb);
                return true;
            }
            sb->abandon(); // otherwise we lose sbID
        }
    }
    GL_CALL(DeleteRenderbuffers(1, &sbID));
    return false;
}
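
// Worked example of the rotating start index above: with stencilFmtCnt == 4
// and fLastSuccessfulStencilFmtIdx == 2, the loop tries sIdx = 2, 3, 0, 1, so
// the format that worked last time is attempted first.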

bool GrGpuGL::attachStencilBufferToRenderTarget(GrStencilBuffer* sb, GrRenderTarget* rt) {
    GrGLRenderTarget* glrt = (GrGLRenderTarget*) rt;

    GrGLuint fbo = glrt->renderFBOID();

    if (NULL == sb) {
        if (NULL != rt->getStencilBuffer()) {
            GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
                                            GR_GL_STENCIL_ATTACHMENT,
                                            GR_GL_RENDERBUFFER, 0));
            GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
                                            GR_GL_DEPTH_ATTACHMENT,
                                            GR_GL_RENDERBUFFER, 0));
#ifdef SK_DEBUG
            GrGLenum status;
            GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER));
            SkASSERT(GR_GL_FRAMEBUFFER_COMPLETE == status);
#endif
        }
        return true;
    } else {
        GrGLStencilBuffer* glsb = static_cast<GrGLStencilBuffer*>(sb);
        GrGLuint rb = glsb->renderbufferID();

        fHWBoundRenderTarget = NULL;
        GL_CALL(BindFramebuffer(GR_GL_FRAMEBUFFER, fbo));
        GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
                                        GR_GL_STENCIL_ATTACHMENT,
                                        GR_GL_RENDERBUFFER, rb));
        if (glsb->format().fPacked) {
            GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
                                            GR_GL_DEPTH_ATTACHMENT,
                                            GR_GL_RENDERBUFFER, rb));
        } else {
            GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
                                            GR_GL_DEPTH_ATTACHMENT,
                                            GR_GL_RENDERBUFFER, 0));
        }

        GrGLenum status;
        if (!this->glCaps().isColorConfigAndStencilFormatVerified(rt->config(), glsb->format())) {
            GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER));
            if (status != GR_GL_FRAMEBUFFER_COMPLETE) {
                GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
                                                GR_GL_STENCIL_ATTACHMENT,
                                                GR_GL_RENDERBUFFER, 0));
                if (glsb->format().fPacked) {
                    GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
                                                    GR_GL_DEPTH_ATTACHMENT,
                                                    GR_GL_RENDERBUFFER, 0));
                }
                return false;
            } else {
                fGLContext.caps()->markColorConfigAndStencilFormatAsVerified(
                    rt->config(),
                    glsb->format());
            }
        }
        return true;
    }
}

////////////////////////////////////////////////////////////////////////////////

GrVertexBuffer* GrGpuGL::onCreateVertexBuffer(size_t size, bool dynamic) {
    GrGLVertexBuffer::Desc desc;
    desc.fDynamic = dynamic;
    desc.fSizeInBytes = size;
    desc.fIsWrapped = false;

    if (this->glCaps().useNonVBOVertexAndIndexDynamicData() && desc.fDynamic) {
        desc.fID = 0;
        GrGLVertexBuffer* vertexBuffer = SkNEW_ARGS(GrGLVertexBuffer, (this, desc));
        return vertexBuffer;
    } else {
        GL_CALL(GenBuffers(1, &desc.fID));
        if (desc.fID) {
            fHWGeometryState.setVertexBufferID(this, desc.fID);
            CLEAR_ERROR_BEFORE_ALLOC(this->glInterface());
            // make sure driver can allocate memory for this buffer
            GL_ALLOC_CALL(this->glInterface(),
                          BufferData(GR_GL_ARRAY_BUFFER,
                                     (GrGLsizeiptr) desc.fSizeInBytes,
                                     NULL,   // data ptr
                                     desc.fDynamic ? GR_GL_DYNAMIC_DRAW : GR_GL_STATIC_DRAW));
            if (CHECK_ALLOC_ERROR(this->glInterface()) != GR_GL_NO_ERROR) {
                GL_CALL(DeleteBuffers(1, &desc.fID));
                this->notifyVertexBufferDelete(desc.fID);
                return NULL;
            }
            GrGLVertexBuffer* vertexBuffer = SkNEW_ARGS(GrGLVertexBuffer, (this, desc));
            return vertexBuffer;
        }
        return NULL;
    }
}

GrIndexBuffer* GrGpuGL::onCreateIndexBuffer(size_t size, bool dynamic) {
    GrGLIndexBuffer::Desc desc;
    desc.fDynamic = dynamic;
    desc.fSizeInBytes = size;
    desc.fIsWrapped = false;

    if (this->glCaps().useNonVBOVertexAndIndexDynamicData() && desc.fDynamic) {
        desc.fID = 0;
        GrIndexBuffer* indexBuffer = SkNEW_ARGS(GrGLIndexBuffer, (this, desc));
        return indexBuffer;
    } else {
        GL_CALL(GenBuffers(1, &desc.fID));
        if (desc.fID) {
            fHWGeometryState.setIndexBufferIDOnDefaultVertexArray(this, desc.fID);
            CLEAR_ERROR_BEFORE_ALLOC(this->glInterface());
            // make sure driver can allocate memory for this buffer
            GL_ALLOC_CALL(this->glInterface(),
                          BufferData(GR_GL_ELEMENT_ARRAY_BUFFER,
                                     (GrGLsizeiptr) desc.fSizeInBytes,
                                     NULL,   // data ptr
                                     desc.fDynamic ? GR_GL_DYNAMIC_DRAW : GR_GL_STATIC_DRAW));
            if (CHECK_ALLOC_ERROR(this->glInterface()) != GR_GL_NO_ERROR) {
                GL_CALL(DeleteBuffers(1, &desc.fID));
                this->notifyIndexBufferDelete(desc.fID);
                return NULL;
            }
            GrIndexBuffer* indexBuffer = SkNEW_ARGS(GrGLIndexBuffer, (this, desc));
            return indexBuffer;
        }
        return NULL;
    }
}

GrPath* GrGpuGL::onCreatePath(const SkPath& inPath, const SkStrokeRec& stroke) {
    SkASSERT(this->caps()->pathRenderingSupport());
    return SkNEW_ARGS(GrGLPath, (this, inPath, stroke));
}

void GrGpuGL::flushScissor() {
    if (fScissorState.fEnabled) {
        // Only access the RT if scissoring is being enabled. We can call this before performing
        // a glBlitFramebuffer for a surface->surface copy, which requires no RT to be bound to
        // the GrDrawState.
        const GrDrawState& drawState = this->getDrawState();
        const GrGLRenderTarget* rt =
            static_cast<const GrGLRenderTarget*>(drawState.getRenderTarget());

        SkASSERT(NULL != rt);
        const GrGLIRect& vp = rt->getViewport();
        GrGLIRect scissor;
        scissor.setRelativeTo(vp,
                              fScissorState.fRect.fLeft,
                              fScissorState.fRect.fTop,
                              fScissorState.fRect.width(),
                              fScissorState.fRect.height(),
                              rt->origin());
        // if the scissor fully contains the viewport then we fall through and
        // disable the scissor test.
        if (!scissor.contains(vp)) {
            if (fHWScissorSettings.fRect != scissor) {
                scissor.pushToGLScissor(this->glInterface());
                fHWScissorSettings.fRect = scissor;
            }
            if (kYes_TriState != fHWScissorSettings.fEnabled) {
                GL_CALL(Enable(GR_GL_SCISSOR_TEST));
                fHWScissorSettings.fEnabled = kYes_TriState;
            }
            return;
        }
    }
    if (kNo_TriState != fHWScissorSettings.fEnabled) {
        GL_CALL(Disable(GR_GL_SCISSOR_TEST));
        fHWScissorSettings.fEnabled = kNo_TriState;
        return;
    }
}
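
// Worked example of the viewport-relative conversion above, assuming
// setRelativeTo() maps a top-left-origin rect into GL's bottom-left scissor
// space for a kBottomLeft_GrSurfaceOrigin target: with a 100-pixel-tall
// viewport, a scissor rect with top = 10 and height = 20 lands at
// GL bottom = 100 - (10 + 20) = 70.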

void GrGpuGL::onClear(const SkIRect* rect, GrColor color, bool canIgnoreRect) {
    const GrDrawState& drawState = this->getDrawState();
    const GrRenderTarget* rt = drawState.getRenderTarget();
    // parent class should never let us get here with no RT
    SkASSERT(NULL != rt);

    if (canIgnoreRect && this->glCaps().fullClearIsFree()) {
        rect = NULL;
    }

    SkIRect clippedRect;
    if (NULL != rect) {
        // flushScissor expects rect to be clipped to the target.
        clippedRect = *rect;
        SkIRect rtRect = SkIRect::MakeWH(rt->width(), rt->height());
        if (clippedRect.intersect(rtRect)) {
            rect = &clippedRect;
        } else {
            return;
        }
    }

    this->flushRenderTarget(rect);
    GrAutoTRestore<ScissorState> asr(&fScissorState);
    fScissorState.fEnabled = (NULL != rect);
    if (fScissorState.fEnabled) {
        fScissorState.fRect = *rect;
    }
    this->flushScissor();

    GrGLfloat r, g, b, a;
    static const GrGLfloat scale255 = 1.f / 255.f;
    a = GrColorUnpackA(color) * scale255;
    GrGLfloat scaleRGB = scale255;
    r = GrColorUnpackR(color) * scaleRGB;
    g = GrColorUnpackG(color) * scaleRGB;
    b = GrColorUnpackB(color) * scaleRGB;

    GL_CALL(ColorMask(GR_GL_TRUE, GR_GL_TRUE, GR_GL_TRUE, GR_GL_TRUE));
    fHWWriteToColor = kYes_TriState;
    GL_CALL(ClearColor(r, g, b, a));
    GL_CALL(Clear(GR_GL_COLOR_BUFFER_BIT));
}

void GrGpuGL::clearStencil() {
    if (NULL == this->getDrawState().getRenderTarget()) {
        return;
    }

    this->flushRenderTarget(&SkIRect::EmptyIRect());

    GrAutoTRestore<ScissorState> asr(&fScissorState);
    fScissorState.fEnabled = false;
    this->flushScissor();

    GL_CALL(StencilMask(0xffffffff));
    GL_CALL(ClearStencil(0));
    GL_CALL(Clear(GR_GL_STENCIL_BUFFER_BIT));
    fHWStencilSettings.invalidate();
}

void GrGpuGL::clearStencilClip(const SkIRect& rect, bool insideClip) {
    const GrDrawState& drawState = this->getDrawState();
    const GrRenderTarget* rt = drawState.getRenderTarget();
    SkASSERT(NULL != rt);

    // this should only be called internally when we know we have a
    // stencil buffer.
    SkASSERT(NULL != rt->getStencilBuffer());
    GrGLint stencilBitCount = rt->getStencilBuffer()->bits();
#if 0
    SkASSERT(stencilBitCount > 0);
    GrGLint clipStencilMask = (1 << (stencilBitCount - 1));
#else
    // we could just clear the clip bit but when we go through
    // ANGLE a partial stencil mask will cause clears to be
    // turned into draws. Our contract on GrDrawTarget says that
    // changing the clip between stencil passes may or may not
    // zero the client's clip bits. So we just clear the whole thing.
    static const GrGLint clipStencilMask = ~0;
#endif
    GrGLint value;
    if (insideClip) {
        value = (1 << (stencilBitCount - 1));
    } else {
        value = 0;
    }
    this->flushRenderTarget(&SkIRect::EmptyIRect());

    GrAutoTRestore<ScissorState> asr(&fScissorState);
    fScissorState.fEnabled = true;
    fScissorState.fRect = rect;
    this->flushScissor();

    GL_CALL(StencilMask((uint32_t) clipStencilMask));
    GL_CALL(ClearStencil(value));
    GL_CALL(Clear(GR_GL_STENCIL_BUFFER_BIT));
    fHWStencilSettings.invalidate();
}

void GrGpuGL::onForceRenderTargetFlush() {
    this->flushRenderTarget(&SkIRect::EmptyIRect());
}

bool GrGpuGL::readPixelsWillPayForYFlip(GrRenderTarget* renderTarget,
                                        int left, int top,
                                        int width, int height,
                                        GrPixelConfig config,
                                        size_t rowBytes) const {
    // If this render target is already TopLeft, we don't need to flip.
    if (kTopLeft_GrSurfaceOrigin == renderTarget->origin()) {
        return false;
    }

    // if GL can do the flip then we'll never pay for it.
    if (this->glCaps().packFlipYSupport()) {
        return false;
    }

    // If GL can handle untight rowBytes itself then no memcpy happens, so a
    // y-flip would have to be paid for.
    if (this->glCaps().packRowLengthSupport()) {
        return true;
    }
    // If we have to do memcpys to handle rowBytes then y-flip is free.
    // Note the rowBytes might be tight to the passed in data, but if data
    // gets clipped in x to the target the rowBytes will no longer be tight.
    if (left >= 0 && (left + width) < renderTarget->width()) {
        return 0 == rowBytes ||
               GrBytesPerPixel(config) * width == rowBytes;
    } else {
        return false;
    }
}
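
// Example of the cost decision above: reading a 100-pixel-wide kRGBA_8888
// strip (bpp = 4) back from a bottom-left target with no GL pack support and
// rowBytes == 400 is tight, so the y-flip must be paid for; with
// rowBytes == 512 the memcpy needed for the padding flips the rows for free.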

bool GrGpuGL::onReadPixels(GrRenderTarget* target,
                           int left, int top,
                           int width, int height,
                           GrPixelConfig config,
                           void* buffer,
                           size_t rowBytes) {
    GrGLenum format;
    GrGLenum type;
    bool flipY = kBottomLeft_GrSurfaceOrigin == target->origin();
    if (!this->configToGLFormats(config, false, NULL, &format, &type)) {
        return false;
    }
    size_t bpp = GrBytesPerPixel(config);
    if (!adjust_pixel_ops_params(target->width(), target->height(), bpp,
                                 &left, &top, &width, &height,
                                 const_cast<const void**>(&buffer),
                                 &rowBytes)) {
        return false;
    }

    // resolve the render target if necessary
    GrGLRenderTarget* tgt = static_cast<GrGLRenderTarget*>(target);
    GrDrawState::AutoRenderTargetRestore artr;
    switch (tgt->getResolveType()) {
        case GrGLRenderTarget::kCantResolve_ResolveType:
            return false;
        case GrGLRenderTarget::kAutoResolves_ResolveType:
            artr.set(this->drawState(), target);
            this->flushRenderTarget(&SkIRect::EmptyIRect());
            break;
        case GrGLRenderTarget::kCanResolve_ResolveType:
            this->onResolveRenderTarget(tgt);
            // we don't track the state of the READ FBO ID.
            GL_CALL(BindFramebuffer(GR_GL_READ_FRAMEBUFFER,
                                    tgt->textureFBOID()));
            break;
        default:
            GrCrash("Unknown resolve type");
    }

    const GrGLIRect& glvp = tgt->getViewport();

    // the read rect is viewport-relative
    GrGLIRect readRect;
    readRect.setRelativeTo(glvp, left, top, width, height, target->origin());

    size_t tightRowBytes = bpp * width;
    if (0 == rowBytes) {
        rowBytes = tightRowBytes;
    }
    size_t readDstRowBytes = tightRowBytes;
    void* readDst = buffer;

    // determine if GL can read using the passed rowBytes or if we need
    // a scratch buffer.
    SkAutoSMalloc<32 * sizeof(GrColor)> scratch;
    if (rowBytes != tightRowBytes) {
        if (this->glCaps().packRowLengthSupport()) {
            SkASSERT(!(rowBytes % sizeof(GrColor)));
            GL_CALL(PixelStorei(GR_GL_PACK_ROW_LENGTH,
                                static_cast<GrGLint>(rowBytes / sizeof(GrColor))));
            readDstRowBytes = rowBytes;
        } else {
            scratch.reset(tightRowBytes * height);
            readDst = scratch.get();
        }
    }
    if (flipY && this->glCaps().packFlipYSupport()) {
        GL_CALL(PixelStorei(GR_GL_PACK_REVERSE_ROW_ORDER, 1));
    }
    GL_CALL(ReadPixels(readRect.fLeft, readRect.fBottom,
                       readRect.fWidth, readRect.fHeight,
                       format, type, readDst));
    if (readDstRowBytes != tightRowBytes) {
        SkASSERT(this->glCaps().packRowLengthSupport());
        GL_CALL(PixelStorei(GR_GL_PACK_ROW_LENGTH, 0));
    }
    if (flipY && this->glCaps().packFlipYSupport()) {
        GL_CALL(PixelStorei(GR_GL_PACK_REVERSE_ROW_ORDER, 0));
        flipY = false;
    }

    // now reverse the order of the rows, since GL's are bottom-to-top, but our
    // API presents top-to-bottom. We must preserve the padding contents. Note
    // that the above readPixels did not overwrite the padding.
    if (readDst == buffer) {
        SkASSERT(rowBytes == readDstRowBytes);
        if (flipY) {
            scratch.reset(tightRowBytes);
            void* tmpRow = scratch.get();
            // flip y in-place by rows
            const int halfY = height >> 1;
            char* top = reinterpret_cast<char*>(buffer);
            char* bottom = top + (height - 1) * rowBytes;
            for (int y = 0; y < halfY; y++) {
                memcpy(tmpRow, top, tightRowBytes);
                memcpy(top, bottom, tightRowBytes);
                memcpy(bottom, tmpRow, tightRowBytes);
                top += rowBytes;
                bottom -= rowBytes;
            }
        }
    } else {
        SkASSERT(readDst != buffer);
        SkASSERT(rowBytes != tightRowBytes);
        // copy from readDst to buffer while flipping y
        const char* src = reinterpret_cast<const char*>(readDst);
        char* dst = reinterpret_cast<char*>(buffer);
        if (flipY) {
            dst += (height - 1) * rowBytes;
        }
        for (int y = 0; y < height; y++) {
            memcpy(dst, src, tightRowBytes);
            src += readDstRowBytes;
            if (!flipY) {
                dst += rowBytes;
            } else {
                dst -= rowBytes;
            }
        }
    }
    return true;
}

void GrGpuGL::flushRenderTarget(const SkIRect* bound) {

    GrGLRenderTarget* rt =
        static_cast<GrGLRenderTarget*>(this->drawState()->getRenderTarget());
    SkASSERT(NULL != rt);

    if (fHWBoundRenderTarget != rt) {
        GL_CALL(BindFramebuffer(GR_GL_FRAMEBUFFER, rt->renderFBOID()));
#ifdef SK_DEBUG
        // don't do this check in Chromium -- this is causing
        // lots of repeated command buffer flushes when the compositor is
        // rendering with Ganesh, which is really slow; even too slow for
        // Debug mode.
        if (!this->glContext().isChromium()) {
            GrGLenum status;
            GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER));
            if (status != GR_GL_FRAMEBUFFER_COMPLETE) {
                GrPrintf("GrGpuGL::flushRenderTarget glCheckFramebufferStatus %x\n", status);
            }
        }
#endif
        fHWBoundRenderTarget = rt;
        const GrGLIRect& vp = rt->getViewport();
        if (fHWViewport != vp) {
            vp.pushToGLViewport(this->glInterface());
            fHWViewport = vp;
        }
    }
    if (NULL == bound || !bound->isEmpty()) {
        rt->flagAsNeedingResolve(bound);
    }

    GrTexture* texture = rt->asTexture();
    if (texture) {
        texture->dirtyMipMaps(true);
    }
}

GrGLenum gPrimitiveType2GLMode[] = {
    GR_GL_TRIANGLES,
    GR_GL_TRIANGLE_STRIP,
    GR_GL_TRIANGLE_FAN,
    GR_GL_POINTS,
    GR_GL_LINES,
    GR_GL_LINE_STRIP
};

#define SWAP_PER_DRAW 0

#if SWAP_PER_DRAW
    #if defined(SK_BUILD_FOR_MAC)
        #include <AGL/agl.h>
    #elif defined(SK_BUILD_FOR_WIN32)
        #include <gl/GL.h>
        void SwapBuf() {
            DWORD procID = GetCurrentProcessId();
            HWND hwnd = GetTopWindow(GetDesktopWindow());
            while (hwnd) {
                DWORD wndProcID = 0;
                GetWindowThreadProcessId(hwnd, &wndProcID);
                if (wndProcID == procID) {
                    SwapBuffers(GetDC(hwnd));
                }
                hwnd = GetNextWindow(hwnd, GW_HWNDNEXT);
            }
        }
    #endif
#endif

void GrGpuGL::onGpuDraw(const DrawInfo& info) {
    size_t indexOffsetInBytes;
    this->setupGeometry(info, &indexOffsetInBytes);

    SkASSERT((size_t)info.primitiveType() < GR_ARRAY_COUNT(gPrimitiveType2GLMode));

    if (info.isIndexed()) {
        GrGLvoid* indices =
            reinterpret_cast<GrGLvoid*>(indexOffsetInBytes + sizeof(uint16_t) * info.startIndex());
        // info.startVertex() was accounted for by setupGeometry.
        GL_CALL(DrawElements(gPrimitiveType2GLMode[info.primitiveType()],
                             info.indexCount(),
                             GR_GL_UNSIGNED_SHORT,
                             indices));
    } else {
        // Pass 0 for parameter first. We have to adjust glVertexAttribPointer() to account for
        // startVertex in the DrawElements case. So we always rely on setupGeometry to have
        // accounted for startVertex.
        GL_CALL(DrawArrays(gPrimitiveType2GLMode[info.primitiveType()], 0, info.vertexCount()));
    }
#if SWAP_PER_DRAW
    glFlush();
    #if defined(SK_BUILD_FOR_MAC)
        aglSwapBuffers(aglGetCurrentContext());
        int set_a_break_pt_here = 9;
        aglSwapBuffers(aglGetCurrentContext());
    #elif defined(SK_BUILD_FOR_WIN32)
        SwapBuf();
        int set_a_break_pt_here = 9;
        SwapBuf();
    #endif
#endif
}
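
// The DrawElements offset above is plain byte arithmetic: with 16-bit indices,
// indexOffsetInBytes = 64 and startIndex = 10, the indices "pointer" becomes
// (GrGLvoid*)(64 + 2 * 10) == (GrGLvoid*)84, an offset into the bound element
// array buffer rather than a client-memory address.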

static GrGLenum gr_stencil_op_to_gl_path_rendering_fill_mode(GrStencilOp op) {
    switch (op) {
        default:
            GrCrash("Unexpected path fill.");
            /* fallthrough */
        case kIncClamp_StencilOp:
            return GR_GL_COUNT_UP;
        case kInvert_StencilOp:
            return GR_GL_INVERT;
    }
}

void GrGpuGL::onGpuStencilPath(const GrPath* path, SkPath::FillType fill) {
    SkASSERT(this->caps()->pathRenderingSupport());

    GrGLuint id = static_cast<const GrGLPath*>(path)->pathID();
    SkASSERT(NULL != this->drawState()->getRenderTarget());
    SkASSERT(NULL != this->drawState()->getRenderTarget()->getStencilBuffer());

    flushPathStencilSettings(fill);

    // Decide how to manipulate the stencil buffer based on the fill rule.
    SkASSERT(!fHWPathStencilSettings.isTwoSided());

    GrGLenum fillMode =
        gr_stencil_op_to_gl_path_rendering_fill_mode(fHWPathStencilSettings.passOp(GrStencilSettings::kFront_Face));
    GrGLint writeMask = fHWPathStencilSettings.writeMask(GrStencilSettings::kFront_Face);
    GL_CALL(StencilFillPath(id, fillMode, writeMask));
}

void GrGpuGL::onGpuDrawPath(const GrPath* path, SkPath::FillType fill) {
    SkASSERT(this->caps()->pathRenderingSupport());

    GrGLuint id = static_cast<const GrGLPath*>(path)->pathID();
    SkASSERT(NULL != this->drawState()->getRenderTarget());
    SkASSERT(NULL != this->drawState()->getRenderTarget()->getStencilBuffer());
    SkASSERT(!fCurrentProgram->hasVertexShader());

    flushPathStencilSettings(fill);
    const SkStrokeRec& stroke = path->getStroke();

    SkPath::FillType nonInvertedFill = SkPath::ConvertToNonInverseFillType(fill);
    SkASSERT(!fHWPathStencilSettings.isTwoSided());
    GrGLenum fillMode =
        gr_stencil_op_to_gl_path_rendering_fill_mode(fHWPathStencilSettings.passOp(GrStencilSettings::kFront_Face));
    GrGLint writeMask = fHWPathStencilSettings.writeMask(GrStencilSettings::kFront_Face);

    if (stroke.isFillStyle() || SkStrokeRec::kStrokeAndFill_Style == stroke.getStyle()) {
        GL_CALL(StencilFillPath(id, fillMode, writeMask));
    }
    if (stroke.needToApply()) {
        GL_CALL(StencilStrokePath(id, 0xffff, writeMask));
    }

    if (nonInvertedFill == fill) {
        if (stroke.needToApply()) {
            GL_CALL(CoverStrokePath(id, GR_GL_BOUNDING_BOX));
        } else {
            GL_CALL(CoverFillPath(id, GR_GL_BOUNDING_BOX));
        }
    } else {
        GrDrawState* drawState = this->drawState();
        GrDrawState::AutoViewMatrixRestore avmr;
        SkRect bounds = SkRect::MakeLTRB(0, 0,
                                         SkIntToScalar(drawState->getRenderTarget()->width()),
                                         SkIntToScalar(drawState->getRenderTarget()->height()));
        SkMatrix vmi;
        // mapRect through persp matrix may not be correct
        if (!drawState->getViewMatrix().hasPerspective() && drawState->getViewInverse(&vmi)) {
            vmi.mapRect(&bounds);
            // theoretically could set bloat = 0, instead leave it because of matrix inversion
            // precision.
            SkScalar bloat = drawState->getViewMatrix().getMaxStretch() * SK_ScalarHalf;
            bounds.outset(bloat, bloat);
        } else {
            avmr.setIdentity(drawState);
        }

        this->drawSimpleRect(bounds, NULL);
    }
}
1698 void GrGpuGL::onResolveRenderTarget(GrRenderTarget* target) {
1699 GrGLRenderTarget* rt = static_cast<GrGLRenderTarget*>(target);
1700 if (rt->needsResolve()) {
1701 // Some extensions automatically resolve the texture when it is read.
1702 if (this->glCaps().usesMSAARenderBuffers()) {
1703 SkASSERT(rt->textureFBOID() != rt->renderFBOID());
1704 GL_CALL(BindFramebuffer(GR_GL_READ_FRAMEBUFFER, rt->renderFBOID()));
1705 GL_CALL(BindFramebuffer(GR_GL_DRAW_FRAMEBUFFER, rt->textureFBOID()));
1706 // make sure we go through flushRenderTarget() since we've modified
1707 // the bound DRAW FBO ID.
1708 fHWBoundRenderTarget = NULL;
1709 const GrGLIRect& vp = rt->getViewport();
1710 const SkIRect dirtyRect = rt->getResolveRect();
1711 GrGLIRect r;
1712 r.setRelativeTo(vp, dirtyRect.fLeft, dirtyRect.fTop,
1713 dirtyRect.width(), dirtyRect.height(), target->origin());
1715 GrAutoTRestore<ScissorState> asr;
1716 if (GrGLCaps::kES_Apple_MSFBOType == this->glCaps().msFBOType()) {
1717 // Apple's extension uses the scissor as the blit bounds.
1718 asr.reset(&fScissorState);
1719 fScissorState.fEnabled = true;
1720 fScissorState.fRect = dirtyRect;
1721 this->flushScissor();
1722 GL_CALL(ResolveMultisampleFramebuffer());
1723 } else {
1724 if (GrGLCaps::kDesktop_EXT_MSFBOType == this->glCaps().msFBOType()) {
1725 // The EXT version applies the scissor during the blit, so disable it.
1726 asr.reset(&fScissorState);
1727 fScissorState.fEnabled = false;
1728 this->flushScissor();
1729 }
1730 int right = r.fLeft + r.fWidth;
1731 int top = r.fBottom + r.fHeight;
1732 GL_CALL(BlitFramebuffer(r.fLeft, r.fBottom, right, top,
1733 r.fLeft, r.fBottom, right, top,
1734 GR_GL_COLOR_BUFFER_BIT, GR_GL_NEAREST));
1735 }
1736 }
1737 rt->flagAsResolved();
1738 }
1739 }
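// For reference, the non-Apple branch above boils down to the standard
// EXT/ARB-style resolve blit. A minimal sketch (FBO ids and w/h assumed):
//
//   GR_GL_CALL(gl, BindFramebuffer(GR_GL_READ_FRAMEBUFFER, msaaFBOID));
//   GR_GL_CALL(gl, BindFramebuffer(GR_GL_DRAW_FRAMEBUFFER, textureFBOID));
//   GR_GL_CALL(gl, BlitFramebuffer(0, 0, w, h, 0, 0, w, h,
//                                  GR_GL_COLOR_BUFFER_BIT, GR_GL_NEAREST));
//
// The Apple ES extension has no blit; it resolves the bound read framebuffer
// into the draw framebuffer, clipped by the scissor, hence the scissor dance.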
1741 namespace {
1743 GrGLenum gr_to_gl_stencil_func(GrStencilFunc basicFunc) {
1744 static const GrGLenum gTable[] = {
1745 GR_GL_ALWAYS, // kAlways_StencilFunc
1746 GR_GL_NEVER, // kNever_StencilFunc
1747 GR_GL_GREATER, // kGreater_StencilFunc
1748 GR_GL_GEQUAL, // kGEqual_StencilFunc
1749 GR_GL_LESS, // kLess_StencilFunc
1750 GR_GL_LEQUAL, // kLEqual_StencilFunc,
1751 GR_GL_EQUAL, // kEqual_StencilFunc,
1752 GR_GL_NOTEQUAL, // kNotEqual_StencilFunc,
1753 };
1754 GR_STATIC_ASSERT(GR_ARRAY_COUNT(gTable) == kBasicStencilFuncCount);
1755 GR_STATIC_ASSERT(0 == kAlways_StencilFunc);
1756 GR_STATIC_ASSERT(1 == kNever_StencilFunc);
1757 GR_STATIC_ASSERT(2 == kGreater_StencilFunc);
1758 GR_STATIC_ASSERT(3 == kGEqual_StencilFunc);
1759 GR_STATIC_ASSERT(4 == kLess_StencilFunc);
1760 GR_STATIC_ASSERT(5 == kLEqual_StencilFunc);
1761 GR_STATIC_ASSERT(6 == kEqual_StencilFunc);
1762 GR_STATIC_ASSERT(7 == kNotEqual_StencilFunc);
1763 SkASSERT((unsigned) basicFunc < kBasicStencilFuncCount);
1765 return gTable[basicFunc];
1766 }
1768 GrGLenum gr_to_gl_stencil_op(GrStencilOp op) {
1769 static const GrGLenum gTable[] = {
1770 GR_GL_KEEP, // kKeep_StencilOp
1771 GR_GL_REPLACE, // kReplace_StencilOp
1772 GR_GL_INCR_WRAP, // kIncWrap_StencilOp
1773 GR_GL_INCR, // kIncClamp_StencilOp
1774 GR_GL_DECR_WRAP, // kDecWrap_StencilOp
1775 GR_GL_DECR, // kDecClamp_StencilOp
1776 GR_GL_ZERO, // kZero_StencilOp
1777 GR_GL_INVERT, // kInvert_StencilOp
1778 };
1779 GR_STATIC_ASSERT(GR_ARRAY_COUNT(gTable) == kStencilOpCount);
1780 GR_STATIC_ASSERT(0 == kKeep_StencilOp);
1781 GR_STATIC_ASSERT(1 == kReplace_StencilOp);
1782 GR_STATIC_ASSERT(2 == kIncWrap_StencilOp);
1783 GR_STATIC_ASSERT(3 == kIncClamp_StencilOp);
1784 GR_STATIC_ASSERT(4 == kDecWrap_StencilOp);
1785 GR_STATIC_ASSERT(5 == kDecClamp_StencilOp);
1786 GR_STATIC_ASSERT(6 == kZero_StencilOp);
1787 GR_STATIC_ASSERT(7 == kInvert_StencilOp);
1788 SkASSERT((unsigned) op < kStencilOpCount);
1789 return gTable[op];
1790 }
1792 void set_gl_stencil(const GrGLInterface* gl,
1793 const GrStencilSettings& settings,
1794 GrGLenum glFace,
1795 GrStencilSettings::Face grFace) {
1796 GrGLenum glFunc = gr_to_gl_stencil_func(settings.func(grFace));
1797 GrGLenum glFailOp = gr_to_gl_stencil_op(settings.failOp(grFace));
1798 GrGLenum glPassOp = gr_to_gl_stencil_op(settings.passOp(grFace));
1800 GrGLint ref = settings.funcRef(grFace);
1801 GrGLint mask = settings.funcMask(grFace);
1802 GrGLint writeMask = settings.writeMask(grFace);
1804 if (GR_GL_FRONT_AND_BACK == glFace) {
1805 // we call the combined func just in case separate stencil is not
1806 // supported.
1807 GR_GL_CALL(gl, StencilFunc(glFunc, ref, mask));
1808 GR_GL_CALL(gl, StencilMask(writeMask));
1809 GR_GL_CALL(gl, StencilOp(glFailOp, glPassOp, glPassOp));
1810 } else {
1811 GR_GL_CALL(gl, StencilFuncSeparate(glFace, glFunc, ref, mask));
1812 GR_GL_CALL(gl, StencilMaskSeparate(glFace, writeMask));
1813 GR_GL_CALL(gl, StencilOpSeparate(glFace, glFailOp, glPassOp, glPassOp));
1814 }
1815 }
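// A usage sketch for set_gl_stencil (settings values are hypothetical):
// flushing an always-pass, replace-on-pass setting to both faces,
//
//   set_gl_stencil(gl, settings, GR_GL_FRONT_AND_BACK,
//                  GrStencilSettings::kFront_Face);
//
// issues StencilFunc(GL_ALWAYS, ref, mask), StencilMask(writeMask), and
// StencilOp(GL_REPLACE, GL_REPLACE, GL_REPLACE); any single-face glFace value
// goes through the *Separate variants instead.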
1816 }
1818 void GrGpuGL::flushStencil(DrawType type) {
1819 if (kStencilPath_DrawType != type && fHWStencilSettings != fStencilSettings) {
1820 if (fStencilSettings.isDisabled()) {
1821 if (kNo_TriState != fHWStencilTestEnabled) {
1822 GL_CALL(Disable(GR_GL_STENCIL_TEST));
1823 fHWStencilTestEnabled = kNo_TriState;
1824 }
1825 } else {
1826 if (kYes_TriState != fHWStencilTestEnabled) {
1827 GL_CALL(Enable(GR_GL_STENCIL_TEST));
1828 fHWStencilTestEnabled = kYes_TriState;
1829 }
1830 }
1831 if (!fStencilSettings.isDisabled()) {
1832 if (this->caps()->twoSidedStencilSupport()) {
1833 set_gl_stencil(this->glInterface(),
1834 fStencilSettings,
1835 GR_GL_FRONT,
1836 GrStencilSettings::kFront_Face);
1837 set_gl_stencil(this->glInterface(),
1838 fStencilSettings,
1839 GR_GL_BACK,
1840 GrStencilSettings::kBack_Face);
1841 } else {
1842 set_gl_stencil(this->glInterface(),
1843 fStencilSettings,
1844 GR_GL_FRONT_AND_BACK,
1845 GrStencilSettings::kFront_Face);
1846 }
1847 }
1848 fHWStencilSettings = fStencilSettings;
1849 }
1850 }
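// flushStencil is one instance of this file's redundant-state-cache idiom:
// each piece of GL state is shadowed by a TriState so a GL call is made only
// on an actual transition. The pattern, condensed (field name hypothetical):
//
//   if (wantEnabled) {
//       if (kYes_TriState != fHWSomeState) {   // kNo or kUnknown
//           GL_CALL(Enable(GR_GL_SOME_STATE));
//           fHWSomeState = kYes_TriState;
//       }
//   } else if (kNo_TriState != fHWSomeState) {
//       GL_CALL(Disable(GR_GL_SOME_STATE));
//       fHWSomeState = kNo_TriState;
//   }
//
// kUnknown_TriState (set when the context is reset) forces the first flush
// after a reset to hit GL regardless of the desired value.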
1852 void GrGpuGL::flushAAState(DrawType type) {
1853 // At least some ATI Linux drivers render GL_LINES incorrectly when MSAA state is enabled but
1854 // the target is not multisampled: single-pixel-wide lines come out wider than one pixel.
1855 #if 0
1856 // Replace RT_HAS_MSAA with this definition once this driver bug is no longer a relevant concern
1857 #define RT_HAS_MSAA rt->isMultisampled()
1858 #else
1859 #define RT_HAS_MSAA (rt->isMultisampled() || kDrawLines_DrawType == type)
1860 #endif
1862 const GrRenderTarget* rt = this->getDrawState().getRenderTarget();
1863 if (kGL_GrGLStandard == this->glStandard()) {
1864 // ES doesn't support toggling GL_MULTISAMPLE and doesn't have
1865 // smooth lines.
1866 // we prefer smooth lines over multisampled lines
1867 bool smoothLines = false;
1869 if (kDrawLines_DrawType == type) {
1870 smoothLines = this->willUseHWAALines();
1871 if (smoothLines) {
1872 if (kYes_TriState != fHWAAState.fSmoothLineEnabled) {
1873 GL_CALL(Enable(GR_GL_LINE_SMOOTH));
1874 fHWAAState.fSmoothLineEnabled = kYes_TriState;
1875 // must disable msaa to use line smoothing
1876 if (RT_HAS_MSAA &&
1877 kNo_TriState != fHWAAState.fMSAAEnabled) {
1878 GL_CALL(Disable(GR_GL_MULTISAMPLE));
1879 fHWAAState.fMSAAEnabled = kNo_TriState;
1880 }
1881 }
1882 } else {
1883 if (kNo_TriState != fHWAAState.fSmoothLineEnabled) {
1884 GL_CALL(Disable(GR_GL_LINE_SMOOTH));
1885 fHWAAState.fSmoothLineEnabled = kNo_TriState;
1886 }
1887 }
1888 }
1889 if (!smoothLines && RT_HAS_MSAA) {
1890 // FIXME: GL_NV_path_rendering doesn't seem to like MSAA disabled. The
1891 // convex hulls of each path segment appear to get filled.
1892 bool enableMSAA = kStencilPath_DrawType == type ||
1893 this->getDrawState().isHWAntialiasState();
1894 if (enableMSAA) {
1895 if (kYes_TriState != fHWAAState.fMSAAEnabled) {
1896 GL_CALL(Enable(GR_GL_MULTISAMPLE));
1897 fHWAAState.fMSAAEnabled = kYes_TriState;
1898 }
1899 } else {
1900 if (kNo_TriState != fHWAAState.fMSAAEnabled) {
1901 GL_CALL(Disable(GR_GL_MULTISAMPLE));
1902 fHWAAState.fMSAAEnabled = kNo_TriState;
1903 }
1904 }
1905 }
1906 }
1907 }
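// Condensed, the desktop-GL policy above is: for line draws prefer
// GL_LINE_SMOOTH and turn GL_MULTISAMPLE off (the two don't combine well);
// otherwise enable GL_MULTISAMPLE iff the draw wants HW AA or is a
// stencil-path draw (see the GL_NV_path_rendering FIXME). On ES the whole
// flush is skipped since GL_MULTISAMPLE isn't toggleable there.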
1909 void GrGpuGL::flushPathStencilSettings(SkPath::FillType fill) {
1910 GrStencilSettings pathStencilSettings;
1911 this->getPathStencilSettingsForFillType(fill, &pathStencilSettings);
1912 if (fHWPathStencilSettings != pathStencilSettings) {
1913 // Only the func, ref, and mask are set here. The op and write mask are params to the
1914 // call that draws the path to the stencil buffer (glStencilFillPath).
1915 GrGLenum func =
1916 gr_to_gl_stencil_func(pathStencilSettings.func(GrStencilSettings::kFront_Face));
1917 GL_CALL(PathStencilFunc(func,
1918 pathStencilSettings.funcRef(GrStencilSettings::kFront_Face),
1919 pathStencilSettings.funcMask(GrStencilSettings::kFront_Face)));
1921 fHWPathStencilSettings = pathStencilSettings;
1922 }
1923 }
1925 void GrGpuGL::flushBlend(bool isLines,
1926 GrBlendCoeff srcCoeff,
1927 GrBlendCoeff dstCoeff) {
1928 if (isLines && this->willUseHWAALines()) {
1929 if (kYes_TriState != fHWBlendState.fEnabled) {
1930 GL_CALL(Enable(GR_GL_BLEND));
1931 fHWBlendState.fEnabled = kYes_TriState;
1932 }
1933 if (kSA_GrBlendCoeff != fHWBlendState.fSrcCoeff ||
1934 kISA_GrBlendCoeff != fHWBlendState.fDstCoeff) {
1935 GL_CALL(BlendFunc(gXfermodeCoeff2Blend[kSA_GrBlendCoeff],
1936 gXfermodeCoeff2Blend[kISA_GrBlendCoeff]));
1937 fHWBlendState.fSrcCoeff = kSA_GrBlendCoeff;
1938 fHWBlendState.fDstCoeff = kISA_GrBlendCoeff;
1939 }
1940 } else {
1941 // any optimization to disable blending should
1942 // have already been applied and tweaked the coeffs
1943 // to (1, 0).
1944 bool blendOff = kOne_GrBlendCoeff == srcCoeff &&
1945 kZero_GrBlendCoeff == dstCoeff;
1946 if (blendOff) {
1947 if (kNo_TriState != fHWBlendState.fEnabled) {
1948 GL_CALL(Disable(GR_GL_BLEND));
1949 fHWBlendState.fEnabled = kNo_TriState;
1950 }
1951 } else {
1952 if (kYes_TriState != fHWBlendState.fEnabled) {
1953 GL_CALL(Enable(GR_GL_BLEND));
1954 fHWBlendState.fEnabled = kYes_TriState;
1955 }
1956 if (fHWBlendState.fSrcCoeff != srcCoeff ||
1957 fHWBlendState.fDstCoeff != dstCoeff) {
1958 GL_CALL(BlendFunc(gXfermodeCoeff2Blend[srcCoeff],
1959 gXfermodeCoeff2Blend[dstCoeff]));
1960 fHWBlendState.fSrcCoeff = srcCoeff;
1961 fHWBlendState.fDstCoeff = dstCoeff;
1962 }
1963 GrColor blendConst = this->getDrawState().getBlendConstant();
1964 if ((BlendCoeffReferencesConstant(srcCoeff) ||
1965 BlendCoeffReferencesConstant(dstCoeff)) &&
1966 (!fHWBlendState.fConstColorValid ||
1967 fHWBlendState.fConstColor != blendConst)) {
1968 GrGLfloat c[4];
1969 GrColorToRGBAFloat(blendConst, c);
1970 GL_CALL(BlendColor(c[0], c[1], c[2], c[3]));
1971 fHWBlendState.fConstColor = blendConst;
1972 fHWBlendState.fConstColorValid = true;
1973 }
1974 }
1975 }
1976 }
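// The blendOff shortcut above is sound because src*ONE + dst*ZERO is exactly
// the incoming fragment, so disabling GL_BLEND gives the same result for
// less work:
//
//   // dst' = src * 1 + dst * 0  ==  src  =>  blending is a no-op
//   bool blendOff = kOne_GrBlendCoeff == srcCoeff &&
//                   kZero_GrBlendCoeff == dstCoeff;
//
// BlendColor is likewise sent lazily: only when a coefficient actually
// references the constant and the cached value is missing or stale.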
1978 static inline GrGLenum tile_to_gl_wrap(SkShader::TileMode tm) {
1979 static const GrGLenum gWrapModes[] = {
1980 GR_GL_CLAMP_TO_EDGE,
1981 GR_GL_REPEAT,
1982 GR_GL_MIRRORED_REPEAT
1983 };
1984 GR_STATIC_ASSERT(SkShader::kTileModeCount == SK_ARRAY_COUNT(gWrapModes));
1985 GR_STATIC_ASSERT(0 == SkShader::kClamp_TileMode);
1986 GR_STATIC_ASSERT(1 == SkShader::kRepeat_TileMode);
1987 GR_STATIC_ASSERT(2 == SkShader::kMirror_TileMode);
1988 return gWrapModes[tm];
1989 }
1991 void GrGpuGL::bindTexture(int unitIdx, const GrTextureParams& params, GrGLTexture* texture) {
1992 SkASSERT(NULL != texture);
1994 // If we created a render target/texture and rendered to it without using a texture, and
1995 // now we're texturing from the RT, it will still be the last bound texture but it needs
1996 // resolving. So keep this out of the "last != next" check.
1997 GrGLRenderTarget* texRT = static_cast<GrGLRenderTarget*>(texture->asRenderTarget());
1998 if (NULL != texRT) {
1999 this->onResolveRenderTarget(texRT);
2000 }
2002 if (fHWBoundTextures[unitIdx] != texture) {
2003 this->setTextureUnit(unitIdx);
2004 GL_CALL(BindTexture(GR_GL_TEXTURE_2D, texture->textureID()));
2005 fHWBoundTextures[unitIdx] = texture;
2006 }
2008 ResetTimestamp timestamp;
2009 const GrGLTexture::TexParams& oldTexParams = texture->getCachedTexParams(&timestamp);
2010 bool setAll = timestamp < this->getResetTimestamp();
2011 GrGLTexture::TexParams newTexParams;
2013 static GrGLenum glMinFilterModes[] = {
2014 GR_GL_NEAREST,
2015 GR_GL_LINEAR,
2016 GR_GL_LINEAR_MIPMAP_LINEAR
2017 };
2018 static GrGLenum glMagFilterModes[] = {
2019 GR_GL_NEAREST,
2020 GR_GL_LINEAR,
2021 GR_GL_LINEAR
2022 };
2023 GrTextureParams::FilterMode filterMode = params.filterMode();
2024 if (!this->caps()->mipMapSupport() && GrTextureParams::kMipMap_FilterMode == filterMode) {
2025 filterMode = GrTextureParams::kBilerp_FilterMode;
2026 }
2027 newTexParams.fMinFilter = glMinFilterModes[filterMode];
2028 newTexParams.fMagFilter = glMagFilterModes[filterMode];
2030 if (GrTextureParams::kMipMap_FilterMode == filterMode && texture->mipMapsAreDirty()) {
2031 // GL_CALL(Hint(GR_GL_GENERATE_MIPMAP_HINT,GR_GL_NICEST));
2032 GL_CALL(GenerateMipmap(GR_GL_TEXTURE_2D));
2033 texture->dirtyMipMaps(false);
2034 }
2036 newTexParams.fWrapS = tile_to_gl_wrap(params.getTileModeX());
2037 newTexParams.fWrapT = tile_to_gl_wrap(params.getTileModeY());
2038 memcpy(newTexParams.fSwizzleRGBA,
2039 GrGLShaderBuilder::GetTexParamSwizzle(texture->config(), this->glCaps()),
2040 sizeof(newTexParams.fSwizzleRGBA));
2041 if (setAll || newTexParams.fMagFilter != oldTexParams.fMagFilter) {
2042 this->setTextureUnit(unitIdx);
2043 GL_CALL(TexParameteri(GR_GL_TEXTURE_2D,
2044 GR_GL_TEXTURE_MAG_FILTER,
2045 newTexParams.fMagFilter));
2046 }
2047 if (setAll || newTexParams.fMinFilter != oldTexParams.fMinFilter) {
2048 this->setTextureUnit(unitIdx);
2049 GL_CALL(TexParameteri(GR_GL_TEXTURE_2D,
2050 GR_GL_TEXTURE_MIN_FILTER,
2051 newTexParams.fMinFilter));
2052 }
2053 if (setAll || newTexParams.fWrapS != oldTexParams.fWrapS) {
2054 this->setTextureUnit(unitIdx);
2055 GL_CALL(TexParameteri(GR_GL_TEXTURE_2D,
2056 GR_GL_TEXTURE_WRAP_S,
2057 newTexParams.fWrapS));
2058 }
2059 if (setAll || newTexParams.fWrapT != oldTexParams.fWrapT) {
2060 this->setTextureUnit(unitIdx);
2061 GL_CALL(TexParameteri(GR_GL_TEXTURE_2D,
2062 GR_GL_TEXTURE_WRAP_T,
2063 newTexParams.fWrapT));
2064 }
2065 if (this->glCaps().textureSwizzleSupport() &&
2066 (setAll || memcmp(newTexParams.fSwizzleRGBA,
2067 oldTexParams.fSwizzleRGBA,
2068 sizeof(newTexParams.fSwizzleRGBA)))) {
2069 this->setTextureUnit(unitIdx);
2070 if (this->glStandard() == kGLES_GrGLStandard) {
2071 // ES3 added swizzle support but not GL_TEXTURE_SWIZZLE_RGBA.
2072 const GrGLenum* swizzle = newTexParams.fSwizzleRGBA;
2073 GL_CALL(TexParameteri(GR_GL_TEXTURE_2D, GR_GL_TEXTURE_SWIZZLE_R, swizzle[0]));
2074 GL_CALL(TexParameteri(GR_GL_TEXTURE_2D, GR_GL_TEXTURE_SWIZZLE_G, swizzle[1]));
2075 GL_CALL(TexParameteri(GR_GL_TEXTURE_2D, GR_GL_TEXTURE_SWIZZLE_B, swizzle[2]));
2076 GL_CALL(TexParameteri(GR_GL_TEXTURE_2D, GR_GL_TEXTURE_SWIZZLE_A, swizzle[3]));
2077 } else {
2078 GR_STATIC_ASSERT(sizeof(newTexParams.fSwizzleRGBA[0]) == sizeof(GrGLint));
2079 const GrGLint* swizzle = reinterpret_cast<const GrGLint*>(newTexParams.fSwizzleRGBA);
2080 GL_CALL(TexParameteriv(GR_GL_TEXTURE_2D, GR_GL_TEXTURE_SWIZZLE_RGBA, swizzle));
2081 }
2082 }
2083 texture->setCachedTexParams(newTexParams, this->getResetTimestamp());
2084 }
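// The texture-parameter cache above keys off a reset timestamp instead of
// per-texture dirty bits: a context reset bumps the GPU-wide timestamp, so
// params cached before the reset compare stale and everything is re-sent.
// A minimal sketch of the idea (names and types are hypothetical):
//
//   struct CachedParams { uint64_t fTimestamp; Params fParams; };
//
//   bool setAll = cached.fTimestamp < gpu->getResetTimestamp();
//   if (setAll || cached.fParams.fMinFilter != wanted.fMinFilter) {
//       // re-send GR_GL_TEXTURE_MIN_FILTER ...
//   }
//
// This avoids walking every live texture to invalidate it on context reset.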
2086 void GrGpuGL::setProjectionMatrix(const SkMatrix& matrix,
2087 const SkISize& renderTargetSize,
2088 GrSurfaceOrigin renderTargetOrigin) {
2090 SkASSERT(this->glCaps().fixedFunctionSupport());
2092 if (renderTargetOrigin == fHWProjectionMatrixState.fRenderTargetOrigin &&
2093 renderTargetSize == fHWProjectionMatrixState.fRenderTargetSize &&
2094 matrix.cheapEqualTo(fHWProjectionMatrixState.fViewMatrix)) {
2095 return;
2096 }
2098 fHWProjectionMatrixState.fViewMatrix = matrix;
2099 fHWProjectionMatrixState.fRenderTargetSize = renderTargetSize;
2100 fHWProjectionMatrixState.fRenderTargetOrigin = renderTargetOrigin;
2102 GrGLfloat glMatrix[4 * 4];
2103 fHWProjectionMatrixState.getGLMatrix<4>(glMatrix);
2104 GL_CALL(MatrixMode(GR_GL_PROJECTION));
2105 GL_CALL(LoadMatrixf(glMatrix));
2106 }
2108 void GrGpuGL::enableTexGen(int unitIdx,
2109 TexGenComponents components,
2110 const GrGLfloat* coefficients) {
2111 SkASSERT(this->glCaps().fixedFunctionSupport());
2112 SkASSERT(components >= kS_TexGenComponents && components <= kSTR_TexGenComponents);
2113 SkASSERT(this->glCaps().maxFixedFunctionTextureCoords() >= unitIdx);
2115 if (GR_GL_OBJECT_LINEAR == fHWTexGenSettings[unitIdx].fMode &&
2116 components == fHWTexGenSettings[unitIdx].fNumComponents &&
2117 !memcmp(coefficients, fHWTexGenSettings[unitIdx].fCoefficients,
2118 3 * components * sizeof(GrGLfloat))) {
2119 return;
2120 }
2122 this->setTextureUnit(unitIdx);
2124 if (GR_GL_OBJECT_LINEAR != fHWTexGenSettings[unitIdx].fMode) {
2125 for (int i = 0; i < 4; i++) {
2126 GL_CALL(TexGeni(GR_GL_S + i, GR_GL_TEXTURE_GEN_MODE, GR_GL_OBJECT_LINEAR));
2127 }
2128 fHWTexGenSettings[unitIdx].fMode = GR_GL_OBJECT_LINEAR;
2129 }
2131 for (int i = fHWTexGenSettings[unitIdx].fNumComponents; i < components; i++) {
2132 GL_CALL(Enable(GR_GL_TEXTURE_GEN_S + i));
2133 }
2134 for (int i = components; i < fHWTexGenSettings[unitIdx].fNumComponents; i++) {
2135 GL_CALL(Disable(GR_GL_TEXTURE_GEN_S + i));
2136 }
2137 fHWTexGenSettings[unitIdx].fNumComponents = components;
2139 for (int i = 0; i < components; i++) {
2140 GrGLfloat plane[] = {coefficients[0 + 3 * i],
2141 coefficients[1 + 3 * i],
2142 0,
2143 coefficients[2 + 3 * i]};
2144 GL_CALL(TexGenfv(GR_GL_S + i, GR_GL_OBJECT_PLANE, plane));
2145 }
2147 if (this->caps()->pathRenderingSupport()) {
2148 GL_CALL(PathTexGen(GR_GL_TEXTURE0 + unitIdx,
2149 GR_GL_OBJECT_LINEAR,
2150 components,
2151 coefficients));
2152 }
2154 memcpy(fHWTexGenSettings[unitIdx].fCoefficients, coefficients,
2155 3 * components * sizeof(GrGLfloat));
2156 }
2158 void GrGpuGL::enableTexGen(int unitIdx, TexGenComponents components, const SkMatrix& matrix) {
2159 GrGLfloat coefficients[3 * 3];
2160 SkASSERT(this->glCaps().fixedFunctionSupport());
2161 SkASSERT(components >= kS_TexGenComponents && components <= kSTR_TexGenComponents);
2163 coefficients[0] = SkScalarToFloat(matrix[SkMatrix::kMScaleX]);
2164 coefficients[1] = SkScalarToFloat(matrix[SkMatrix::kMSkewX]);
2165 coefficients[2] = SkScalarToFloat(matrix[SkMatrix::kMTransX]);
2167 if (components >= kST_TexGenComponents) {
2168 coefficients[3] = SkScalarToFloat(matrix[SkMatrix::kMSkewY]);
2169 coefficients[4] = SkScalarToFloat(matrix[SkMatrix::kMScaleY]);
2170 coefficients[5] = SkScalarToFloat(matrix[SkMatrix::kMTransY]);
2171 }
2173 if (components >= kSTR_TexGenComponents) {
2174 coefficients[6] = SkScalarToFloat(matrix[SkMatrix::kMPersp0]);
2175 coefficients[7] = SkScalarToFloat(matrix[SkMatrix::kMPersp1]);
2176 coefficients[8] = SkScalarToFloat(matrix[SkMatrix::kMPersp2]);
2177 }
2179 enableTexGen(unitIdx, components, coefficients);
2180 }
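// The matrix overload simply flattens SkMatrix rows into the row-major
// coefficient array consumed by the overload above:
//
//   | sx kx tx |       s = sx*x + kx*y + tx   -> coefficients[0..2]
//   | ky sy ty |  =>   t = ky*x + sy*y + ty   -> coefficients[3..5]
//   | p0 p1 p2 |       r = p0*x + p1*y + p2   -> coefficients[6..8] (STR only)
//
// Each triple becomes one GR_GL_OBJECT_PLANE; the z coefficient is pinned to
// 0 when the three-component rows are expanded to four-component planes.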
2182 void GrGpuGL::flushTexGenSettings(int numUsedTexCoordSets) {
2183 SkASSERT(this->glCaps().fixedFunctionSupport());
2184 SkASSERT(this->glCaps().maxFixedFunctionTextureCoords() >= numUsedTexCoordSets);
2186 // Only write the inactive tex gens, since active tex gens were written
2187 // when they were enabled.
2189 SkDEBUGCODE(
2190 for (int i = 0; i < numUsedTexCoordSets; i++) {
2191 SkASSERT(0 != fHWTexGenSettings[i].fNumComponents);
2192 }
2193 );
2195 for (int i = numUsedTexCoordSets; i < fHWActiveTexGenSets; i++) {
2196 SkASSERT(0 != fHWTexGenSettings[i].fNumComponents);
2198 this->setTextureUnit(i);
2199 for (int j = 0; j < fHWTexGenSettings[i].fNumComponents; j++) {
2200 GL_CALL(Disable(GR_GL_TEXTURE_GEN_S + j));
2201 }
2203 if (this->caps()->pathRenderingSupport()) {
2204 GL_CALL(PathTexGen(GR_GL_TEXTURE0 + i, GR_GL_NONE, 0, NULL));
2205 }
2207 fHWTexGenSettings[i].fNumComponents = 0;
2208 }
2210 fHWActiveTexGenSets = numUsedTexCoordSets;
2211 }
2213 void GrGpuGL::flushMiscFixedFunctionState() {
2215 const GrDrawState& drawState = this->getDrawState();
2217 if (drawState.isDitherState()) {
2218 if (kYes_TriState != fHWDitherEnabled) {
2219 GL_CALL(Enable(GR_GL_DITHER));
2220 fHWDitherEnabled = kYes_TriState;
2221 }
2222 } else {
2223 if (kNo_TriState != fHWDitherEnabled) {
2224 GL_CALL(Disable(GR_GL_DITHER));
2225 fHWDitherEnabled = kNo_TriState;
2226 }
2227 }
2229 if (drawState.isColorWriteDisabled()) {
2230 if (kNo_TriState != fHWWriteToColor) {
2231 GL_CALL(ColorMask(GR_GL_FALSE, GR_GL_FALSE,
2232 GR_GL_FALSE, GR_GL_FALSE));
2233 fHWWriteToColor = kNo_TriState;
2234 }
2235 } else {
2236 if (kYes_TriState != fHWWriteToColor) {
2237 GL_CALL(ColorMask(GR_GL_TRUE, GR_GL_TRUE, GR_GL_TRUE, GR_GL_TRUE));
2238 fHWWriteToColor = kYes_TriState;
2239 }
2240 }
2242 if (fHWDrawFace != drawState.getDrawFace()) {
2243 switch (this->getDrawState().getDrawFace()) {
2244 case GrDrawState::kCCW_DrawFace:
2245 GL_CALL(Enable(GR_GL_CULL_FACE));
2246 GL_CALL(CullFace(GR_GL_BACK));
2247 break;
2248 case GrDrawState::kCW_DrawFace:
2249 GL_CALL(Enable(GR_GL_CULL_FACE));
2250 GL_CALL(CullFace(GR_GL_FRONT));
2251 break;
2252 case GrDrawState::kBoth_DrawFace:
2253 GL_CALL(Disable(GR_GL_CULL_FACE));
2254 break;
2255 default:
2256 GrCrash("Unknown draw face.");
2257 }
2258 fHWDrawFace = drawState.getDrawFace();
2259 }
2260 }
2262 void GrGpuGL::notifyRenderTargetDelete(GrRenderTarget* renderTarget) {
2263 SkASSERT(NULL != renderTarget);
2264 if (fHWBoundRenderTarget == renderTarget) {
2265 fHWBoundRenderTarget = NULL;
2266 }
2267 }
2269 void GrGpuGL::notifyTextureDelete(GrGLTexture* texture) {
2270 for (int s = 0; s < fHWBoundTextures.count(); ++s) {
2271 if (fHWBoundTextures[s] == texture) {
2272 // deleting a bound texture implies a bind to 0
2273 fHWBoundTextures[s] = NULL;
2274 }
2275 }
2276 }
2278 bool GrGpuGL::configToGLFormats(GrPixelConfig config,
2279 bool getSizedInternalFormat,
2280 GrGLenum* internalFormat,
2281 GrGLenum* externalFormat,
2282 GrGLenum* externalType) {
2283 GrGLenum dontCare;
2284 if (NULL == internalFormat) {
2285 internalFormat = &dontCare;
2286 }
2287 if (NULL == externalFormat) {
2288 externalFormat = &dontCare;
2289 }
2290 if (NULL == externalType) {
2291 externalType = &dontCare;
2292 }
2294 switch (config) {
2295 case kRGBA_8888_GrPixelConfig:
2296 *internalFormat = GR_GL_RGBA;
2297 *externalFormat = GR_GL_RGBA;
2298 if (getSizedInternalFormat) {
2299 *internalFormat = GR_GL_RGBA8;
2300 } else {
2301 *internalFormat = GR_GL_RGBA;
2302 }
2303 *externalType = GR_GL_UNSIGNED_BYTE;
2304 break;
2305 case kBGRA_8888_GrPixelConfig:
2306 if (!this->glCaps().bgraFormatSupport()) {
2307 return false;
2308 }
2309 if (this->glCaps().bgraIsInternalFormat()) {
2310 if (getSizedInternalFormat) {
2311 *internalFormat = GR_GL_BGRA8;
2312 } else {
2313 *internalFormat = GR_GL_BGRA;
2314 }
2315 } else {
2316 if (getSizedInternalFormat) {
2317 *internalFormat = GR_GL_RGBA8;
2318 } else {
2319 *internalFormat = GR_GL_RGBA;
2320 }
2321 }
2322 *externalFormat = GR_GL_BGRA;
2323 *externalType = GR_GL_UNSIGNED_BYTE;
2324 break;
2325 case kRGB_565_GrPixelConfig:
2326 *internalFormat = GR_GL_RGB;
2327 *externalFormat = GR_GL_RGB;
2328 if (getSizedInternalFormat) {
2329 if (this->glStandard() == kGL_GrGLStandard) {
2330 return false;
2331 } else {
2332 *internalFormat = GR_GL_RGB565;
2333 }
2334 } else {
2335 *internalFormat = GR_GL_RGB;
2336 }
2337 *externalType = GR_GL_UNSIGNED_SHORT_5_6_5;
2338 break;
2339 case kRGBA_4444_GrPixelConfig:
2340 *internalFormat = GR_GL_RGBA;
2341 *externalFormat = GR_GL_RGBA;
2342 if (getSizedInternalFormat) {
2343 *internalFormat = GR_GL_RGBA4;
2344 } else {
2345 *internalFormat = GR_GL_RGBA;
2346 }
2347 *externalType = GR_GL_UNSIGNED_SHORT_4_4_4_4;
2348 break;
2349 case kIndex_8_GrPixelConfig:
2350 if (this->caps()->eightBitPaletteSupport()) {
2351 *internalFormat = GR_GL_PALETTE8_RGBA8;
2352 // glCompressedTexImage doesn't take external params
2353 *externalFormat = GR_GL_PALETTE8_RGBA8;
2354 // no sized/unsized internal format distinction here
2355 *internalFormat = GR_GL_PALETTE8_RGBA8;
2356 // unused with CompressedTexImage
2357 *externalType = GR_GL_UNSIGNED_BYTE;
2358 } else {
2359 return false;
2360 }
2361 break;
2362 case kAlpha_8_GrPixelConfig:
2363 if (this->glCaps().textureRedSupport()) {
2364 *internalFormat = GR_GL_RED;
2365 *externalFormat = GR_GL_RED;
2366 if (getSizedInternalFormat) {
2367 *internalFormat = GR_GL_R8;
2368 } else {
2369 *internalFormat = GR_GL_RED;
2370 }
2371 *externalType = GR_GL_UNSIGNED_BYTE;
2372 } else {
2373 *internalFormat = GR_GL_ALPHA;
2374 *externalFormat = GR_GL_ALPHA;
2375 if (getSizedInternalFormat) {
2376 *internalFormat = GR_GL_ALPHA8;
2377 } else {
2378 *internalFormat = GR_GL_ALPHA;
2379 }
2380 *externalType = GR_GL_UNSIGNED_BYTE;
2381 }
2382 break;
2383 default:
2384 return false;
2385 }
2386 return true;
2387 }
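// A typical use of configToGLFormats when specifying texel data (a sketch;
// 'desc' and 'ptr' are assumed from the caller):
//
//   GrGLenum internalFormat, externalFormat, externalType;
//   if (!this->configToGLFormats(desc.fConfig, false /* unsized */,
//                                &internalFormat, &externalFormat,
//                                &externalType)) {
//       return false;  // config has no representation on this context
//   }
//   GL_CALL(TexImage2D(GR_GL_TEXTURE_2D, 0, internalFormat,
//                      desc.fWidth, desc.fHeight, 0,
//                      externalFormat, externalType, ptr));
//
// Callers pass true for getSizedInternalFormat where the GL flavor requires
// sized formats (e.g. renderbuffer storage).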
2389 void GrGpuGL::setTextureUnit(int unit) {
2390 SkASSERT(unit >= 0 && unit < fHWBoundTextures.count());
2391 if (unit != fHWActiveTextureUnitIdx) {
2392 GL_CALL(ActiveTexture(GR_GL_TEXTURE0 + unit));
2393 fHWActiveTextureUnitIdx = unit;
2394 }
2395 }
2397 void GrGpuGL::setScratchTextureUnit() {
2398 // Bind the last texture unit since it is the least likely to be used by GrGLProgram.
2399 int lastUnitIdx = fHWBoundTextures.count() - 1;
2400 if (lastUnitIdx != fHWActiveTextureUnitIdx) {
2401 GL_CALL(ActiveTexture(GR_GL_TEXTURE0 + lastUnitIdx));
2402 fHWActiveTextureUnitIdx = lastUnitIdx;
2403 }
2404 // Clear out this entry so that if a program does use this unit it will rebind the
2405 // correct texture.
2406 fHWBoundTextures[lastUnitIdx] = NULL;
2407 }
2409 namespace {
2410 // Determines whether glBlitFramebuffer could be used between src and dst.
2411 inline bool can_blit_framebuffer(const GrSurface* dst,
2412 const GrSurface* src,
2413 const GrGpuGL* gpu,
2414 bool* wouldNeedTempFBO = NULL) {
2415 if (gpu->glCaps().isConfigRenderable(dst->config(), dst->desc().fSampleCnt > 0) &&
2416 gpu->glCaps().isConfigRenderable(src->config(), src->desc().fSampleCnt > 0) &&
2417 gpu->glCaps().usesMSAARenderBuffers()) {
2418 // ES3 doesn't allow framebuffer blits when the src has MSAA and the configs don't match
2419 // or the rects are not identical (same edges, not merely the same size).
2420 if (GrGLCaps::kES_3_0_MSFBOType == gpu->glCaps().msFBOType() &&
2421 (src->desc().fSampleCnt > 0 || src->config() != dst->config())) {
2422 return false;
2423 }
2424 if (NULL != wouldNeedTempFBO) {
2425 *wouldNeedTempFBO = NULL == dst->asRenderTarget() || NULL == src->asRenderTarget();
2426 }
2427 return true;
2428 } else {
2429 return false;
2430 }
2431 }
2433 inline bool can_copy_texsubimage(const GrSurface* dst,
2434 const GrSurface* src,
2435 const GrGpuGL* gpu,
2436 bool* wouldNeedTempFBO = NULL) {
2437 // Table 3.9 of the ES2 spec lists the formats supported by CopyTexSubImage, and BGRA
2438 // isn't among them. There doesn't appear to be any extension that adds it. Perhaps
2439 // many drivers would allow it to work, but ANGLE does not.
2440 if (kGLES_GrGLStandard == gpu->glStandard() && gpu->glCaps().bgraIsInternalFormat() &&
2441 (kBGRA_8888_GrPixelConfig == dst->config() || kBGRA_8888_GrPixelConfig == src->config())) {
2442 return false;
2443 }
2444 const GrGLRenderTarget* dstRT = static_cast<const GrGLRenderTarget*>(dst->asRenderTarget());
2445 // If dst is multisampled (and uses an extension where there is a separate MSAA renderbuffer)
2446 // then we don't want to copy to the texture but to the MSAA buffer.
2447 if (NULL != dstRT && dstRT->renderFBOID() != dstRT->textureFBOID()) {
2448 return false;
2449 }
2450 const GrGLRenderTarget* srcRT = static_cast<const GrGLRenderTarget*>(src->asRenderTarget());
2451 // If the src is multisampled (and uses an extension where there is a separate MSAA
2452 // renderbuffer) then it is an invalid operation to call CopyTexSubImage
2453 if (NULL != srcRT && srcRT->renderFBOID() != srcRT->textureFBOID()) {
2454 return false;
2455 }
2456 if (gpu->glCaps().isConfigRenderable(src->config(), src->desc().fSampleCnt > 0) &&
2457 NULL != dst->asTexture() &&
2458 dst->origin() == src->origin() &&
2459 kIndex_8_GrPixelConfig != src->config()) {
2460 if (NULL != wouldNeedTempFBO) {
2461 *wouldNeedTempFBO = NULL == src->asRenderTarget();
2462 }
2463 return true;
2464 } else {
2465 return false;
2466 }
2467 }
2469 // If a temporary FBO was created, its non-zero ID is returned. Also outputs the
2470 // viewport that the copy rect is relative to.
2471 inline GrGLuint bind_surface_as_fbo(const GrGLInterface* gl,
2472 GrSurface* surface,
2473 GrGLenum fboTarget,
2474 GrGLIRect* viewport) {
2475 GrGLRenderTarget* rt = static_cast<GrGLRenderTarget*>(surface->asRenderTarget());
2476 GrGLuint tempFBOID;
2477 if (NULL == rt) {
2478 SkASSERT(NULL != surface->asTexture());
2479 GrGLuint texID = static_cast<GrGLTexture*>(surface->asTexture())->textureID();
2480 GR_GL_CALL(gl, GenFramebuffers(1, &tempFBOID));
2481 GR_GL_CALL(gl, BindFramebuffer(fboTarget, tempFBOID));
2482 GR_GL_CALL(gl, FramebufferTexture2D(fboTarget,
2483 GR_GL_COLOR_ATTACHMENT0,
2484 GR_GL_TEXTURE_2D,
2485 texID,
2486 0));
2487 viewport->fLeft = 0;
2488 viewport->fBottom = 0;
2489 viewport->fWidth = surface->width();
2490 viewport->fHeight = surface->height();
2491 } else {
2492 tempFBOID = 0;
2493 GR_GL_CALL(gl, BindFramebuffer(fboTarget, rt->renderFBOID()));
2494 *viewport = rt->getViewport();
2495 }
2496 return tempFBOID;
2497 }
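// Callers pair bind_surface_as_fbo with a matching cleanup, since a non-zero
// return means a temporary FBO now backs the binding. Sketch:
//
//   GrGLIRect vp;
//   GrGLuint tempFBO = bind_surface_as_fbo(gl, surface, GR_GL_FRAMEBUFFER, &vp);
//   // ... copy or blit, with rects computed relative to vp ...
//   if (tempFBO) {
//       GR_GL_CALL(gl, DeleteFramebuffers(1, &tempFBO));
//   }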
2499 }
2501 void GrGpuGL::initCopySurfaceDstDesc(const GrSurface* src, GrTextureDesc* desc) {
2502 // Check for format issues with glCopyTexSubImage2D
2503 if (kGLES_GrGLStandard == this->glStandard() && this->glCaps().bgraIsInternalFormat() &&
2504 kBGRA_8888_GrPixelConfig == src->config()) {
2505 // glCopyTexSubImage2D doesn't work with this config. We'll want to make it a render target
2506 // in order to call glBlitFramebuffer or to copy to it by rendering.
2507 INHERITED::initCopySurfaceDstDesc(src, desc);
2508 return;
2509 } else if (NULL == src->asRenderTarget()) {
2510 // We don't want to have to create an FBO just to use glCopyTexSubImage2D. Let the base
2511 // class handle it by rendering.
2512 INHERITED::initCopySurfaceDstDesc(src, desc);
2513 return;
2514 }
2516 const GrGLRenderTarget* srcRT = static_cast<const GrGLRenderTarget*>(src->asRenderTarget());
2517 if (NULL != srcRT && srcRT->renderFBOID() != srcRT->textureFBOID()) {
2518 // It's illegal to call CopyTexSubImage2D on a MSAA renderbuffer.
2519 INHERITED::initCopySurfaceDstDesc(src, desc);
2520 } else {
2521 desc->fConfig = src->config();
2522 desc->fOrigin = src->origin();
2523 desc->fFlags = kNone_GrTextureFlags;
2524 }
2525 }
2527 bool GrGpuGL::onCopySurface(GrSurface* dst,
2528 GrSurface* src,
2529 const SkIRect& srcRect,
2530 const SkIPoint& dstPoint) {
2531 bool inheritedCouldCopy = INHERITED::onCanCopySurface(dst, src, srcRect, dstPoint);
2532 bool copied = false;
2533 bool wouldNeedTempFBO = false;
2534 if (can_copy_texsubimage(dst, src, this, &wouldNeedTempFBO) &&
2535 (!wouldNeedTempFBO || !inheritedCouldCopy)) {
2536 GrGLuint srcFBO;
2537 GrGLIRect srcVP;
2538 srcFBO = bind_surface_as_fbo(this->glInterface(), src, GR_GL_FRAMEBUFFER, &srcVP);
2539 GrGLTexture* dstTex = static_cast<GrGLTexture*>(dst->asTexture());
2540 SkASSERT(NULL != dstTex);
2541 // We modified the bound FBO
2542 fHWBoundRenderTarget = NULL;
2543 GrGLIRect srcGLRect;
2544 srcGLRect.setRelativeTo(srcVP,
2545 srcRect.fLeft,
2546 srcRect.fTop,
2547 srcRect.width(),
2548 srcRect.height(),
2549 src->origin());
2551 this->setScratchTextureUnit();
2552 GL_CALL(BindTexture(GR_GL_TEXTURE_2D, dstTex->textureID()));
2553 GrGLint dstY;
2554 if (kBottomLeft_GrSurfaceOrigin == dst->origin()) {
2555 dstY = dst->height() - (dstPoint.fY + srcGLRect.fHeight);
2556 } else {
2557 dstY = dstPoint.fY;
2558 }
2559 GL_CALL(CopyTexSubImage2D(GR_GL_TEXTURE_2D, 0,
2560 dstPoint.fX, dstY,
2561 srcGLRect.fLeft, srcGLRect.fBottom,
2562 srcGLRect.fWidth, srcGLRect.fHeight));
2563 copied = true;
2564 if (srcFBO) {
2565 GL_CALL(DeleteFramebuffers(1, &srcFBO));
2566 }
2567 } else if (can_blit_framebuffer(dst, src, this, &wouldNeedTempFBO) &&
2568 (!wouldNeedTempFBO || !inheritedCouldCopy)) {
2569 SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY,
2570 srcRect.width(), srcRect.height());
2571 bool selfOverlap = false;
2572 if (dst->isSameAs(src)) {
2573 selfOverlap = SkIRect::IntersectsNoEmptyCheck(dstRect, srcRect);
2574 }
2576 if (!selfOverlap) {
2577 GrGLuint dstFBO;
2578 GrGLuint srcFBO;
2579 GrGLIRect dstVP;
2580 GrGLIRect srcVP;
2581 dstFBO = bind_surface_as_fbo(this->glInterface(), dst, GR_GL_DRAW_FRAMEBUFFER, &dstVP);
2582 srcFBO = bind_surface_as_fbo(this->glInterface(), src, GR_GL_READ_FRAMEBUFFER, &srcVP);
2583 // We modified the bound FBO
2584 fHWBoundRenderTarget = NULL;
2585 GrGLIRect srcGLRect;
2586 GrGLIRect dstGLRect;
2587 srcGLRect.setRelativeTo(srcVP,
2588 srcRect.fLeft,
2589 srcRect.fTop,
2590 srcRect.width(),
2591 srcRect.height(),
2592 src->origin());
2593 dstGLRect.setRelativeTo(dstVP,
2594 dstRect.fLeft,
2595 dstRect.fTop,
2596 dstRect.width(),
2597 dstRect.height(),
2598 dst->origin());
2600 GrAutoTRestore<ScissorState> asr;
2601 if (GrGLCaps::kDesktop_EXT_MSFBOType == this->glCaps().msFBOType()) {
2602 // The EXT version applies the scissor during the blit, so disable it.
2603 asr.reset(&fScissorState);
2604 fScissorState.fEnabled = false;
2605 this->flushScissor();
2606 }
2607 GrGLint srcY0;
2608 GrGLint srcY1;
2609 // Does the blit need to y-mirror or not?
2610 if (src->origin() == dst->origin()) {
2611 srcY0 = srcGLRect.fBottom;
2612 srcY1 = srcGLRect.fBottom + srcGLRect.fHeight;
2613 } else {
2614 srcY0 = srcGLRect.fBottom + srcGLRect.fHeight;
2615 srcY1 = srcGLRect.fBottom;
2616 }
2617 GL_CALL(BlitFramebuffer(srcGLRect.fLeft,
2618 srcY0,
2619 srcGLRect.fLeft + srcGLRect.fWidth,
2620 srcY1,
2621 dstGLRect.fLeft,
2622 dstGLRect.fBottom,
2623 dstGLRect.fLeft + dstGLRect.fWidth,
2624 dstGLRect.fBottom + dstGLRect.fHeight,
2625 GR_GL_COLOR_BUFFER_BIT, GR_GL_NEAREST));
2626 if (dstFBO) {
2627 GL_CALL(DeleteFramebuffers(1, &dstFBO));
2628 }
2629 if (srcFBO) {
2630 GL_CALL(DeleteFramebuffers(1, &srcFBO));
2631 }
2632 copied = true;
2633 }
2634 }
2635 if (!copied && inheritedCouldCopy) {
2636 copied = INHERITED::onCopySurface(dst, src, srcRect, dstPoint);
2637 SkASSERT(copied);
2638 }
2639 return copied;
2640 }
2642 bool GrGpuGL::onCanCopySurface(GrSurface* dst,
2643 GrSurface* src,
2644 const SkIRect& srcRect,
2645 const SkIPoint& dstPoint) {
2646 // This mirrors the logic in onCopySurface.
2647 if (can_copy_texsubimage(dst, src, this)) {
2648 return true;
2649 }
2650 if (can_blit_framebuffer(dst, src, this)) {
2651 if (dst->isSameAs(src)) {
2652 SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY,
2653 srcRect.width(), srcRect.height());
2654 if (!SkIRect::IntersectsNoEmptyCheck(dstRect, srcRect)) {
2655 return true;
2656 }
2657 } else {
2658 return true;
2659 }
2660 }
2661 return INHERITED::onCanCopySurface(dst, src, srcRect, dstPoint);
2662 }
2664 void GrGpuGL::onInstantGpuTraceEvent(const char* marker) {
2665 if (this->caps()->gpuTracingSupport()) {
2666 // GL_CALL(InsertEventMarker(0, marker));
2667 }
2668 }
2670 void GrGpuGL::onPushGpuTraceEvent(const char* marker) {
2671 if (this->caps()->gpuTracingSupport()) {
2672 // GL_CALL(PushGroupMarker(0, marker));
2673 }
2674 }
2676 void GrGpuGL::onPopGpuTraceEvent() {
2677 if (this->caps()->gpuTracingSupport()) {
2678 // GL_CALL(PopGroupMarker());
2679 }
2680 }
2682 ///////////////////////////////////////////////////////////////////////////////
2684 GrGLAttribArrayState* GrGpuGL::HWGeometryState::bindArrayAndBuffersToDraw(
2685 GrGpuGL* gpu,
2686 const GrGLVertexBuffer* vbuffer,
2687 const GrGLIndexBuffer* ibuffer) {
2688 SkASSERT(NULL != vbuffer);
2689 GrGLAttribArrayState* attribState;
2691 // We use a vertex array if we're on a core profile and the verts are in a VBO.
2692 if (gpu->glCaps().isCoreProfile() && !vbuffer->isCPUBacked()) {
2693 if (NULL == fVBOVertexArray || !fVBOVertexArray->isValid()) {
2694 SkSafeUnref(fVBOVertexArray);
2695 GrGLuint arrayID;
2696 GR_GL_CALL(gpu->glInterface(), GenVertexArrays(1, &arrayID));
2697 int attrCount = gpu->glCaps().maxVertexAttributes();
2698 fVBOVertexArray = SkNEW_ARGS(GrGLVertexArray, (gpu, arrayID, attrCount));
2699 }
2700 attribState = fVBOVertexArray->bindWithIndexBuffer(ibuffer);
2701 } else {
2702 if (NULL != ibuffer) {
2703 this->setIndexBufferIDOnDefaultVertexArray(gpu, ibuffer->bufferID());
2704 } else {
2705 this->setVertexArrayID(gpu, 0);
2706 }
2707 int attrCount = gpu->glCaps().maxVertexAttributes();
2708 if (fDefaultVertexArrayAttribState.count() != attrCount) {
2709 fDefaultVertexArrayAttribState.resize(attrCount);
2710 }
2711 attribState = &fDefaultVertexArrayAttribState;
2712 }
2713 return attribState;
2714 }
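// A sketch of the caller's side (names abbreviated; the real plumbing lives
// in the geometry/draw setup code):
//
//   GrGLAttribArrayState* attribState =
//       fHWGeometryState.bindArrayAndBuffersToDraw(this, vbuffer, ibuffer);
//   // ... enable and point each vertex attribute via attribState, then draw.
//
// Tracking attrib state per vertex array (the shared VAO vs. the default
// array) is what lets the core-profile path avoid re-specifying attribute
// pointers on every draw.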