--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/gfx/skia/trunk/src/gpu/gl/GrGpuGL.cpp	Wed Dec 31 06:09:35 2014 +0100
@@ -0,0 +1,2714 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "GrGpuGL.h"
+#include "GrGLStencilBuffer.h"
+#include "GrGLPath.h"
+#include "GrGLShaderBuilder.h"
+#include "GrTemplates.h"
+#include "GrTypes.h"
+#include "SkStrokeRec.h"
+#include "SkTemplates.h"
+
+#define GL_CALL(X) GR_GL_CALL(this->glInterface(), X)
+#define GL_CALL_RET(RET, X) GR_GL_CALL_RET(this->glInterface(), RET, X)
+
+#define SKIP_CACHE_CHECK    true
+
+#if GR_GL_CHECK_ALLOC_WITH_GET_ERROR
+    #define CLEAR_ERROR_BEFORE_ALLOC(iface)   GrGLClearErr(iface)
+    #define GL_ALLOC_CALL(iface, call)        GR_GL_CALL_NOERRCHECK(iface, call)
+    #define CHECK_ALLOC_ERROR(iface)          GR_GL_GET_ERROR(iface)
+#else
+    #define CLEAR_ERROR_BEFORE_ALLOC(iface)
+    #define GL_ALLOC_CALL(iface, call)        GR_GL_CALL(iface, call)
+    #define CHECK_ALLOC_ERROR(iface)          GR_GL_NO_ERROR
+#endif
+
+
+///////////////////////////////////////////////////////////////////////////////
+
+static const GrGLenum gXfermodeCoeff2Blend[] = {
+    GR_GL_ZERO,
+    GR_GL_ONE,
+    GR_GL_SRC_COLOR,
+    GR_GL_ONE_MINUS_SRC_COLOR,
+    GR_GL_DST_COLOR,
+    GR_GL_ONE_MINUS_DST_COLOR,
+    GR_GL_SRC_ALPHA,
+    GR_GL_ONE_MINUS_SRC_ALPHA,
+    GR_GL_DST_ALPHA,
+    GR_GL_ONE_MINUS_DST_ALPHA,
+    GR_GL_CONSTANT_COLOR,
+    GR_GL_ONE_MINUS_CONSTANT_COLOR,
+    GR_GL_CONSTANT_ALPHA,
+    GR_GL_ONE_MINUS_CONSTANT_ALPHA,
+
+    // extended blend coeffs
+    GR_GL_SRC1_COLOR,
+    GR_GL_ONE_MINUS_SRC1_COLOR,
+    GR_GL_SRC1_ALPHA,
+    GR_GL_ONE_MINUS_SRC1_ALPHA,
+};
+
+bool GrGpuGL::BlendCoeffReferencesConstant(GrBlendCoeff coeff) {
+    static const bool gCoeffReferencesBlendConst[] = {
+        false,
+        false,
+        false,
+        false,
+        false,
+        false,
+        false,
+        false,
+        false,
+        false,
+        true,
+        true,
+        true,
+        true,
+
+        // extended blend coeffs
+        false,
+        false,
+        false,
+        false,
+    };
+    return gCoeffReferencesBlendConst[coeff];
+    GR_STATIC_ASSERT(kTotalGrBlendCoeffCount ==
+                     GR_ARRAY_COUNT(gCoeffReferencesBlendConst));
+
+    GR_STATIC_ASSERT(0 == kZero_GrBlendCoeff);
+    GR_STATIC_ASSERT(1 == kOne_GrBlendCoeff);
+    GR_STATIC_ASSERT(2 == kSC_GrBlendCoeff);
+    GR_STATIC_ASSERT(3 == kISC_GrBlendCoeff);
+    GR_STATIC_ASSERT(4 == kDC_GrBlendCoeff);
+    GR_STATIC_ASSERT(5 == kIDC_GrBlendCoeff);
+    GR_STATIC_ASSERT(6 == kSA_GrBlendCoeff);
+    GR_STATIC_ASSERT(7 == kISA_GrBlendCoeff);
+    GR_STATIC_ASSERT(8 == kDA_GrBlendCoeff);
+    GR_STATIC_ASSERT(9 == kIDA_GrBlendCoeff);
+    GR_STATIC_ASSERT(10 == kConstC_GrBlendCoeff);
+    GR_STATIC_ASSERT(11 == kIConstC_GrBlendCoeff);
+    GR_STATIC_ASSERT(12 == kConstA_GrBlendCoeff);
+    GR_STATIC_ASSERT(13 == kIConstA_GrBlendCoeff);
+
+    GR_STATIC_ASSERT(14 == kS2C_GrBlendCoeff);
+    GR_STATIC_ASSERT(15 == kIS2C_GrBlendCoeff);
+    GR_STATIC_ASSERT(16 == kS2A_GrBlendCoeff);
+    GR_STATIC_ASSERT(17 == kIS2A_GrBlendCoeff);
+
+    // assertion for gXfermodeCoeff2Blend has to be in GrGpu scope
+    GR_STATIC_ASSERT(kTotalGrBlendCoeffCount ==
+                     GR_ARRAY_COUNT(gXfermodeCoeff2Blend));
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+static bool gPrintStartupSpew;
+
+GrGpuGL::GrGpuGL(const GrGLContext& ctx, GrContext* context)
+    : GrGpu(context)
+    , fGLContext(ctx) {
+
+    SkASSERT(ctx.isInitialized());
+    fCaps.reset(SkRef(ctx.caps()));
+
+    fHWBoundTextures.reset(this->glCaps().maxFragmentTextureUnits());
+    fHWTexGenSettings.reset(this->glCaps().maxFixedFunctionTextureCoords());
+
+    GrGLClearErr(fGLContext.interface());
+    if (gPrintStartupSpew) {
+        const GrGLubyte* vendor;
+        const GrGLubyte* renderer;
+        const GrGLubyte* version;
+        GL_CALL_RET(vendor, GetString(GR_GL_VENDOR));
+        GL_CALL_RET(renderer, GetString(GR_GL_RENDERER));
+        GL_CALL_RET(version, GetString(GR_GL_VERSION));
+        GrPrintf("------------------------- create GrGpuGL %p --------------\n",
+                 this);
+        GrPrintf("------ VENDOR %s\n", vendor);
+        GrPrintf("------ RENDERER %s\n", renderer);
+        GrPrintf("------ VERSION %s\n", version);
+        GrPrintf("------ EXTENSIONS\n");
+#if 0   // TODO: Reenable this after GrGLInterface's extensions can be accessed safely.
+        ctx.extensions().print();
+#endif
+        GrPrintf("\n");
+        GrPrintf(this->glCaps().dump().c_str());
+    }
+
+    fProgramCache = SkNEW_ARGS(ProgramCache, (this));
+
+    SkASSERT(this->glCaps().maxVertexAttributes() >= GrDrawState::kMaxVertexAttribCnt);
+
+    fLastSuccessfulStencilFmtIdx = 0;
+    fHWProgramID = 0;
+}
+
+GrGpuGL::~GrGpuGL() {
+    if (0 != fHWProgramID) {
+        // detach the current program so there is no confusion on OpenGL's part
+        // that we want it to be deleted
+        SkASSERT(fHWProgramID == fCurrentProgram->programID());
+        GL_CALL(UseProgram(0));
+    }
+
+    delete fProgramCache;
+
+    // This must be called before the GrDrawTarget destructor
+    this->releaseGeometry();
+    // This subclass must do this before the base class destructor runs
+    // since we will unref the GrGLInterface.
+    this->releaseResources();
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+
+GrPixelConfig GrGpuGL::preferredReadPixelsConfig(GrPixelConfig readConfig,
+                                                 GrPixelConfig surfaceConfig) const {
+    if (GR_GL_RGBA_8888_PIXEL_OPS_SLOW && kRGBA_8888_GrPixelConfig == readConfig) {
+        return kBGRA_8888_GrPixelConfig;
+    } else if (this->glContext().isMesa() &&
+               GrBytesPerPixel(readConfig) == 4 &&
+               GrPixelConfigSwapRAndB(readConfig) == surfaceConfig) {
+        // Mesa 3D takes a slow path when reading back BGRA from an RGBA surface and vice-versa.
+        // Perhaps this should be guarded by some compile-time or runtime check.
+        return surfaceConfig;
+    } else if (readConfig == kBGRA_8888_GrPixelConfig &&
+               !this->glCaps().readPixelsSupported(this->glInterface(),
+                                                   GR_GL_BGRA, GR_GL_UNSIGNED_BYTE)) {
+        return kRGBA_8888_GrPixelConfig;
+    } else {
+        return readConfig;
+    }
+}
+
+GrPixelConfig GrGpuGL::preferredWritePixelsConfig(GrPixelConfig writeConfig,
+                                                  GrPixelConfig surfaceConfig) const {
+    if (GR_GL_RGBA_8888_PIXEL_OPS_SLOW && kRGBA_8888_GrPixelConfig == writeConfig) {
+        return kBGRA_8888_GrPixelConfig;
+    } else {
+        return writeConfig;
+    }
+}
+
+bool GrGpuGL::canWriteTexturePixels(const GrTexture* texture, GrPixelConfig srcConfig) const {
+    if (kIndex_8_GrPixelConfig == srcConfig || kIndex_8_GrPixelConfig == texture->config()) {
+        return false;
+    }
+    if (srcConfig != texture->config() && kGLES_GrGLStandard == this->glStandard()) {
+        // In general ES2 requires the internal format of the texture and the format of the src
+        // pixels to match. However, it may or may not be possible to upload BGRA data to a RGBA
+        // texture. It depends upon which extension added BGRA. The Apple extension allows it
+        // (BGRA's internal format is RGBA) while the EXT extension does not (BGRA is its own
+        // internal format).
+        if (this->glCaps().bgraFormatSupport() &&
+            !this->glCaps().bgraIsInternalFormat() &&
+            kBGRA_8888_GrPixelConfig == srcConfig &&
+            kRGBA_8888_GrPixelConfig == texture->config()) {
+            return true;
+        } else {
+            return false;
+        }
+    } else {
+        return true;
+    }
+}
+
+bool GrGpuGL::fullReadPixelsIsFasterThanPartial() const {
+    return SkToBool(GR_GL_FULL_READPIXELS_FASTER_THAN_PARTIAL);
+}
+
+void GrGpuGL::onResetContext(uint32_t resetBits) {
+    // we don't use the zb at all
+    if (resetBits & kMisc_GrGLBackendState) {
+        GL_CALL(Disable(GR_GL_DEPTH_TEST));
+        GL_CALL(DepthMask(GR_GL_FALSE));
+
+        fHWDrawFace = GrDrawState::kInvalid_DrawFace;
+        fHWDitherEnabled = kUnknown_TriState;
+
+        if (kGL_GrGLStandard == this->glStandard()) {
+            // Desktop-only state that we never change
+            if (!this->glCaps().isCoreProfile()) {
+                GL_CALL(Disable(GR_GL_POINT_SMOOTH));
+                GL_CALL(Disable(GR_GL_LINE_SMOOTH));
+                GL_CALL(Disable(GR_GL_POLYGON_SMOOTH));
+                GL_CALL(Disable(GR_GL_POLYGON_STIPPLE));
+                GL_CALL(Disable(GR_GL_COLOR_LOGIC_OP));
+                GL_CALL(Disable(GR_GL_INDEX_LOGIC_OP));
+            }
+            // The windows NVIDIA driver has GL_ARB_imaging in the extension string when using a
+            // core profile. This seems like a bug since the core spec removes any mention of
+            // GL_ARB_imaging.
+            if (this->glCaps().imagingSupport() && !this->glCaps().isCoreProfile()) {
+                GL_CALL(Disable(GR_GL_COLOR_TABLE));
+            }
+            GL_CALL(Disable(GR_GL_POLYGON_OFFSET_FILL));
+            // Since ES doesn't support glPointSize at all we always use the VS to
+            // set the point size
+            GL_CALL(Enable(GR_GL_VERTEX_PROGRAM_POINT_SIZE));
+
+            // We should set glPolygonMode(FRONT_AND_BACK,FILL) here, too. It isn't
+            // currently part of our gl interface. There are probably others as
+            // well.
+        }
+        fHWWriteToColor = kUnknown_TriState;
+        // we only ever use lines in hairline mode
+        GL_CALL(LineWidth(1));
+    }
+
+    if (resetBits & kAA_GrGLBackendState) {
+        fHWAAState.invalidate();
+    }
+
+    fHWActiveTextureUnitIdx = -1; // invalid
+
+    if (resetBits & kTextureBinding_GrGLBackendState) {
+        for (int s = 0; s < fHWBoundTextures.count(); ++s) {
+            fHWBoundTextures[s] = NULL;
+        }
+    }
+
+    if (resetBits & kBlend_GrGLBackendState) {
+        fHWBlendState.invalidate();
+    }
+
+    if (resetBits & kView_GrGLBackendState) {
+        fHWScissorSettings.invalidate();
+        fHWViewport.invalidate();
+    }
+
+    if (resetBits & kStencil_GrGLBackendState) {
+        fHWStencilSettings.invalidate();
+        fHWStencilTestEnabled = kUnknown_TriState;
+    }
+
+    // Vertex
+    if (resetBits & kVertex_GrGLBackendState) {
+        fHWGeometryState.invalidate();
+    }
+
+    if (resetBits & kRenderTarget_GrGLBackendState) {
+        fHWBoundRenderTarget = NULL;
+    }
+
+    if (resetBits & (kFixedFunction_GrGLBackendState | kPathRendering_GrGLBackendState)) {
+        if (this->glCaps().fixedFunctionSupport()) {
+            fHWProjectionMatrixState.invalidate();
+            // we don't use the model view matrix.
+            GL_CALL(MatrixMode(GR_GL_MODELVIEW));
+            GL_CALL(LoadIdentity());
+
+            for (int i = 0; i < this->glCaps().maxFixedFunctionTextureCoords(); ++i) {
+                GL_CALL(ActiveTexture(GR_GL_TEXTURE0 + i));
+                GL_CALL(Disable(GR_GL_TEXTURE_GEN_S));
+                GL_CALL(Disable(GR_GL_TEXTURE_GEN_T));
+                GL_CALL(Disable(GR_GL_TEXTURE_GEN_Q));
+                GL_CALL(Disable(GR_GL_TEXTURE_GEN_R));
+                if (this->caps()->pathRenderingSupport()) {
+                    GL_CALL(PathTexGen(GR_GL_TEXTURE0 + i, GR_GL_NONE, 0, NULL));
+                }
+                fHWTexGenSettings[i].fMode = GR_GL_NONE;
+                fHWTexGenSettings[i].fNumComponents = 0;
+            }
+            fHWActiveTexGenSets = 0;
+        }
+        if (this->caps()->pathRenderingSupport()) {
+            fHWPathStencilSettings.invalidate();
+        }
+    }
+
+    // we assume these values
+    if (resetBits & kPixelStore_GrGLBackendState) {
+        if (this->glCaps().unpackRowLengthSupport()) {
+            GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH, 0));
+        }
+        if (this->glCaps().packRowLengthSupport()) {
+            GL_CALL(PixelStorei(GR_GL_PACK_ROW_LENGTH, 0));
+        }
+        if (this->glCaps().unpackFlipYSupport()) {
+            GL_CALL(PixelStorei(GR_GL_UNPACK_FLIP_Y, GR_GL_FALSE));
+        }
+        if (this->glCaps().packFlipYSupport()) {
+            GL_CALL(PixelStorei(GR_GL_PACK_REVERSE_ROW_ORDER, GR_GL_FALSE));
+        }
+    }
+
+    if (resetBits & kProgram_GrGLBackendState) {
+        fHWProgramID = 0;
+        fSharedGLProgramState.invalidate();
+    }
+}
+
+namespace {
+
+GrSurfaceOrigin resolve_origin(GrSurfaceOrigin origin, bool renderTarget) {
+    // By default, GrRenderTargets are GL's normal orientation so that they
+    // can be drawn to by the outside world without the client having
+    // to render upside down.
+    if (kDefault_GrSurfaceOrigin == origin) {
+        return renderTarget ? kBottomLeft_GrSurfaceOrigin : kTopLeft_GrSurfaceOrigin;
+    } else {
+        return origin;
+    }
+}
+
+}
+
+GrTexture* GrGpuGL::onWrapBackendTexture(const GrBackendTextureDesc& desc) {
+    if (!this->configToGLFormats(desc.fConfig, false, NULL, NULL, NULL)) {
+        return NULL;
+    }
+
+    if (0 == desc.fTextureHandle) {
+        return NULL;
+    }
+
+    int maxSize = this->caps()->maxTextureSize();
+    if (desc.fWidth > maxSize || desc.fHeight > maxSize) {
+        return NULL;
+    }
+
+    GrGLTexture::Desc glTexDesc;
+    // next line relies on GrBackendTextureDesc's flags matching GrTexture's
+    glTexDesc.fFlags = (GrTextureFlags) desc.fFlags;
+    glTexDesc.fWidth = desc.fWidth;
+    glTexDesc.fHeight = desc.fHeight;
+    glTexDesc.fConfig = desc.fConfig;
+    glTexDesc.fSampleCnt = desc.fSampleCnt;
+    glTexDesc.fTextureID = static_cast<GrGLuint>(desc.fTextureHandle);
+    glTexDesc.fIsWrapped = true;
+    bool renderTarget = SkToBool(desc.fFlags & kRenderTarget_GrBackendTextureFlag);
+    // FIXME: this should be calling resolve_origin(), but Chrome code is currently
+    // assuming the old behaviour, which is that backend textures are always
+    // BottomLeft, even for non-RT's. Once Chrome is fixed, change this to:
+    // glTexDesc.fOrigin = resolve_origin(desc.fOrigin, renderTarget);
+    if (kDefault_GrSurfaceOrigin == desc.fOrigin) {
+        glTexDesc.fOrigin = kBottomLeft_GrSurfaceOrigin;
+    } else {
+        glTexDesc.fOrigin = desc.fOrigin;
+    }
+
+    GrGLTexture* texture = NULL;
+    if (renderTarget) {
+        GrGLRenderTarget::Desc glRTDesc;
+        glRTDesc.fRTFBOID = 0;
+        glRTDesc.fTexFBOID = 0;
+        glRTDesc.fMSColorRenderbufferID = 0;
+        glRTDesc.fConfig = desc.fConfig;
+        glRTDesc.fSampleCnt = desc.fSampleCnt;
+        glRTDesc.fOrigin = glTexDesc.fOrigin;
+        glRTDesc.fCheckAllocation = false;
+        if (!this->createRenderTargetObjects(glTexDesc.fWidth,
+                                             glTexDesc.fHeight,
+                                             glTexDesc.fTextureID,
+                                             &glRTDesc)) {
+            return NULL;
+        }
+        texture = SkNEW_ARGS(GrGLTexture, (this, glTexDesc, glRTDesc));
+    } else {
+        texture = SkNEW_ARGS(GrGLTexture, (this, glTexDesc));
+    }
+    if (NULL == texture) {
+        return NULL;
+    }
+
+    return texture;
+}
+
+GrRenderTarget* GrGpuGL::onWrapBackendRenderTarget(const GrBackendRenderTargetDesc& desc) {
+    GrGLRenderTarget::Desc glDesc;
+    glDesc.fConfig = desc.fConfig;
+    glDesc.fRTFBOID = static_cast<GrGLuint>(desc.fRenderTargetHandle);
+    glDesc.fMSColorRenderbufferID = 0;
+    glDesc.fTexFBOID = GrGLRenderTarget::kUnresolvableFBOID;
+    glDesc.fSampleCnt = desc.fSampleCnt;
+    glDesc.fIsWrapped = true;
+    glDesc.fCheckAllocation = false;
+
+    glDesc.fOrigin = resolve_origin(desc.fOrigin, true);
+    GrGLIRect viewport;
+    viewport.fLeft = 0;
+    viewport.fBottom = 0;
+    viewport.fWidth = desc.fWidth;
+    viewport.fHeight = desc.fHeight;
+
+    GrRenderTarget* tgt = SkNEW_ARGS(GrGLRenderTarget,
+                                     (this, glDesc, viewport));
+    if (desc.fStencilBits) {
+        GrGLStencilBuffer::Format format;
+        format.fInternalFormat = GrGLStencilBuffer::kUnknownInternalFormat;
+        format.fPacked = false;
+        format.fStencilBits = desc.fStencilBits;
+        format.fTotalBits = desc.fStencilBits;
+        static const bool kIsSBWrapped = false;
+        GrGLStencilBuffer* sb = SkNEW_ARGS(GrGLStencilBuffer,
+                                           (this,
+                                            kIsSBWrapped,
+                                            0,
+                                            desc.fWidth,
+                                            desc.fHeight,
+                                            desc.fSampleCnt,
+                                            format));
+        tgt->setStencilBuffer(sb);
+        sb->unref();
+    }
+    return tgt;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+bool GrGpuGL::onWriteTexturePixels(GrTexture* texture,
+                                   int left, int top, int width, int height,
+                                   GrPixelConfig config, const void* buffer,
+                                   size_t rowBytes) {
+    if (NULL == buffer) {
+        return false;
+    }
+    GrGLTexture* glTex = static_cast<GrGLTexture*>(texture);
+
+    this->setScratchTextureUnit();
+    GL_CALL(BindTexture(GR_GL_TEXTURE_2D, glTex->textureID()));
+    GrGLTexture::Desc desc;
+    desc.fFlags = glTex->desc().fFlags;
+    desc.fWidth = glTex->width();
+    desc.fHeight = glTex->height();
+    desc.fConfig = glTex->config();
+    desc.fSampleCnt = glTex->desc().fSampleCnt;
+    desc.fTextureID = glTex->textureID();
+    desc.fOrigin = glTex->origin();
+
+    if (this->uploadTexData(desc, false,
+                            left, top, width, height,
+                            config, buffer, rowBytes)) {
+        texture->dirtyMipMaps(true);
+        return true;
+    } else {
+        return false;
+    }
+}
+
+namespace {
+bool adjust_pixel_ops_params(int surfaceWidth,
+                             int surfaceHeight,
+                             size_t bpp,
+                             int* left, int* top, int* width, int* height,
+                             const void** data,
+                             size_t* rowBytes) {
+    if (!*rowBytes) {
+        *rowBytes = *width * bpp;
+    }
+
+    SkIRect subRect = SkIRect::MakeXYWH(*left, *top, *width, *height);
+    SkIRect bounds = SkIRect::MakeWH(surfaceWidth, surfaceHeight);
+
+    if (!subRect.intersect(bounds)) {
+        return false;
+    }
+    *data = reinterpret_cast<const void*>(reinterpret_cast<intptr_t>(*data) +
+        (subRect.fTop - *top) * *rowBytes + (subRect.fLeft - *left) * bpp);
+
+    *left = subRect.fLeft;
+    *top = subRect.fTop;
+    *width = subRect.width();
+    *height = subRect.height();
+    return true;
+}
+
+GrGLenum check_alloc_error(const GrTextureDesc& desc, const GrGLInterface* interface) {
+    if (SkToBool(desc.fFlags & kCheckAllocation_GrTextureFlagBit)) {
+        return GR_GL_GET_ERROR(interface);
+    } else {
+        return CHECK_ALLOC_ERROR(interface);
+    }
+}
+
+}
+
+bool GrGpuGL::uploadTexData(const GrGLTexture::Desc& desc,
+                            bool isNewTexture,
+                            int left, int top, int width, int height,
+                            GrPixelConfig dataConfig,
+                            const void* data,
+                            size_t rowBytes) {
+    SkASSERT(NULL != data || isNewTexture);
+
+    size_t bpp = GrBytesPerPixel(dataConfig);
+    if (!adjust_pixel_ops_params(desc.fWidth, desc.fHeight, bpp, &left, &top,
+                                 &width, &height, &data, &rowBytes)) {
+        return false;
+    }
+    size_t trimRowBytes = width * bpp;
+
+    // in case we need a temporary, trimmed copy of the src pixels
+    SkAutoSMalloc<128 * 128> tempStorage;
+
+    // paletted textures cannot be partially updated
+    // We currently lazily create MIPMAPs when we see a draw with
+    // GrTextureParams::kMipMap_FilterMode. Using texture storage requires that the
+    // MIP levels are all created when the texture is created. So for now we don't use
+    // texture storage.
+    bool useTexStorage = false &&
+                         isNewTexture &&
+                         desc.fConfig != kIndex_8_GrPixelConfig &&
+                         this->glCaps().texStorageSupport();
+
+    if (useTexStorage && kGL_GrGLStandard == this->glStandard()) {
+        // 565 is not a sized internal format on desktop GL. So on desktop with
+        // 565 we always use an unsized internal format to let the system pick
+        // the best sized format to convert the 565 data to. Since TexStorage
+        // only allows sized internal formats we will instead use TexImage2D.
+        useTexStorage = desc.fConfig != kRGB_565_GrPixelConfig;
+    }
+
+    GrGLenum internalFormat;
+    GrGLenum externalFormat;
+    GrGLenum externalType;
+    // glTexStorage requires sized internal formats on both desktop and ES. ES2 requires an unsized
+    // format for glTexImage, unlike ES3 and desktop. However, we allow the driver to decide the
+    // size of the internal format whenever possible and so only use a sized internal format when
+    // using texture storage.
+    if (!this->configToGLFormats(dataConfig, useTexStorage, &internalFormat,
+                                 &externalFormat, &externalType)) {
+        return false;
+    }
+
+    if (!isNewTexture && GR_GL_PALETTE8_RGBA8 == internalFormat) {
+        // paletted textures cannot be updated
+        return false;
+    }
+
+    /*
+     * check whether to allocate a temporary buffer for flipping y or
+     * because our srcData has extra bytes past each row. If so, we need
+     * to trim those off here, since GL ES may not let us specify
+     * GL_UNPACK_ROW_LENGTH.
+     */
+    bool restoreGLRowLength = false;
+    bool swFlipY = false;
+    bool glFlipY = false;
+    if (NULL != data) {
+        if (kBottomLeft_GrSurfaceOrigin == desc.fOrigin) {
+            if (this->glCaps().unpackFlipYSupport()) {
+                glFlipY = true;
+            } else {
+                swFlipY = true;
+            }
+        }
+        if (this->glCaps().unpackRowLengthSupport() && !swFlipY) {
+            // can't use this for flipping, only non-neg values allowed. :(
+            if (rowBytes != trimRowBytes) {
+                GrGLint rowLength = static_cast<GrGLint>(rowBytes / bpp);
+                GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH, rowLength));
+                restoreGLRowLength = true;
+            }
+        } else {
+            if (trimRowBytes != rowBytes || swFlipY) {
+                // copy data into our new storage, skipping the trailing bytes
+                size_t trimSize = height * trimRowBytes;
+                const char* src = (const char*)data;
+                if (swFlipY) {
+                    src += (height - 1) * rowBytes;
+                }
+                char* dst = (char*)tempStorage.reset(trimSize);
+                for (int y = 0; y < height; y++) {
+                    memcpy(dst, src, trimRowBytes);
+                    if (swFlipY) {
+                        src -= rowBytes;
+                    } else {
+                        src += rowBytes;
+                    }
+                    dst += trimRowBytes;
+                }
+                // now point data to our copied version
+                data = tempStorage.get();
+            }
+        }
+        if (glFlipY) {
+            GL_CALL(PixelStorei(GR_GL_UNPACK_FLIP_Y, GR_GL_TRUE));
+        }
+        GL_CALL(PixelStorei(GR_GL_UNPACK_ALIGNMENT, static_cast<GrGLint>(bpp)));
+    }
+    bool succeeded = true;
+    if (isNewTexture &&
+        0 == left && 0 == top &&
+        desc.fWidth == width && desc.fHeight == height) {
+        CLEAR_ERROR_BEFORE_ALLOC(this->glInterface());
+        if (useTexStorage) {
+            // We never resize or change formats of textures.
+            GL_ALLOC_CALL(this->glInterface(),
+                          TexStorage2D(GR_GL_TEXTURE_2D,
+                                       1, // levels
+                                       internalFormat,
+                                       desc.fWidth, desc.fHeight));
+        } else {
+            if (GR_GL_PALETTE8_RGBA8 == internalFormat) {
+                GrGLsizei imageSize = desc.fWidth * desc.fHeight +
+                                      kGrColorTableSize;
+                GL_ALLOC_CALL(this->glInterface(),
+                              CompressedTexImage2D(GR_GL_TEXTURE_2D,
+                                                   0, // level
+                                                   internalFormat,
+                                                   desc.fWidth, desc.fHeight,
+                                                   0, // border
+                                                   imageSize,
+                                                   data));
+            } else {
+                GL_ALLOC_CALL(this->glInterface(),
+                              TexImage2D(GR_GL_TEXTURE_2D,
+                                         0, // level
+                                         internalFormat,
+                                         desc.fWidth, desc.fHeight,
+                                         0, // border
+                                         externalFormat, externalType,
+                                         data));
+            }
+        }
+        GrGLenum error = check_alloc_error(desc, this->glInterface());
+        if (error != GR_GL_NO_ERROR) {
+            succeeded = false;
+        } else {
+            // if we have data and we used TexStorage to create the texture, we
+            // now upload with TexSubImage.
+            if (NULL != data && useTexStorage) {
+                GL_CALL(TexSubImage2D(GR_GL_TEXTURE_2D,
+                                      0, // level
+                                      left, top,
+                                      width, height,
+                                      externalFormat, externalType,
+                                      data));
+            }
+        }
+    } else {
+        if (swFlipY || glFlipY) {
+            top = desc.fHeight - (top + height);
+        }
+        GL_CALL(TexSubImage2D(GR_GL_TEXTURE_2D,
+                              0, // level
+                              left, top,
+                              width, height,
+                              externalFormat, externalType, data));
+    }
+
+    if (restoreGLRowLength) {
+        SkASSERT(this->glCaps().unpackRowLengthSupport());
+        GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH, 0));
+    }
+    if (glFlipY) {
+        GL_CALL(PixelStorei(GR_GL_UNPACK_FLIP_Y, GR_GL_FALSE));
+    }
+    return succeeded;
+}
+
+static bool renderbuffer_storage_msaa(GrGLContext& ctx,
+                                      int sampleCount,
+                                      GrGLenum format,
+                                      int width, int height) {
+    CLEAR_ERROR_BEFORE_ALLOC(ctx.interface());
+    SkASSERT(GrGLCaps::kNone_MSFBOType != ctx.caps()->msFBOType());
+    switch (ctx.caps()->msFBOType()) {
+        case GrGLCaps::kDesktop_ARB_MSFBOType:
+        case GrGLCaps::kDesktop_EXT_MSFBOType:
+        case GrGLCaps::kES_3_0_MSFBOType:
+            GL_ALLOC_CALL(ctx.interface(),
+                          RenderbufferStorageMultisample(GR_GL_RENDERBUFFER,
+                                                         sampleCount,
+                                                         format,
+                                                         width, height));
+            break;
+        case GrGLCaps::kES_Apple_MSFBOType:
+            GL_ALLOC_CALL(ctx.interface(),
+                          RenderbufferStorageMultisampleES2APPLE(GR_GL_RENDERBUFFER,
+                                                                 sampleCount,
+                                                                 format,
+                                                                 width, height));
+            break;
+        case GrGLCaps::kES_EXT_MsToTexture_MSFBOType:
+        case GrGLCaps::kES_IMG_MsToTexture_MSFBOType:
+            GL_ALLOC_CALL(ctx.interface(),
+                          RenderbufferStorageMultisampleES2EXT(GR_GL_RENDERBUFFER,
+                                                               sampleCount,
+                                                               format,
+                                                               width, height));
+            break;
+        case GrGLCaps::kNone_MSFBOType:
+            GrCrash("Shouldn't be here if we don't support multisampled renderbuffers.");
+            break;
+    }
+    return (GR_GL_NO_ERROR == CHECK_ALLOC_ERROR(ctx.interface()));
+}
+
+bool GrGpuGL::createRenderTargetObjects(int width, int height,
+                                        GrGLuint texID,
+                                        GrGLRenderTarget::Desc* desc) {
+    desc->fMSColorRenderbufferID = 0;
+    desc->fRTFBOID = 0;
+    desc->fTexFBOID = 0;
+    desc->fIsWrapped = false;
+
+    GrGLenum status;
+
+    GrGLenum msColorFormat = 0; // suppress warning
+
+    if (desc->fSampleCnt > 0 && GrGLCaps::kNone_MSFBOType == this->glCaps().msFBOType()) {
+        goto FAILED;
+    }
+
+    GL_CALL(GenFramebuffers(1, &desc->fTexFBOID));
+    if (!desc->fTexFBOID) {
+        goto FAILED;
+    }
+
+
+    // If we are using multisampling we will create two FBOS. We render to one and then resolve to
+    // the texture bound to the other. The exception is the IMG multisample extension. With this
+    // extension the texture is multisampled when rendered to and then auto-resolves it when it is
+    // rendered from.
+    if (desc->fSampleCnt > 0 && this->glCaps().usesMSAARenderBuffers()) {
+        GL_CALL(GenFramebuffers(1, &desc->fRTFBOID));
+        GL_CALL(GenRenderbuffers(1, &desc->fMSColorRenderbufferID));
+        if (!desc->fRTFBOID ||
+            !desc->fMSColorRenderbufferID ||
+            !this->configToGLFormats(desc->fConfig,
+                                     // ES2 and ES3 require sized internal formats for rb storage.
+                                     kGLES_GrGLStandard == this->glStandard(),
+                                     &msColorFormat,
+                                     NULL,
+                                     NULL)) {
+            goto FAILED;
+        }
+    } else {
+        desc->fRTFBOID = desc->fTexFBOID;
+    }
+
+    // below here we may bind the FBO
+    fHWBoundRenderTarget = NULL;
+    if (desc->fRTFBOID != desc->fTexFBOID) {
+        SkASSERT(desc->fSampleCnt > 0);
+        GL_CALL(BindRenderbuffer(GR_GL_RENDERBUFFER,
+                                 desc->fMSColorRenderbufferID));
+        if (!renderbuffer_storage_msaa(fGLContext,
+                                       desc->fSampleCnt,
+                                       msColorFormat,
+                                       width, height)) {
+            goto FAILED;
+        }
+        GL_CALL(BindFramebuffer(GR_GL_FRAMEBUFFER, desc->fRTFBOID));
+        GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
+                                        GR_GL_COLOR_ATTACHMENT0,
+                                        GR_GL_RENDERBUFFER,
+                                        desc->fMSColorRenderbufferID));
+        if (desc->fCheckAllocation ||
+            !this->glCaps().isConfigVerifiedColorAttachment(desc->fConfig)) {
+            GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER));
+            if (status != GR_GL_FRAMEBUFFER_COMPLETE) {
+                goto FAILED;
+            }
+            fGLContext.caps()->markConfigAsValidColorAttachment(desc->fConfig);
+        }
+    }
+    GL_CALL(BindFramebuffer(GR_GL_FRAMEBUFFER, desc->fTexFBOID));
+
+    if (this->glCaps().usesImplicitMSAAResolve() && desc->fSampleCnt > 0) {
+        GL_CALL(FramebufferTexture2DMultisample(GR_GL_FRAMEBUFFER,
+                                                GR_GL_COLOR_ATTACHMENT0,
+                                                GR_GL_TEXTURE_2D,
+                                                texID, 0, desc->fSampleCnt));
+    } else {
+        GL_CALL(FramebufferTexture2D(GR_GL_FRAMEBUFFER,
+                                     GR_GL_COLOR_ATTACHMENT0,
+                                     GR_GL_TEXTURE_2D,
+                                     texID, 0));
+    }
+    if (desc->fCheckAllocation ||
+        !this->glCaps().isConfigVerifiedColorAttachment(desc->fConfig)) {
+        GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER));
+        if (status != GR_GL_FRAMEBUFFER_COMPLETE) {
+            goto FAILED;
+        }
+        fGLContext.caps()->markConfigAsValidColorAttachment(desc->fConfig);
+    }
+
+    return true;
+
+FAILED:
+    if (desc->fMSColorRenderbufferID) {
+        GL_CALL(DeleteRenderbuffers(1, &desc->fMSColorRenderbufferID));
+    }
+    if (desc->fRTFBOID != desc->fTexFBOID) {
+        GL_CALL(DeleteFramebuffers(1, &desc->fRTFBOID));
+    }
+    if (desc->fTexFBOID) {
+        GL_CALL(DeleteFramebuffers(1, &desc->fTexFBOID));
+    }
+    return false;
+}
+
+// good to set a break-point here to know when createTexture fails
+static GrTexture* return_null_texture() {
+//    SkDEBUGFAIL("null texture");
+    return NULL;
+}
+
+#if 0 && defined(SK_DEBUG)
+static size_t as_size_t(int x) {
+    return x;
+}
+#endif
+
+GrTexture* GrGpuGL::onCreateTexture(const GrTextureDesc& desc,
+                                    const void* srcData,
+                                    size_t rowBytes) {
+
+    GrGLTexture::Desc glTexDesc;
+    GrGLRenderTarget::Desc glRTDesc;
+
+    // Attempt to catch un- or wrongly initialized sample counts.
+    SkASSERT(desc.fSampleCnt >= 0 && desc.fSampleCnt <= 64);
+    // We fail if the MSAA was requested and is not available.
+    if (GrGLCaps::kNone_MSFBOType == this->glCaps().msFBOType() && desc.fSampleCnt) {
+        //GrPrintf("MSAA RT requested but not supported on this platform.");
+        return return_null_texture();
+    }
+    // If the sample count exceeds the max then we clamp it.
+    glTexDesc.fSampleCnt = GrMin(desc.fSampleCnt, this->caps()->maxSampleCount());
+
+    glTexDesc.fFlags = desc.fFlags;
+    glTexDesc.fWidth = desc.fWidth;
+    glTexDesc.fHeight = desc.fHeight;
+    glTexDesc.fConfig = desc.fConfig;
+    glTexDesc.fIsWrapped = false;
+
+    glRTDesc.fMSColorRenderbufferID = 0;
+    glRTDesc.fRTFBOID = 0;
+    glRTDesc.fTexFBOID = 0;
+    glRTDesc.fIsWrapped = false;
+    glRTDesc.fConfig = glTexDesc.fConfig;
+    glRTDesc.fCheckAllocation = SkToBool(desc.fFlags & kCheckAllocation_GrTextureFlagBit);
+
+    bool renderTarget = SkToBool(desc.fFlags & kRenderTarget_GrTextureFlagBit);
+
+    glTexDesc.fOrigin = resolve_origin(desc.fOrigin, renderTarget);
+    glRTDesc.fOrigin = glTexDesc.fOrigin;
+
+    glRTDesc.fSampleCnt = glTexDesc.fSampleCnt;
+    if (GrGLCaps::kNone_MSFBOType == this->glCaps().msFBOType() &&
+        desc.fSampleCnt) {
+        //GrPrintf("MSAA RT requested but not supported on this platform.");
+        return return_null_texture();
+    }
+
+    if (renderTarget) {
+        int maxRTSize = this->caps()->maxRenderTargetSize();
+        if (glTexDesc.fWidth > maxRTSize || glTexDesc.fHeight > maxRTSize) {
+            return return_null_texture();
+        }
+    } else {
+        int maxSize = this->caps()->maxTextureSize();
+        if (glTexDesc.fWidth > maxSize || glTexDesc.fHeight > maxSize) {
+            return return_null_texture();
+        }
+    }
+
+    GL_CALL(GenTextures(1, &glTexDesc.fTextureID));
+
+    if (!glTexDesc.fTextureID) {
+        return return_null_texture();
+    }
+
+    this->setScratchTextureUnit();
+    GL_CALL(BindTexture(GR_GL_TEXTURE_2D, glTexDesc.fTextureID));
+
+    if (renderTarget && this->glCaps().textureUsageSupport()) {
+        // provides a hint about how this texture will be used
+        GL_CALL(TexParameteri(GR_GL_TEXTURE_2D,
+                              GR_GL_TEXTURE_USAGE,
+                              GR_GL_FRAMEBUFFER_ATTACHMENT));
+    }
+
+    // Some drivers like to know filter/wrap before seeing glTexImage2D. Some
+    // drivers have a bug where an FBO won't be complete if it includes a
+    // texture that is not mipmap complete (considering the filter in use).
+    GrGLTexture::TexParams initialTexParams;
+    // we only set a subset here so invalidate first
+    initialTexParams.invalidate();
+    initialTexParams.fMinFilter = GR_GL_NEAREST;
+    initialTexParams.fMagFilter = GR_GL_NEAREST;
+    initialTexParams.fWrapS = GR_GL_CLAMP_TO_EDGE;
+    initialTexParams.fWrapT = GR_GL_CLAMP_TO_EDGE;
+    GL_CALL(TexParameteri(GR_GL_TEXTURE_2D,
+                          GR_GL_TEXTURE_MAG_FILTER,
+                          initialTexParams.fMagFilter));
+    GL_CALL(TexParameteri(GR_GL_TEXTURE_2D,
+                          GR_GL_TEXTURE_MIN_FILTER,
+                          initialTexParams.fMinFilter));
+    GL_CALL(TexParameteri(GR_GL_TEXTURE_2D,
+                          GR_GL_TEXTURE_WRAP_S,
+                          initialTexParams.fWrapS));
+    GL_CALL(TexParameteri(GR_GL_TEXTURE_2D,
+                          GR_GL_TEXTURE_WRAP_T,
+                          initialTexParams.fWrapT));
+    if (!this->uploadTexData(glTexDesc, true, 0, 0,
+                             glTexDesc.fWidth, glTexDesc.fHeight,
+                             desc.fConfig, srcData, rowBytes)) {
+        GL_CALL(DeleteTextures(1, &glTexDesc.fTextureID));
+        return return_null_texture();
+    }
+
+    GrGLTexture* tex;
+    if (renderTarget) {
+        // unbind the texture from the texture unit before binding it to the frame buffer
+        GL_CALL(BindTexture(GR_GL_TEXTURE_2D, 0));
+
+        if (!this->createRenderTargetObjects(glTexDesc.fWidth,
+                                             glTexDesc.fHeight,
+                                             glTexDesc.fTextureID,
+                                             &glRTDesc)) {
+            GL_CALL(DeleteTextures(1, &glTexDesc.fTextureID));
+            return return_null_texture();
+        }
+        tex = SkNEW_ARGS(GrGLTexture, (this, glTexDesc, glRTDesc));
+    } else {
+        tex = SkNEW_ARGS(GrGLTexture, (this, glTexDesc));
+    }
+    tex->setCachedTexParams(initialTexParams, this->getResetTimestamp());
+#ifdef TRACE_TEXTURE_CREATION
+    GrPrintf("--- new texture [%d] size=(%d %d) config=%d\n",
+             glTexDesc.fTextureID, desc.fWidth, desc.fHeight, desc.fConfig);
+#endif
+    return tex;
+}
+
+namespace {
+
+const GrGLuint kUnknownBitCount = GrGLStencilBuffer::kUnknownBitCount;
+
+void inline get_stencil_rb_sizes(const GrGLInterface* gl,
+                                 GrGLStencilBuffer::Format* format) {
+
+    // we shouldn't ever know one size and not the other
+    SkASSERT((kUnknownBitCount == format->fStencilBits) ==
+             (kUnknownBitCount == format->fTotalBits));
+    if (kUnknownBitCount == format->fStencilBits) {
+        GR_GL_GetRenderbufferParameteriv(gl, GR_GL_RENDERBUFFER,
+                                         GR_GL_RENDERBUFFER_STENCIL_SIZE,
+                                         (GrGLint*)&format->fStencilBits);
+        if (format->fPacked) {
+            GR_GL_GetRenderbufferParameteriv(gl, GR_GL_RENDERBUFFER,
+                                             GR_GL_RENDERBUFFER_DEPTH_SIZE,
+                                             (GrGLint*)&format->fTotalBits);
+            format->fTotalBits += format->fStencilBits;
+        } else {
+            format->fTotalBits = format->fStencilBits;
+        }
+    }
+}
+}
+
+bool GrGpuGL::createStencilBufferForRenderTarget(GrRenderTarget* rt,
+                                                 int width, int height) {
+
+    // All internally created RTs are also textures. We don't create
+    // SBs for a client's standalone RT (that is a RT that isn't also a texture).
+    SkASSERT(rt->asTexture());
+    SkASSERT(width >= rt->width());
+    SkASSERT(height >= rt->height());
+
+    int samples = rt->numSamples();
+    GrGLuint sbID;
+    GL_CALL(GenRenderbuffers(1, &sbID));
+    if (!sbID) {
+        return false;
+    }
+
+    int stencilFmtCnt = this->glCaps().stencilFormats().count();
+    for (int i = 0; i < stencilFmtCnt; ++i) {
+        GL_CALL(BindRenderbuffer(GR_GL_RENDERBUFFER, sbID));
+        // we start with the last stencil format that succeeded in hopes
+        // that we won't go through this loop more than once after the
+        // first (painful) stencil creation.
+        int sIdx = (i + fLastSuccessfulStencilFmtIdx) % stencilFmtCnt;
+        const GrGLCaps::StencilFormat& sFmt =
+            this->glCaps().stencilFormats()[sIdx];
+        CLEAR_ERROR_BEFORE_ALLOC(this->glInterface());
+        // we do this "if" so that we don't call the multisample
+        // version on a GL that doesn't have an MSAA extension.
+        bool created;
+        if (samples > 0) {
+            created = renderbuffer_storage_msaa(fGLContext,
+                                                samples,
+                                                sFmt.fInternalFormat,
+                                                width, height);
+        } else {
+            GL_ALLOC_CALL(this->glInterface(),
+                          RenderbufferStorage(GR_GL_RENDERBUFFER,
+                                              sFmt.fInternalFormat,
+                                              width, height));
+            created =
+                (GR_GL_NO_ERROR == check_alloc_error(rt->desc(), this->glInterface()));
+        }
+        if (created) {
+            // After sized formats we attempt an unsized format and take
+            // whatever sizes GL gives us. In that case we query for the size.
+            GrGLStencilBuffer::Format format = sFmt;
+            get_stencil_rb_sizes(this->glInterface(), &format);
+            static const bool kIsWrapped = false;
+            SkAutoTUnref<GrStencilBuffer> sb(SkNEW_ARGS(GrGLStencilBuffer,
+                                             (this, kIsWrapped, sbID, width, height,
+                                              samples, format)));
+            if (this->attachStencilBufferToRenderTarget(sb, rt)) {
+                fLastSuccessfulStencilFmtIdx = sIdx;
+                sb->transferToCache();
+                rt->setStencilBuffer(sb);
+                return true;
+            }
+            sb->abandon(); // otherwise we lose sbID
+        }
+    }
+    GL_CALL(DeleteRenderbuffers(1, &sbID));
+    return false;
+}
+
+bool GrGpuGL::attachStencilBufferToRenderTarget(GrStencilBuffer* sb, GrRenderTarget* rt) {
+    GrGLRenderTarget* glrt = (GrGLRenderTarget*) rt;
+
+    GrGLuint fbo = glrt->renderFBOID();
+
+    if (NULL == sb) {
+        if (NULL != rt->getStencilBuffer()) {
+            GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
+                                            GR_GL_STENCIL_ATTACHMENT,
+                                            GR_GL_RENDERBUFFER, 0));
+            GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
+                                            GR_GL_DEPTH_ATTACHMENT,
+                                            GR_GL_RENDERBUFFER, 0));
+#ifdef SK_DEBUG
+            GrGLenum status;
+            GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER));
+            SkASSERT(GR_GL_FRAMEBUFFER_COMPLETE == status);
+#endif
+        }
+        return true;
+    } else {
+        GrGLStencilBuffer* glsb = static_cast<GrGLStencilBuffer*>(sb);
+        GrGLuint rb = glsb->renderbufferID();
+
+        fHWBoundRenderTarget = NULL;
+        GL_CALL(BindFramebuffer(GR_GL_FRAMEBUFFER, fbo));
+        GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
+                                        GR_GL_STENCIL_ATTACHMENT,
+                                        GR_GL_RENDERBUFFER, rb));
+        if (glsb->format().fPacked) {
+            GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
+                                            GR_GL_DEPTH_ATTACHMENT,
+                                            GR_GL_RENDERBUFFER, rb));
+        } else {
+            GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
+                                            GR_GL_DEPTH_ATTACHMENT,
+                                            GR_GL_RENDERBUFFER, 0));
+        }
+
+        GrGLenum status;
+        if (!this->glCaps().isColorConfigAndStencilFormatVerified(rt->config(), glsb->format())) {
+            GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER));
+            if (status != GR_GL_FRAMEBUFFER_COMPLETE) {
+                GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
+                                                GR_GL_STENCIL_ATTACHMENT,
+                                                GR_GL_RENDERBUFFER, 0));
+                if (glsb->format().fPacked) {
+                    GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
+                                                    GR_GL_DEPTH_ATTACHMENT,
+                                                    GR_GL_RENDERBUFFER, 0));
+                }
+                return false;
+            } else {
+                fGLContext.caps()->markColorConfigAndStencilFormatAsVerified(
+                    rt->config(),
+                    glsb->format());
+            }
+        }
+        return true;
+    }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+GrVertexBuffer* GrGpuGL::onCreateVertexBuffer(size_t size, bool dynamic) {
+    GrGLVertexBuffer::Desc desc;
+    desc.fDynamic = dynamic;
+    desc.fSizeInBytes = size;
+    desc.fIsWrapped = false;
+
+    if (this->glCaps().useNonVBOVertexAndIndexDynamicData() && desc.fDynamic) {
+        desc.fID = 0;
+        GrGLVertexBuffer* vertexBuffer = SkNEW_ARGS(GrGLVertexBuffer, (this, desc));
+        return vertexBuffer;
+    } else {
+        GL_CALL(GenBuffers(1, &desc.fID));
+        if (desc.fID) {
+            fHWGeometryState.setVertexBufferID(this, desc.fID);
+            CLEAR_ERROR_BEFORE_ALLOC(this->glInterface());
+            // make sure driver can allocate memory for this buffer
+            GL_ALLOC_CALL(this->glInterface(),
+                          BufferData(GR_GL_ARRAY_BUFFER,
+                                     (GrGLsizeiptr) desc.fSizeInBytes,
+                                     NULL,   // data ptr
+                                     desc.fDynamic ? GR_GL_DYNAMIC_DRAW : GR_GL_STATIC_DRAW));
+            if (CHECK_ALLOC_ERROR(this->glInterface()) != GR_GL_NO_ERROR) {
+                GL_CALL(DeleteBuffers(1, &desc.fID));
+                this->notifyVertexBufferDelete(desc.fID);
+                return NULL;
+            }
+            GrGLVertexBuffer* vertexBuffer = SkNEW_ARGS(GrGLVertexBuffer, (this, desc));
+            return vertexBuffer;
+        }
+        return NULL;
+    }
+}
+
+GrIndexBuffer* GrGpuGL::onCreateIndexBuffer(size_t size, bool dynamic) {
+    GrGLIndexBuffer::Desc desc;
+    desc.fDynamic = dynamic;
+    desc.fSizeInBytes = size;
+    desc.fIsWrapped = false;
+
+    if (this->glCaps().useNonVBOVertexAndIndexDynamicData() && desc.fDynamic) {
+        desc.fID = 0;
+        GrIndexBuffer* indexBuffer = SkNEW_ARGS(GrGLIndexBuffer, (this, desc));
+        return indexBuffer;
+    } else {
+        GL_CALL(GenBuffers(1, &desc.fID));
+        if (desc.fID) {
+            fHWGeometryState.setIndexBufferIDOnDefaultVertexArray(this, desc.fID);
+            CLEAR_ERROR_BEFORE_ALLOC(this->glInterface());
+            // make sure driver can allocate memory for this buffer
+            GL_ALLOC_CALL(this->glInterface(),
+                          BufferData(GR_GL_ELEMENT_ARRAY_BUFFER,
+                                     (GrGLsizeiptr) desc.fSizeInBytes,
+                                     NULL,   // data ptr
+                                     desc.fDynamic ? GR_GL_DYNAMIC_DRAW : GR_GL_STATIC_DRAW));
+            if (CHECK_ALLOC_ERROR(this->glInterface()) != GR_GL_NO_ERROR) {
+                GL_CALL(DeleteBuffers(1, &desc.fID));
+                this->notifyIndexBufferDelete(desc.fID);
+                return NULL;
+            }
+            GrIndexBuffer* indexBuffer = SkNEW_ARGS(GrGLIndexBuffer, (this, desc));
+            return indexBuffer;
+        }
+        return NULL;
+    }
+}
+
+GrPath* GrGpuGL::onCreatePath(const SkPath& inPath, const SkStrokeRec& stroke) {
+    SkASSERT(this->caps()->pathRenderingSupport());
+    return SkNEW_ARGS(GrGLPath, (this, inPath, stroke));
+}
+
+void GrGpuGL::flushScissor() {
+    if (fScissorState.fEnabled) {
+        // Only access the RT if scissoring is being enabled. We can call this before performing
+        // a glBlitFramebuffer for a surface->surface copy, which requires no RT to be bound to the
+        // GrDrawState.
+        const GrDrawState& drawState = this->getDrawState();
+        const GrGLRenderTarget* rt =
+            static_cast<const GrGLRenderTarget*>(drawState.getRenderTarget());
+
+        SkASSERT(NULL != rt);
+        const GrGLIRect& vp = rt->getViewport();
+        GrGLIRect scissor;
+        scissor.setRelativeTo(vp,
+                              fScissorState.fRect.fLeft,
+                              fScissorState.fRect.fTop,
+                              fScissorState.fRect.width(),
+                              fScissorState.fRect.height(),
+                              rt->origin());
+        // if the scissor fully contains the viewport then we fall through and
+        // disable the scissor test.
+        if (!scissor.contains(vp)) {
+            if (fHWScissorSettings.fRect != scissor) {
+                scissor.pushToGLScissor(this->glInterface());
+                fHWScissorSettings.fRect = scissor;
+            }
+            if (kYes_TriState != fHWScissorSettings.fEnabled) {
+                GL_CALL(Enable(GR_GL_SCISSOR_TEST));
+                fHWScissorSettings.fEnabled = kYes_TriState;
+            }
+            return;
+        }
+    }
+    if (kNo_TriState != fHWScissorSettings.fEnabled) {
+        GL_CALL(Disable(GR_GL_SCISSOR_TEST));
+        fHWScissorSettings.fEnabled = kNo_TriState;
+        return;
+    }
+}
+
+void GrGpuGL::onClear(const SkIRect* rect, GrColor color, bool canIgnoreRect) {
+    const GrDrawState& drawState = this->getDrawState();
+    const GrRenderTarget* rt = drawState.getRenderTarget();
+    // parent class should never let us get here with no RT
+    SkASSERT(NULL != rt);
+
+    if (canIgnoreRect && this->glCaps().fullClearIsFree()) {
+        rect = NULL;
+    }
+
+    SkIRect clippedRect;
+    if (NULL != rect) {
+        // flushScissor expects rect to be clipped to the target.
+        clippedRect = *rect;
+        SkIRect rtRect = SkIRect::MakeWH(rt->width(), rt->height());
+        if (clippedRect.intersect(rtRect)) {
+            rect = &clippedRect;
+        } else {
+            return;
+        }
+    }
+
+    this->flushRenderTarget(rect);
+    GrAutoTRestore<ScissorState> asr(&fScissorState);
+    fScissorState.fEnabled = (NULL != rect);
+    if (fScissorState.fEnabled) {
+        fScissorState.fRect = *rect;
+    }
+    this->flushScissor();
+
+    GrGLfloat r, g, b, a;
+    static const GrGLfloat scale255 = 1.f / 255.f;
+    a = GrColorUnpackA(color) * scale255;
+    GrGLfloat scaleRGB = scale255;
+    r = GrColorUnpackR(color) * scaleRGB;
+    g = GrColorUnpackG(color) * scaleRGB;
+    b = GrColorUnpackB(color) * scaleRGB;
+
+    GL_CALL(ColorMask(GR_GL_TRUE, GR_GL_TRUE, GR_GL_TRUE, GR_GL_TRUE));
+    fHWWriteToColor = kYes_TriState;
+    GL_CALL(ClearColor(r, g, b, a));
+    GL_CALL(Clear(GR_GL_COLOR_BUFFER_BIT));
+}
+
+void GrGpuGL::clearStencil() {
+    if (NULL == this->getDrawState().getRenderTarget()) {
+        return;
+    }
+
+    this->flushRenderTarget(&SkIRect::EmptyIRect());
+
+    GrAutoTRestore<ScissorState> asr(&fScissorState);
+    fScissorState.fEnabled = false;
+    this->flushScissor();
+
+    GL_CALL(StencilMask(0xffffffff));
+    GL_CALL(ClearStencil(0));
+    GL_CALL(Clear(GR_GL_STENCIL_BUFFER_BIT));
+    fHWStencilSettings.invalidate();
+}
+
+void GrGpuGL::clearStencilClip(const SkIRect& rect, bool insideClip) {
+    const GrDrawState& drawState = this->getDrawState();
+    const GrRenderTarget* rt = drawState.getRenderTarget();
+    SkASSERT(NULL != rt);
+
+    // this should only be called internally when we know we have a
+    // stencil buffer.
+    SkASSERT(NULL != rt->getStencilBuffer());
+    GrGLint stencilBitCount = rt->getStencilBuffer()->bits();
+#if 0
+    SkASSERT(stencilBitCount > 0);
+    GrGLint clipStencilMask = (1 << (stencilBitCount - 1));
+#else
+    // we could just clear the clip bit but when we go through
+    // ANGLE a partial stencil mask will cause clears to be
+    // turned into draws. Our contract on GrDrawTarget says that
+    // changing the clip between stencil passes may or may not
+    // zero the client's clip bits. So we just clear the whole thing.
+    static const GrGLint clipStencilMask = ~0;
+#endif
+    GrGLint value;
+    if (insideClip) {
+        value = (1 << (stencilBitCount - 1));
+    } else {
+        value = 0;
+    }
+    this->flushRenderTarget(&SkIRect::EmptyIRect());
+
+    GrAutoTRestore<ScissorState> asr(&fScissorState);
+    fScissorState.fEnabled = true;
+    fScissorState.fRect = rect;
+    this->flushScissor();
+
+    GL_CALL(StencilMask((uint32_t) clipStencilMask));
+    GL_CALL(ClearStencil(value));
+    GL_CALL(Clear(GR_GL_STENCIL_BUFFER_BIT));
+    fHWStencilSettings.invalidate();
+}
+
+void GrGpuGL::onForceRenderTargetFlush() {
+    this->flushRenderTarget(&SkIRect::EmptyIRect());
+}
+
+bool GrGpuGL::readPixelsWillPayForYFlip(GrRenderTarget* renderTarget,
+                                        int left, int top,
+                                        int width, int height,
+                                        GrPixelConfig config,
+                                        size_t rowBytes) const {
+    // If this render target is already TopLeft, we don't need to flip.
+    if (kTopLeft_GrSurfaceOrigin == renderTarget->origin()) {
+        return false;
+    }
+
+    // if GL can do the flip then we'll never pay for it.
+    if (this->glCaps().packFlipYSupport()) {
+        return false;
+    }
+
+    // If we have to do memcpy to handle non-trim rowBytes then we
+    // get the flip for free. Otherwise it costs.
+    if (this->glCaps().packRowLengthSupport()) {
+        return true;
+    }
+    // If we have to do memcpys to handle rowBytes then y-flip is free
+    // Note the rowBytes might be tight to the passed in data, but if data
+    // gets clipped in x to the target the rowBytes will no longer be tight.
+    if (left >= 0 && (left + width) < renderTarget->width()) {
+        return 0 == rowBytes ||
+               GrBytesPerPixel(config) * width == rowBytes;
+    } else {
+        return false;
+    }
+}
+
+bool GrGpuGL::onReadPixels(GrRenderTarget* target,
+                           int left, int top,
+                           int width, int height,
+                           GrPixelConfig config,
+                           void* buffer,
+                           size_t rowBytes) {
+    GrGLenum format;
+    GrGLenum type;
+    bool flipY = kBottomLeft_GrSurfaceOrigin == target->origin();
+    if (!this->configToGLFormats(config, false, NULL, &format, &type)) {
+        return false;
+    }
+    size_t bpp = GrBytesPerPixel(config);
+    if (!adjust_pixel_ops_params(target->width(), target->height(), bpp,
+                                 &left, &top, &width, &height,
+                                 const_cast<const void**>(&buffer),
+                                 &rowBytes)) {
+        return false;
+    }
+
+    // resolve the render target if necessary
+    GrGLRenderTarget* tgt = static_cast<GrGLRenderTarget*>(target);
+    GrDrawState::AutoRenderTargetRestore artr;
+    switch (tgt->getResolveType()) {
+        case GrGLRenderTarget::kCantResolve_ResolveType:
+            return false;
+        case GrGLRenderTarget::kAutoResolves_ResolveType:
+            artr.set(this->drawState(), target);
+            this->flushRenderTarget(&SkIRect::EmptyIRect());
+            break;
+        case GrGLRenderTarget::kCanResolve_ResolveType:
+            this->onResolveRenderTarget(tgt);
+            // we don't track the state of the READ FBO ID.
+            GL_CALL(BindFramebuffer(GR_GL_READ_FRAMEBUFFER,
+                                    tgt->textureFBOID()));
+            break;
+        default:
+            GrCrash("Unknown resolve type");
+    }
+
+    const GrGLIRect& glvp = tgt->getViewport();
+
+    // the read rect is viewport-relative
+    GrGLIRect readRect;
+    readRect.setRelativeTo(glvp, left, top, width, height, target->origin());
+
+    size_t tightRowBytes = bpp * width;
+    if (0 == rowBytes) {
+        rowBytes = tightRowBytes;
+    }
+    size_t readDstRowBytes = tightRowBytes;
+    void* readDst = buffer;
+
+    // determine if GL can read using the passed rowBytes or if we need
+    // a scratch buffer.
+    SkAutoSMalloc<32 * sizeof(GrColor)> scratch;
+    if (rowBytes != tightRowBytes) {
+        if (this->glCaps().packRowLengthSupport()) {
+            SkASSERT(!(rowBytes % sizeof(GrColor)));
+            GL_CALL(PixelStorei(GR_GL_PACK_ROW_LENGTH,
+                                static_cast<GrGLint>(rowBytes / sizeof(GrColor))));
+            readDstRowBytes = rowBytes;
+        } else {
+            scratch.reset(tightRowBytes * height);
+            readDst = scratch.get();
+        }
+    }
+    if (flipY && this->glCaps().packFlipYSupport()) {
+        GL_CALL(PixelStorei(GR_GL_PACK_REVERSE_ROW_ORDER, 1));
+    }
+    GL_CALL(ReadPixels(readRect.fLeft, readRect.fBottom,
+                       readRect.fWidth, readRect.fHeight,
+                       format, type, readDst));
+    if (readDstRowBytes != tightRowBytes) {
+        SkASSERT(this->glCaps().packRowLengthSupport());
+        GL_CALL(PixelStorei(GR_GL_PACK_ROW_LENGTH, 0));
+    }
+    if (flipY && this->glCaps().packFlipYSupport()) {
+        GL_CALL(PixelStorei(GR_GL_PACK_REVERSE_ROW_ORDER, 0));
+        flipY = false;
+    }
+
+    // now reverse the order of the rows, since GL's are bottom-to-top, but our
+    // API presents top-to-bottom. We must preserve the padding contents. Note
+    // that the above readPixels did not overwrite the padding.
+    if (readDst == buffer) {
+        SkASSERT(rowBytes == readDstRowBytes);
+        if (flipY) {
+            scratch.reset(tightRowBytes);
+            void* tmpRow = scratch.get();
+            // flip y in-place by rows
+            const int halfY = height >> 1;
+            char* top = reinterpret_cast<char*>(buffer);
+            char* bottom = top + (height - 1) * rowBytes;
+            for (int y = 0; y < halfY; y++) {
+                memcpy(tmpRow, top, tightRowBytes);
+                memcpy(top, bottom, tightRowBytes);
+                memcpy(bottom, tmpRow, tightRowBytes);
+                top += rowBytes;
+                bottom -= rowBytes;
+            }
+        }
+    } else {
+        SkASSERT(readDst != buffer);
+        SkASSERT(rowBytes != tightRowBytes);
+        // copy from readDst to buffer while flipping y
+        // const int halfY = height >> 1;
+        const char* src = reinterpret_cast<const char*>(readDst);
+        char* dst = reinterpret_cast<char*>(buffer);
+        if (flipY) {
+            dst += (height-1) * rowBytes;
+        }
+        for (int y = 0; y < height; y++) {
+            memcpy(dst, src, tightRowBytes);
+            src += readDstRowBytes;
+            if (!flipY) {
+                dst += rowBytes;
+            } else {
+                dst -= rowBytes;
+            }
+        }
+    }
+    return true;
+}
+
+void GrGpuGL::flushRenderTarget(const SkIRect* bound) {
+
+    GrGLRenderTarget* rt =
+        static_cast<GrGLRenderTarget*>(this->drawState()->getRenderTarget());
+    SkASSERT(NULL != rt);
+
+    if (fHWBoundRenderTarget != rt) {
+        GL_CALL(BindFramebuffer(GR_GL_FRAMEBUFFER, rt->renderFBOID()));
+#ifdef SK_DEBUG
+        // don't do this check in Chromium -- this is causing
+        // lots of repeated command buffer flushes when the compositor is
+        // rendering with Ganesh, which is really slow; even too slow for
+        // Debug mode.
1.1529 + if (!this->glContext().isChromium()) { 1.1530 + GrGLenum status; 1.1531 + GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER)); 1.1532 + if (status != GR_GL_FRAMEBUFFER_COMPLETE) { 1.1533 + GrPrintf("GrGpuGL::flushRenderTarget glCheckFramebufferStatus %x\n", status); 1.1534 + } 1.1535 + } 1.1536 +#endif 1.1537 + fHWBoundRenderTarget = rt; 1.1538 + const GrGLIRect& vp = rt->getViewport(); 1.1539 + if (fHWViewport != vp) { 1.1540 + vp.pushToGLViewport(this->glInterface()); 1.1541 + fHWViewport = vp; 1.1542 + } 1.1543 + } 1.1544 + if (NULL == bound || !bound->isEmpty()) { 1.1545 + rt->flagAsNeedingResolve(bound); 1.1546 + } 1.1547 + 1.1548 + GrTexture *texture = rt->asTexture(); 1.1549 + if (texture) { 1.1550 + texture->dirtyMipMaps(true); 1.1551 + } 1.1552 +} 1.1553 + 1.1554 +GrGLenum gPrimitiveType2GLMode[] = { 1.1555 + GR_GL_TRIANGLES, 1.1556 + GR_GL_TRIANGLE_STRIP, 1.1557 + GR_GL_TRIANGLE_FAN, 1.1558 + GR_GL_POINTS, 1.1559 + GR_GL_LINES, 1.1560 + GR_GL_LINE_STRIP 1.1561 +}; 1.1562 + 1.1563 +#define SWAP_PER_DRAW 0 1.1564 + 1.1565 +#if SWAP_PER_DRAW 1.1566 + #if defined(SK_BUILD_FOR_MAC) 1.1567 + #include <AGL/agl.h> 1.1568 + #elif defined(SK_BUILD_FOR_WIN32) 1.1569 + #include <gl/GL.h> 1.1570 + void SwapBuf() { 1.1571 + DWORD procID = GetCurrentProcessId(); 1.1572 + HWND hwnd = GetTopWindow(GetDesktopWindow()); 1.1573 + while(hwnd) { 1.1574 + DWORD wndProcID = 0; 1.1575 + GetWindowThreadProcessId(hwnd, &wndProcID); 1.1576 + if(wndProcID == procID) { 1.1577 + SwapBuffers(GetDC(hwnd)); 1.1578 + } 1.1579 + hwnd = GetNextWindow(hwnd, GW_HWNDNEXT); 1.1580 + } 1.1581 + } 1.1582 + #endif 1.1583 +#endif 1.1584 + 1.1585 +void GrGpuGL::onGpuDraw(const DrawInfo& info) { 1.1586 + size_t indexOffsetInBytes; 1.1587 + this->setupGeometry(info, &indexOffsetInBytes); 1.1588 + 1.1589 + SkASSERT((size_t)info.primitiveType() < GR_ARRAY_COUNT(gPrimitiveType2GLMode)); 1.1590 + 1.1591 + if (info.isIndexed()) { 1.1592 + GrGLvoid* indices = 1.1593 + reinterpret_cast<GrGLvoid*>(indexOffsetInBytes + sizeof(uint16_t) * info.startIndex()); 1.1594 + // info.startVertex() was accounted for by setupGeometry. 1.1595 + GL_CALL(DrawElements(gPrimitiveType2GLMode[info.primitiveType()], 1.1596 + info.indexCount(), 1.1597 + GR_GL_UNSIGNED_SHORT, 1.1598 + indices)); 1.1599 + } else { 1.1600 + // Pass 0 for parameter first. We have to adjust glVertexAttribPointer() to account for 1.1601 + // startVertex in the DrawElements case. So we always rely on setupGeometry to have 1.1602 + // accounted for startVertex. 
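// [Illustrative sketch, not part of the patch] onGpuDraw() above passes the index offset to
// DrawElements as a pointer: with an element-array buffer bound, GL interprets that "pointer" as a
// byte offset into the buffer. A small standalone illustration of the arithmetic for 16-bit indices,
// matching what the patch computes inline:
#include <cstddef>
#include <cstdint>
static const void* index_buffer_offset(std::size_t baseOffsetInBytes, int startIndex) {
    // byte offset of the first index to draw, disguised as a pointer for glDrawElements()
    return reinterpret_cast<const void*>(baseOffsetInBytes + sizeof(std::uint16_t) * startIndex);
}
// e.g. glDrawElements(GL_TRIANGLES, indexCount, GL_UNSIGNED_SHORT,
//                     index_buffer_offset(indexOffsetInBytes, startIndex));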
1.1603 + GL_CALL(DrawArrays(gPrimitiveType2GLMode[info.primitiveType()], 0, info.vertexCount())); 1.1604 + } 1.1605 +#if SWAP_PER_DRAW 1.1606 + glFlush(); 1.1607 + #if defined(SK_BUILD_FOR_MAC) 1.1608 + aglSwapBuffers(aglGetCurrentContext()); 1.1609 + int set_a_break_pt_here = 9; 1.1610 + aglSwapBuffers(aglGetCurrentContext()); 1.1611 + #elif defined(SK_BUILD_FOR_WIN32) 1.1612 + SwapBuf(); 1.1613 + int set_a_break_pt_here = 9; 1.1614 + SwapBuf(); 1.1615 + #endif 1.1616 +#endif 1.1617 +} 1.1618 + 1.1619 +static GrGLenum gr_stencil_op_to_gl_path_rendering_fill_mode(GrStencilOp op) { 1.1620 + switch (op) { 1.1621 + default: 1.1622 + GrCrash("Unexpected path fill."); 1.1623 + /* fallthrough */; 1.1624 + case kIncClamp_StencilOp: 1.1625 + return GR_GL_COUNT_UP; 1.1626 + case kInvert_StencilOp: 1.1627 + return GR_GL_INVERT; 1.1628 + } 1.1629 +} 1.1630 + 1.1631 +void GrGpuGL::onGpuStencilPath(const GrPath* path, SkPath::FillType fill) { 1.1632 + SkASSERT(this->caps()->pathRenderingSupport()); 1.1633 + 1.1634 + GrGLuint id = static_cast<const GrGLPath*>(path)->pathID(); 1.1635 + SkASSERT(NULL != this->drawState()->getRenderTarget()); 1.1636 + SkASSERT(NULL != this->drawState()->getRenderTarget()->getStencilBuffer()); 1.1637 + 1.1638 + flushPathStencilSettings(fill); 1.1639 + 1.1640 + // Decide how to manipulate the stencil buffer based on the fill rule. 1.1641 + SkASSERT(!fHWPathStencilSettings.isTwoSided()); 1.1642 + 1.1643 + GrGLenum fillMode = 1.1644 + gr_stencil_op_to_gl_path_rendering_fill_mode(fHWPathStencilSettings.passOp(GrStencilSettings::kFront_Face)); 1.1645 + GrGLint writeMask = fHWPathStencilSettings.writeMask(GrStencilSettings::kFront_Face); 1.1646 + GL_CALL(StencilFillPath(id, fillMode, writeMask)); 1.1647 +} 1.1648 + 1.1649 +void GrGpuGL::onGpuDrawPath(const GrPath* path, SkPath::FillType fill) { 1.1650 + SkASSERT(this->caps()->pathRenderingSupport()); 1.1651 + 1.1652 + GrGLuint id = static_cast<const GrGLPath*>(path)->pathID(); 1.1653 + SkASSERT(NULL != this->drawState()->getRenderTarget()); 1.1654 + SkASSERT(NULL != this->drawState()->getRenderTarget()->getStencilBuffer()); 1.1655 + SkASSERT(!fCurrentProgram->hasVertexShader()); 1.1656 + 1.1657 + flushPathStencilSettings(fill); 1.1658 + const SkStrokeRec& stroke = path->getStroke(); 1.1659 + 1.1660 + SkPath::FillType nonInvertedFill = SkPath::ConvertToNonInverseFillType(fill); 1.1661 + SkASSERT(!fHWPathStencilSettings.isTwoSided()); 1.1662 + GrGLenum fillMode = 1.1663 + gr_stencil_op_to_gl_path_rendering_fill_mode(fHWPathStencilSettings.passOp(GrStencilSettings::kFront_Face)); 1.1664 + GrGLint writeMask = fHWPathStencilSettings.writeMask(GrStencilSettings::kFront_Face); 1.1665 + 1.1666 + if (stroke.isFillStyle() || SkStrokeRec::kStrokeAndFill_Style == stroke.getStyle()) { 1.1667 + GL_CALL(StencilFillPath(id, fillMode, writeMask)); 1.1668 + } 1.1669 + if (stroke.needToApply()) { 1.1670 + GL_CALL(StencilStrokePath(id, 0xffff, writeMask)); 1.1671 + } 1.1672 + 1.1673 + if (nonInvertedFill == fill) { 1.1674 + if (stroke.needToApply()) { 1.1675 + GL_CALL(CoverStrokePath(id, GR_GL_BOUNDING_BOX)); 1.1676 + } else { 1.1677 + GL_CALL(CoverFillPath(id, GR_GL_BOUNDING_BOX)); 1.1678 + } 1.1679 + } else { 1.1680 + GrDrawState* drawState = this->drawState(); 1.1681 + GrDrawState::AutoViewMatrixRestore avmr; 1.1682 + SkRect bounds = SkRect::MakeLTRB(0, 0, 1.1683 + SkIntToScalar(drawState->getRenderTarget()->width()), 1.1684 + SkIntToScalar(drawState->getRenderTarget()->height())); 1.1685 + SkMatrix vmi; 1.1686 + // mapRect through persp matrix 
may not be correct 1.1687 + if (!drawState->getViewMatrix().hasPerspective() && drawState->getViewInverse(&vmi)) { 1.1688 + vmi.mapRect(&bounds); 1.1689 + // We could theoretically set bloat = 0; we keep it to guard against matrix-inversion 1.1690 + // imprecision. 1.1691 + SkScalar bloat = drawState->getViewMatrix().getMaxStretch() * SK_ScalarHalf; 1.1692 + bounds.outset(bloat, bloat); 1.1693 + } else { 1.1694 + avmr.setIdentity(drawState); 1.1695 + } 1.1696 + 1.1697 + this->drawSimpleRect(bounds, NULL); 1.1698 + } 1.1699 +} 1.1700 + 1.1701 +void GrGpuGL::onResolveRenderTarget(GrRenderTarget* target) { 1.1702 + GrGLRenderTarget* rt = static_cast<GrGLRenderTarget*>(target); 1.1703 + if (rt->needsResolve()) { 1.1704 + // Some extensions automatically resolve the texture when it is read. 1.1705 + if (this->glCaps().usesMSAARenderBuffers()) { 1.1706 + SkASSERT(rt->textureFBOID() != rt->renderFBOID()); 1.1707 + GL_CALL(BindFramebuffer(GR_GL_READ_FRAMEBUFFER, rt->renderFBOID())); 1.1708 + GL_CALL(BindFramebuffer(GR_GL_DRAW_FRAMEBUFFER, rt->textureFBOID())); 1.1709 + // make sure we go through flushRenderTarget() since we've modified 1.1710 + // the bound DRAW FBO ID. 1.1711 + fHWBoundRenderTarget = NULL; 1.1712 + const GrGLIRect& vp = rt->getViewport(); 1.1713 + const SkIRect dirtyRect = rt->getResolveRect(); 1.1714 + GrGLIRect r; 1.1715 + r.setRelativeTo(vp, dirtyRect.fLeft, dirtyRect.fTop, 1.1716 + dirtyRect.width(), dirtyRect.height(), target->origin()); 1.1717 + 1.1718 + GrAutoTRestore<ScissorState> asr; 1.1719 + if (GrGLCaps::kES_Apple_MSFBOType == this->glCaps().msFBOType()) { 1.1720 + // Apple's extension uses the scissor as the blit bounds. 1.1721 + asr.reset(&fScissorState); 1.1722 + fScissorState.fEnabled = true; 1.1723 + fScissorState.fRect = dirtyRect; 1.1724 + this->flushScissor(); 1.1725 + GL_CALL(ResolveMultisampleFramebuffer()); 1.1726 + } else { 1.1727 + if (GrGLCaps::kDesktop_EXT_MSFBOType == this->glCaps().msFBOType()) { 1.1728 + // This respects the scissor during the blit, so disable it.
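// [Illustrative sketch, not part of the patch] onGpuDrawPath() above uses the two-pass
// "stencil, then cover" scheme of GL_NV_path_rendering: a stencil pass winds the path into the
// stencil buffer, then a cover pass shades the marked pixels. A raw-extension sketch of the
// fill-only case, assuming a loader that exposes the NV entry points (the GR_GL_* wrappers in the
// patch ultimately resolve to calls of this shape):
#include <GL/glew.h>   // assumption: loader with GL_NV_path_rendering support
static void stencil_then_cover_fill(GLuint pathID, GLuint writeMask) {
    glStencilFillPathNV(pathID, GL_COUNT_UP_NV, writeMask);  // pass 1: write winding counts
    glCoverFillPathNV(pathID, GL_BOUNDING_BOX_NV);           // pass 2: shade the covered pixels
}
// (The patch's MSAA-resolve code continues directly below.)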
1.1729 + asr.reset(&fScissorState); 1.1730 + fScissorState.fEnabled = false; 1.1731 + this->flushScissor(); 1.1732 + } 1.1733 + int right = r.fLeft + r.fWidth; 1.1734 + int top = r.fBottom + r.fHeight; 1.1735 + GL_CALL(BlitFramebuffer(r.fLeft, r.fBottom, right, top, 1.1736 + r.fLeft, r.fBottom, right, top, 1.1737 + GR_GL_COLOR_BUFFER_BIT, GR_GL_NEAREST)); 1.1738 + } 1.1739 + } 1.1740 + rt->flagAsResolved(); 1.1741 + } 1.1742 +} 1.1743 + 1.1744 +namespace { 1.1745 + 1.1746 +GrGLenum gr_to_gl_stencil_func(GrStencilFunc basicFunc) { 1.1747 + static const GrGLenum gTable[] = { 1.1748 + GR_GL_ALWAYS, // kAlways_StencilFunc 1.1749 + GR_GL_NEVER, // kNever_StencilFunc 1.1750 + GR_GL_GREATER, // kGreater_StencilFunc 1.1751 + GR_GL_GEQUAL, // kGEqual_StencilFunc 1.1752 + GR_GL_LESS, // kLess_StencilFunc 1.1753 + GR_GL_LEQUAL, // kLEqual_StencilFunc, 1.1754 + GR_GL_EQUAL, // kEqual_StencilFunc, 1.1755 + GR_GL_NOTEQUAL, // kNotEqual_StencilFunc, 1.1756 + }; 1.1757 + GR_STATIC_ASSERT(GR_ARRAY_COUNT(gTable) == kBasicStencilFuncCount); 1.1758 + GR_STATIC_ASSERT(0 == kAlways_StencilFunc); 1.1759 + GR_STATIC_ASSERT(1 == kNever_StencilFunc); 1.1760 + GR_STATIC_ASSERT(2 == kGreater_StencilFunc); 1.1761 + GR_STATIC_ASSERT(3 == kGEqual_StencilFunc); 1.1762 + GR_STATIC_ASSERT(4 == kLess_StencilFunc); 1.1763 + GR_STATIC_ASSERT(5 == kLEqual_StencilFunc); 1.1764 + GR_STATIC_ASSERT(6 == kEqual_StencilFunc); 1.1765 + GR_STATIC_ASSERT(7 == kNotEqual_StencilFunc); 1.1766 + SkASSERT((unsigned) basicFunc < kBasicStencilFuncCount); 1.1767 + 1.1768 + return gTable[basicFunc]; 1.1769 +} 1.1770 + 1.1771 +GrGLenum gr_to_gl_stencil_op(GrStencilOp op) { 1.1772 + static const GrGLenum gTable[] = { 1.1773 + GR_GL_KEEP, // kKeep_StencilOp 1.1774 + GR_GL_REPLACE, // kReplace_StencilOp 1.1775 + GR_GL_INCR_WRAP, // kIncWrap_StencilOp 1.1776 + GR_GL_INCR, // kIncClamp_StencilOp 1.1777 + GR_GL_DECR_WRAP, // kDecWrap_StencilOp 1.1778 + GR_GL_DECR, // kDecClamp_StencilOp 1.1779 + GR_GL_ZERO, // kZero_StencilOp 1.1780 + GR_GL_INVERT, // kInvert_StencilOp 1.1781 + }; 1.1782 + GR_STATIC_ASSERT(GR_ARRAY_COUNT(gTable) == kStencilOpCount); 1.1783 + GR_STATIC_ASSERT(0 == kKeep_StencilOp); 1.1784 + GR_STATIC_ASSERT(1 == kReplace_StencilOp); 1.1785 + GR_STATIC_ASSERT(2 == kIncWrap_StencilOp); 1.1786 + GR_STATIC_ASSERT(3 == kIncClamp_StencilOp); 1.1787 + GR_STATIC_ASSERT(4 == kDecWrap_StencilOp); 1.1788 + GR_STATIC_ASSERT(5 == kDecClamp_StencilOp); 1.1789 + GR_STATIC_ASSERT(6 == kZero_StencilOp); 1.1790 + GR_STATIC_ASSERT(7 == kInvert_StencilOp); 1.1791 + SkASSERT((unsigned) op < kStencilOpCount); 1.1792 + return gTable[op]; 1.1793 +} 1.1794 + 1.1795 +void set_gl_stencil(const GrGLInterface* gl, 1.1796 + const GrStencilSettings& settings, 1.1797 + GrGLenum glFace, 1.1798 + GrStencilSettings::Face grFace) { 1.1799 + GrGLenum glFunc = gr_to_gl_stencil_func(settings.func(grFace)); 1.1800 + GrGLenum glFailOp = gr_to_gl_stencil_op(settings.failOp(grFace)); 1.1801 + GrGLenum glPassOp = gr_to_gl_stencil_op(settings.passOp(grFace)); 1.1802 + 1.1803 + GrGLint ref = settings.funcRef(grFace); 1.1804 + GrGLint mask = settings.funcMask(grFace); 1.1805 + GrGLint writeMask = settings.writeMask(grFace); 1.1806 + 1.1807 + if (GR_GL_FRONT_AND_BACK == glFace) { 1.1808 + // we call the combined func just in case separate stencil is not 1.1809 + // supported. 
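// [Illustrative sketch, not part of the patch] set_gl_stencil(), continued below, uses both forms:
// when two-sided stencil is unavailable the combined glStencilFunc/Op/Mask set front and back at
// once; otherwise the *Separate variants (core since GL 2.0) target one face. A raw-GL sketch of
// the per-face path (loader assumed):
#include <GL/glew.h>   // assumption
static void set_front_back_stencil_func(GLenum func, GLint ref, GLuint mask, GLuint writeMask) {
    glStencilFuncSeparate(GL_FRONT, func, ref, mask);
    glStencilMaskSeparate(GL_FRONT, writeMask);
    glStencilFuncSeparate(GL_BACK, func, ref, mask);
    glStencilMaskSeparate(GL_BACK, writeMask);
}
// (The patch's combined-vs-separate calls follow below.)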
1.1810 + GR_GL_CALL(gl, StencilFunc(glFunc, ref, mask)); 1.1811 + GR_GL_CALL(gl, StencilMask(writeMask)); 1.1812 + GR_GL_CALL(gl, StencilOp(glFailOp, glPassOp, glPassOp)); 1.1813 + } else { 1.1814 + GR_GL_CALL(gl, StencilFuncSeparate(glFace, glFunc, ref, mask)); 1.1815 + GR_GL_CALL(gl, StencilMaskSeparate(glFace, writeMask)); 1.1816 + GR_GL_CALL(gl, StencilOpSeparate(glFace, glFailOp, glPassOp, glPassOp)); 1.1817 + } 1.1818 +} 1.1819 +} 1.1820 + 1.1821 +void GrGpuGL::flushStencil(DrawType type) { 1.1822 + if (kStencilPath_DrawType != type && fHWStencilSettings != fStencilSettings) { 1.1823 + if (fStencilSettings.isDisabled()) { 1.1824 + if (kNo_TriState != fHWStencilTestEnabled) { 1.1825 + GL_CALL(Disable(GR_GL_STENCIL_TEST)); 1.1826 + fHWStencilTestEnabled = kNo_TriState; 1.1827 + } 1.1828 + } else { 1.1829 + if (kYes_TriState != fHWStencilTestEnabled) { 1.1830 + GL_CALL(Enable(GR_GL_STENCIL_TEST)); 1.1831 + fHWStencilTestEnabled = kYes_TriState; 1.1832 + } 1.1833 + } 1.1834 + if (!fStencilSettings.isDisabled()) { 1.1835 + if (this->caps()->twoSidedStencilSupport()) { 1.1836 + set_gl_stencil(this->glInterface(), 1.1837 + fStencilSettings, 1.1838 + GR_GL_FRONT, 1.1839 + GrStencilSettings::kFront_Face); 1.1840 + set_gl_stencil(this->glInterface(), 1.1841 + fStencilSettings, 1.1842 + GR_GL_BACK, 1.1843 + GrStencilSettings::kBack_Face); 1.1844 + } else { 1.1845 + set_gl_stencil(this->glInterface(), 1.1846 + fStencilSettings, 1.1847 + GR_GL_FRONT_AND_BACK, 1.1848 + GrStencilSettings::kFront_Face); 1.1849 + } 1.1850 + } 1.1851 + fHWStencilSettings = fStencilSettings; 1.1852 + } 1.1853 +} 1.1854 + 1.1855 +void GrGpuGL::flushAAState(DrawType type) { 1.1856 +// At least some ATI linux drivers will render GL_LINES incorrectly when MSAA state is enabled but 1.1857 +// the target is not multisampled. Single pixel wide lines are rendered thicker than 1 pixel wide. 1.1858 +#if 0 1.1859 + // Replace RT_HAS_MSAA with this definition once this driver bug is no longer a relevant concern 1.1860 + #define RT_HAS_MSAA rt->isMultisampled() 1.1861 +#else 1.1862 + #define RT_HAS_MSAA (rt->isMultisampled() || kDrawLines_DrawType == type) 1.1863 +#endif 1.1864 + 1.1865 + const GrRenderTarget* rt = this->getDrawState().getRenderTarget(); 1.1866 + if (kGL_GrGLStandard == this->glStandard()) { 1.1867 + // ES doesn't support toggling GL_MULTISAMPLE and doesn't have 1.1868 + // smooth lines. 1.1869 + // we prefer smooth lines over multisampled lines 1.1870 + bool smoothLines = false; 1.1871 + 1.1872 + if (kDrawLines_DrawType == type) { 1.1873 + smoothLines = this->willUseHWAALines(); 1.1874 + if (smoothLines) { 1.1875 + if (kYes_TriState != fHWAAState.fSmoothLineEnabled) { 1.1876 + GL_CALL(Enable(GR_GL_LINE_SMOOTH)); 1.1877 + fHWAAState.fSmoothLineEnabled = kYes_TriState; 1.1878 + // must disable msaa to use line smoothing 1.1879 + if (RT_HAS_MSAA && 1.1880 + kNo_TriState != fHWAAState.fMSAAEnabled) { 1.1881 + GL_CALL(Disable(GR_GL_MULTISAMPLE)); 1.1882 + fHWAAState.fMSAAEnabled = kNo_TriState; 1.1883 + } 1.1884 + } 1.1885 + } else { 1.1886 + if (kNo_TriState != fHWAAState.fSmoothLineEnabled) { 1.1887 + GL_CALL(Disable(GR_GL_LINE_SMOOTH)); 1.1888 + fHWAAState.fSmoothLineEnabled = kNo_TriState; 1.1889 + } 1.1890 + } 1.1891 + } 1.1892 + if (!smoothLines && RT_HAS_MSAA) { 1.1893 + // FIXME: GL_NV_pr doesn't seem to like MSAA disabled. The paths 1.1894 + // convex hulls of each segment appear to get filled. 
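// [Illustrative sketch, not part of the patch] The fHW*TriState members used above and below
// implement a three-valued cache: "unknown" after a reset forces the first glEnable/glDisable,
// after which redundant toggles are skipped. A minimal standalone sketch of the idiom (names here
// are hypothetical, loader assumed):
#include <GL/glew.h>   // assumption
enum CachedToggle { kUnknown_Toggle, kDisabled_Toggle, kEnabled_Toggle };
static void set_capability_cached(GLenum cap, bool enable, CachedToggle* cached) {
    CachedToggle want = enable ? kEnabled_Toggle : kDisabled_Toggle;
    if (*cached != want) {                     // kUnknown_Toggle never equals either wanted state
        if (enable) { glEnable(cap); } else { glDisable(cap); }
        *cached = want;
    }
}
// e.g. set_capability_cached(GL_MULTISAMPLE, useMSAA, &gMultisampleToggle);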
1.1895 + bool enableMSAA = kStencilPath_DrawType == type || 1.1896 + this->getDrawState().isHWAntialiasState(); 1.1897 + if (enableMSAA) { 1.1898 + if (kYes_TriState != fHWAAState.fMSAAEnabled) { 1.1899 + GL_CALL(Enable(GR_GL_MULTISAMPLE)); 1.1900 + fHWAAState.fMSAAEnabled = kYes_TriState; 1.1901 + } 1.1902 + } else { 1.1903 + if (kNo_TriState != fHWAAState.fMSAAEnabled) { 1.1904 + GL_CALL(Disable(GR_GL_MULTISAMPLE)); 1.1905 + fHWAAState.fMSAAEnabled = kNo_TriState; 1.1906 + } 1.1907 + } 1.1908 + } 1.1909 + } 1.1910 +} 1.1911 + 1.1912 +void GrGpuGL::flushPathStencilSettings(SkPath::FillType fill) { 1.1913 + GrStencilSettings pathStencilSettings; 1.1914 + this->getPathStencilSettingsForFillType(fill, &pathStencilSettings); 1.1915 + if (fHWPathStencilSettings != pathStencilSettings) { 1.1916 + // Just the func, ref, and mask is set here. The op and write mask are params to the call 1.1917 + // that draws the path to the SB (glStencilFillPath) 1.1918 + GrGLenum func = 1.1919 + gr_to_gl_stencil_func(pathStencilSettings.func(GrStencilSettings::kFront_Face)); 1.1920 + GL_CALL(PathStencilFunc(func, 1.1921 + pathStencilSettings.funcRef(GrStencilSettings::kFront_Face), 1.1922 + pathStencilSettings.funcMask(GrStencilSettings::kFront_Face))); 1.1923 + 1.1924 + fHWPathStencilSettings = pathStencilSettings; 1.1925 + } 1.1926 +} 1.1927 + 1.1928 +void GrGpuGL::flushBlend(bool isLines, 1.1929 + GrBlendCoeff srcCoeff, 1.1930 + GrBlendCoeff dstCoeff) { 1.1931 + if (isLines && this->willUseHWAALines()) { 1.1932 + if (kYes_TriState != fHWBlendState.fEnabled) { 1.1933 + GL_CALL(Enable(GR_GL_BLEND)); 1.1934 + fHWBlendState.fEnabled = kYes_TriState; 1.1935 + } 1.1936 + if (kSA_GrBlendCoeff != fHWBlendState.fSrcCoeff || 1.1937 + kISA_GrBlendCoeff != fHWBlendState.fDstCoeff) { 1.1938 + GL_CALL(BlendFunc(gXfermodeCoeff2Blend[kSA_GrBlendCoeff], 1.1939 + gXfermodeCoeff2Blend[kISA_GrBlendCoeff])); 1.1940 + fHWBlendState.fSrcCoeff = kSA_GrBlendCoeff; 1.1941 + fHWBlendState.fDstCoeff = kISA_GrBlendCoeff; 1.1942 + } 1.1943 + } else { 1.1944 + // any optimization to disable blending should 1.1945 + // have already been applied and tweaked the coeffs 1.1946 + // to (1, 0). 
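// [Illustrative sketch, not part of the patch] flushBlend(), continued below, applies the
// (ONE, ZERO) convention mentioned in the comment above: when the coefficients reduce to
// src*1 + dst*0 the output equals the source, so GL_BLEND is disabled outright instead of
// programming a no-op blend. A raw-GL sketch (loader assumed):
#include <GL/glew.h>   // assumption
static void flush_blend_raw(GLenum srcCoeff, GLenum dstCoeff) {
    if (GL_ONE == srcCoeff && GL_ZERO == dstCoeff) {
        glDisable(GL_BLEND);                   // blending would be an identity; skip it entirely
    } else {
        glEnable(GL_BLEND);
        glBlendFunc(srcCoeff, dstCoeff);
    }
}
// (The patch's cached version of this logic follows below.)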
1.1947 + bool blendOff = kOne_GrBlendCoeff == srcCoeff && 1.1948 + kZero_GrBlendCoeff == dstCoeff; 1.1949 + if (blendOff) { 1.1950 + if (kNo_TriState != fHWBlendState.fEnabled) { 1.1951 + GL_CALL(Disable(GR_GL_BLEND)); 1.1952 + fHWBlendState.fEnabled = kNo_TriState; 1.1953 + } 1.1954 + } else { 1.1955 + if (kYes_TriState != fHWBlendState.fEnabled) { 1.1956 + GL_CALL(Enable(GR_GL_BLEND)); 1.1957 + fHWBlendState.fEnabled = kYes_TriState; 1.1958 + } 1.1959 + if (fHWBlendState.fSrcCoeff != srcCoeff || 1.1960 + fHWBlendState.fDstCoeff != dstCoeff) { 1.1961 + GL_CALL(BlendFunc(gXfermodeCoeff2Blend[srcCoeff], 1.1962 + gXfermodeCoeff2Blend[dstCoeff])); 1.1963 + fHWBlendState.fSrcCoeff = srcCoeff; 1.1964 + fHWBlendState.fDstCoeff = dstCoeff; 1.1965 + } 1.1966 + GrColor blendConst = this->getDrawState().getBlendConstant(); 1.1967 + if ((BlendCoeffReferencesConstant(srcCoeff) || 1.1968 + BlendCoeffReferencesConstant(dstCoeff)) && 1.1969 + (!fHWBlendState.fConstColorValid || 1.1970 + fHWBlendState.fConstColor != blendConst)) { 1.1971 + GrGLfloat c[4]; 1.1972 + GrColorToRGBAFloat(blendConst, c); 1.1973 + GL_CALL(BlendColor(c[0], c[1], c[2], c[3])); 1.1974 + fHWBlendState.fConstColor = blendConst; 1.1975 + fHWBlendState.fConstColorValid = true; 1.1976 + } 1.1977 + } 1.1978 + } 1.1979 +} 1.1980 + 1.1981 +static inline GrGLenum tile_to_gl_wrap(SkShader::TileMode tm) { 1.1982 + static const GrGLenum gWrapModes[] = { 1.1983 + GR_GL_CLAMP_TO_EDGE, 1.1984 + GR_GL_REPEAT, 1.1985 + GR_GL_MIRRORED_REPEAT 1.1986 + }; 1.1987 + GR_STATIC_ASSERT(SkShader::kTileModeCount == SK_ARRAY_COUNT(gWrapModes)); 1.1988 + GR_STATIC_ASSERT(0 == SkShader::kClamp_TileMode); 1.1989 + GR_STATIC_ASSERT(1 == SkShader::kRepeat_TileMode); 1.1990 + GR_STATIC_ASSERT(2 == SkShader::kMirror_TileMode); 1.1991 + return gWrapModes[tm]; 1.1992 +} 1.1993 + 1.1994 +void GrGpuGL::bindTexture(int unitIdx, const GrTextureParams& params, GrGLTexture* texture) { 1.1995 + SkASSERT(NULL != texture); 1.1996 + 1.1997 + // If we created a rt/tex and rendered to it without using a texture and now we're texturing 1.1998 + // from the rt it will still be the last bound texture, but it needs resolving. So keep this 1.1999 + // out of the "last != next" check. 
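// [Illustrative sketch, not part of the patch] bindTexture(), continued below, regenerates mipmaps
// lazily: the texture carries a "mipmaps are dirty" flag that is set whenever it is rendered to and
// cleared after glGenerateMipmap. A raw-GL sketch of that lazy step for the currently bound 2D
// texture; the dirty flag here is a stand-in for GrGLTexture::mipMapsAreDirty() (loader assumed):
#include <GL/glew.h>   // assumption
static void ensure_trilinear_with_fresh_mips(bool* mipsDirty) {
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    if (*mipsDirty) {
        glGenerateMipmap(GL_TEXTURE_2D);       // rebuild the chain only when the base level changed
        *mipsDirty = false;
    }
}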
1.2000 + GrGLRenderTarget* texRT = static_cast<GrGLRenderTarget*>(texture->asRenderTarget()); 1.2001 + if (NULL != texRT) { 1.2002 + this->onResolveRenderTarget(texRT); 1.2003 + } 1.2004 + 1.2005 + if (fHWBoundTextures[unitIdx] != texture) { 1.2006 + this->setTextureUnit(unitIdx); 1.2007 + GL_CALL(BindTexture(GR_GL_TEXTURE_2D, texture->textureID())); 1.2008 + fHWBoundTextures[unitIdx] = texture; 1.2009 + } 1.2010 + 1.2011 + ResetTimestamp timestamp; 1.2012 + const GrGLTexture::TexParams& oldTexParams = texture->getCachedTexParams(×tamp); 1.2013 + bool setAll = timestamp < this->getResetTimestamp(); 1.2014 + GrGLTexture::TexParams newTexParams; 1.2015 + 1.2016 + static GrGLenum glMinFilterModes[] = { 1.2017 + GR_GL_NEAREST, 1.2018 + GR_GL_LINEAR, 1.2019 + GR_GL_LINEAR_MIPMAP_LINEAR 1.2020 + }; 1.2021 + static GrGLenum glMagFilterModes[] = { 1.2022 + GR_GL_NEAREST, 1.2023 + GR_GL_LINEAR, 1.2024 + GR_GL_LINEAR 1.2025 + }; 1.2026 + GrTextureParams::FilterMode filterMode = params.filterMode(); 1.2027 + if (!this->caps()->mipMapSupport() && GrTextureParams::kMipMap_FilterMode == filterMode) { 1.2028 + filterMode = GrTextureParams::kBilerp_FilterMode; 1.2029 + } 1.2030 + newTexParams.fMinFilter = glMinFilterModes[filterMode]; 1.2031 + newTexParams.fMagFilter = glMagFilterModes[filterMode]; 1.2032 + 1.2033 + if (GrTextureParams::kMipMap_FilterMode == filterMode && texture->mipMapsAreDirty()) { 1.2034 +// GL_CALL(Hint(GR_GL_GENERATE_MIPMAP_HINT,GR_GL_NICEST)); 1.2035 + GL_CALL(GenerateMipmap(GR_GL_TEXTURE_2D)); 1.2036 + texture->dirtyMipMaps(false); 1.2037 + } 1.2038 + 1.2039 + newTexParams.fWrapS = tile_to_gl_wrap(params.getTileModeX()); 1.2040 + newTexParams.fWrapT = tile_to_gl_wrap(params.getTileModeY()); 1.2041 + memcpy(newTexParams.fSwizzleRGBA, 1.2042 + GrGLShaderBuilder::GetTexParamSwizzle(texture->config(), this->glCaps()), 1.2043 + sizeof(newTexParams.fSwizzleRGBA)); 1.2044 + if (setAll || newTexParams.fMagFilter != oldTexParams.fMagFilter) { 1.2045 + this->setTextureUnit(unitIdx); 1.2046 + GL_CALL(TexParameteri(GR_GL_TEXTURE_2D, 1.2047 + GR_GL_TEXTURE_MAG_FILTER, 1.2048 + newTexParams.fMagFilter)); 1.2049 + } 1.2050 + if (setAll || newTexParams.fMinFilter != oldTexParams.fMinFilter) { 1.2051 + this->setTextureUnit(unitIdx); 1.2052 + GL_CALL(TexParameteri(GR_GL_TEXTURE_2D, 1.2053 + GR_GL_TEXTURE_MIN_FILTER, 1.2054 + newTexParams.fMinFilter)); 1.2055 + } 1.2056 + if (setAll || newTexParams.fWrapS != oldTexParams.fWrapS) { 1.2057 + this->setTextureUnit(unitIdx); 1.2058 + GL_CALL(TexParameteri(GR_GL_TEXTURE_2D, 1.2059 + GR_GL_TEXTURE_WRAP_S, 1.2060 + newTexParams.fWrapS)); 1.2061 + } 1.2062 + if (setAll || newTexParams.fWrapT != oldTexParams.fWrapT) { 1.2063 + this->setTextureUnit(unitIdx); 1.2064 + GL_CALL(TexParameteri(GR_GL_TEXTURE_2D, 1.2065 + GR_GL_TEXTURE_WRAP_T, 1.2066 + newTexParams.fWrapT)); 1.2067 + } 1.2068 + if (this->glCaps().textureSwizzleSupport() && 1.2069 + (setAll || memcmp(newTexParams.fSwizzleRGBA, 1.2070 + oldTexParams.fSwizzleRGBA, 1.2071 + sizeof(newTexParams.fSwizzleRGBA)))) { 1.2072 + this->setTextureUnit(unitIdx); 1.2073 + if (this->glStandard() == kGLES_GrGLStandard) { 1.2074 + // ES3 added swizzle support but not GL_TEXTURE_SWIZZLE_RGBA. 
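// [Illustrative sketch, not part of the patch] The swizzle code below branches on exactly this
// point: desktop GL (3.3 / ARB_texture_swizzle) accepts the packed GL_TEXTURE_SWIZZLE_RGBA pname,
// while ES 3.0 only has the four per-channel pnames. A raw-GL sketch that broadcasts the red
// channel, a typical choice for single-channel textures; the swizzle Skia actually picks comes
// from its shader builder (loader assumed):
#include <GL/glew.h>   // assumption
static void swizzle_rrrr(bool isDesktopGL) {
    const GLint swizzle[4] = { GL_RED, GL_RED, GL_RED, GL_RED };
    if (isDesktopGL) {
        glTexParameteriv(GL_TEXTURE_2D, GL_TEXTURE_SWIZZLE_RGBA, swizzle);
    } else {
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_SWIZZLE_R, swizzle[0]);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_SWIZZLE_G, swizzle[1]);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_SWIZZLE_B, swizzle[2]);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_SWIZZLE_A, swizzle[3]);
    }
}
// (The patch's per-channel calls follow below.)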
1.2075 + const GrGLenum* swizzle = newTexParams.fSwizzleRGBA; 1.2076 + GL_CALL(TexParameteri(GR_GL_TEXTURE_2D, GR_GL_TEXTURE_SWIZZLE_R, swizzle[0])); 1.2077 + GL_CALL(TexParameteri(GR_GL_TEXTURE_2D, GR_GL_TEXTURE_SWIZZLE_G, swizzle[1])); 1.2078 + GL_CALL(TexParameteri(GR_GL_TEXTURE_2D, GR_GL_TEXTURE_SWIZZLE_B, swizzle[2])); 1.2079 + GL_CALL(TexParameteri(GR_GL_TEXTURE_2D, GR_GL_TEXTURE_SWIZZLE_A, swizzle[3])); 1.2080 + } else { 1.2081 + GR_STATIC_ASSERT(sizeof(newTexParams.fSwizzleRGBA[0]) == sizeof(GrGLint)); 1.2082 + const GrGLint* swizzle = reinterpret_cast<const GrGLint*>(newTexParams.fSwizzleRGBA); 1.2083 + GL_CALL(TexParameteriv(GR_GL_TEXTURE_2D, GR_GL_TEXTURE_SWIZZLE_RGBA, swizzle)); 1.2084 + } 1.2085 + } 1.2086 + texture->setCachedTexParams(newTexParams, this->getResetTimestamp()); 1.2087 +} 1.2088 + 1.2089 +void GrGpuGL::setProjectionMatrix(const SkMatrix& matrix, 1.2090 + const SkISize& renderTargetSize, 1.2091 + GrSurfaceOrigin renderTargetOrigin) { 1.2092 + 1.2093 + SkASSERT(this->glCaps().fixedFunctionSupport()); 1.2094 + 1.2095 + if (renderTargetOrigin == fHWProjectionMatrixState.fRenderTargetOrigin && 1.2096 + renderTargetSize == fHWProjectionMatrixState.fRenderTargetSize && 1.2097 + matrix.cheapEqualTo(fHWProjectionMatrixState.fViewMatrix)) { 1.2098 + return; 1.2099 + } 1.2100 + 1.2101 + fHWProjectionMatrixState.fViewMatrix = matrix; 1.2102 + fHWProjectionMatrixState.fRenderTargetSize = renderTargetSize; 1.2103 + fHWProjectionMatrixState.fRenderTargetOrigin = renderTargetOrigin; 1.2104 + 1.2105 + GrGLfloat glMatrix[4 * 4]; 1.2106 + fHWProjectionMatrixState.getGLMatrix<4>(glMatrix); 1.2107 + GL_CALL(MatrixMode(GR_GL_PROJECTION)); 1.2108 + GL_CALL(LoadMatrixf(glMatrix)); 1.2109 +} 1.2110 + 1.2111 +void GrGpuGL::enableTexGen(int unitIdx, 1.2112 + TexGenComponents components, 1.2113 + const GrGLfloat* coefficients) { 1.2114 + SkASSERT(this->glCaps().fixedFunctionSupport()); 1.2115 + SkASSERT(components >= kS_TexGenComponents && components <= kSTR_TexGenComponents); 1.2116 + SkASSERT(this->glCaps().maxFixedFunctionTextureCoords() >= unitIdx); 1.2117 + 1.2118 + if (GR_GL_OBJECT_LINEAR == fHWTexGenSettings[unitIdx].fMode && 1.2119 + components == fHWTexGenSettings[unitIdx].fNumComponents && 1.2120 + !memcmp(coefficients, fHWTexGenSettings[unitIdx].fCoefficients, 1.2121 + 3 * components * sizeof(GrGLfloat))) { 1.2122 + return; 1.2123 + } 1.2124 + 1.2125 + this->setTextureUnit(unitIdx); 1.2126 + 1.2127 + if (GR_GL_OBJECT_LINEAR != fHWTexGenSettings[unitIdx].fMode) { 1.2128 + for (int i = 0; i < 4; i++) { 1.2129 + GL_CALL(TexGeni(GR_GL_S + i, GR_GL_TEXTURE_GEN_MODE, GR_GL_OBJECT_LINEAR)); 1.2130 + } 1.2131 + fHWTexGenSettings[unitIdx].fMode = GR_GL_OBJECT_LINEAR; 1.2132 + } 1.2133 + 1.2134 + for (int i = fHWTexGenSettings[unitIdx].fNumComponents; i < components; i++) { 1.2135 + GL_CALL(Enable(GR_GL_TEXTURE_GEN_S + i)); 1.2136 + } 1.2137 + for (int i = components; i < fHWTexGenSettings[unitIdx].fNumComponents; i++) { 1.2138 + GL_CALL(Disable(GR_GL_TEXTURE_GEN_S + i)); 1.2139 + } 1.2140 + fHWTexGenSettings[unitIdx].fNumComponents = components; 1.2141 + 1.2142 + for (int i = 0; i < components; i++) { 1.2143 + GrGLfloat plane[] = {coefficients[0 + 3 * i], 1.2144 + coefficients[1 + 3 * i], 1.2145 + 0, 1.2146 + coefficients[2 + 3 * i]}; 1.2147 + GL_CALL(TexGenfv(GR_GL_S + i, GR_GL_OBJECT_PLANE, plane)); 1.2148 + } 1.2149 + 1.2150 + if (this->caps()->pathRenderingSupport()) { 1.2151 + GL_CALL(PathTexGen(GR_GL_TEXTURE0 + unitIdx, 1.2152 + GR_GL_OBJECT_LINEAR, 1.2153 + components, 1.2154 + 
coefficients)); 1.2155 + } 1.2156 + 1.2157 + memcpy(fHWTexGenSettings[unitIdx].fCoefficients, coefficients, 1.2158 + 3 * components * sizeof(GrGLfloat)); 1.2159 +} 1.2160 + 1.2161 +void GrGpuGL::enableTexGen(int unitIdx, TexGenComponents components, const SkMatrix& matrix) { 1.2162 + GrGLfloat coefficients[3 * 3]; 1.2163 + SkASSERT(this->glCaps().fixedFunctionSupport()); 1.2164 + SkASSERT(components >= kS_TexGenComponents && components <= kSTR_TexGenComponents); 1.2165 + 1.2166 + coefficients[0] = SkScalarToFloat(matrix[SkMatrix::kMScaleX]); 1.2167 + coefficients[1] = SkScalarToFloat(matrix[SkMatrix::kMSkewX]); 1.2168 + coefficients[2] = SkScalarToFloat(matrix[SkMatrix::kMTransX]); 1.2169 + 1.2170 + if (components >= kST_TexGenComponents) { 1.2171 + coefficients[3] = SkScalarToFloat(matrix[SkMatrix::kMSkewY]); 1.2172 + coefficients[4] = SkScalarToFloat(matrix[SkMatrix::kMScaleY]); 1.2173 + coefficients[5] = SkScalarToFloat(matrix[SkMatrix::kMTransY]); 1.2174 + } 1.2175 + 1.2176 + if (components >= kSTR_TexGenComponents) { 1.2177 + coefficients[6] = SkScalarToFloat(matrix[SkMatrix::kMPersp0]); 1.2178 + coefficients[7] = SkScalarToFloat(matrix[SkMatrix::kMPersp1]); 1.2179 + coefficients[8] = SkScalarToFloat(matrix[SkMatrix::kMPersp2]); 1.2180 + } 1.2181 + 1.2182 + enableTexGen(unitIdx, components, coefficients); 1.2183 +} 1.2184 + 1.2185 +void GrGpuGL::flushTexGenSettings(int numUsedTexCoordSets) { 1.2186 + SkASSERT(this->glCaps().fixedFunctionSupport()); 1.2187 + SkASSERT(this->glCaps().maxFixedFunctionTextureCoords() >= numUsedTexCoordSets); 1.2188 + 1.2189 + // Only write the inactive tex gens, since active tex gens were written 1.2190 + // when they were enabled. 1.2191 + 1.2192 + SkDEBUGCODE( 1.2193 + for (int i = 0; i < numUsedTexCoordSets; i++) { 1.2194 + SkASSERT(0 != fHWTexGenSettings[i].fNumComponents); 1.2195 + } 1.2196 + ); 1.2197 + 1.2198 + for (int i = numUsedTexCoordSets; i < fHWActiveTexGenSets; i++) { 1.2199 + SkASSERT(0 != fHWTexGenSettings[i].fNumComponents); 1.2200 + 1.2201 + this->setTextureUnit(i); 1.2202 + for (int j = 0; j < fHWTexGenSettings[i].fNumComponents; j++) { 1.2203 + GL_CALL(Disable(GR_GL_TEXTURE_GEN_S + j)); 1.2204 + } 1.2205 + 1.2206 + if (this->caps()->pathRenderingSupport()) { 1.2207 + GL_CALL(PathTexGen(GR_GL_TEXTURE0 + i, GR_GL_NONE, 0, NULL)); 1.2208 + } 1.2209 + 1.2210 + fHWTexGenSettings[i].fNumComponents = 0; 1.2211 + } 1.2212 + 1.2213 + fHWActiveTexGenSets = numUsedTexCoordSets; 1.2214 +} 1.2215 + 1.2216 +void GrGpuGL::flushMiscFixedFunctionState() { 1.2217 + 1.2218 + const GrDrawState& drawState = this->getDrawState(); 1.2219 + 1.2220 + if (drawState.isDitherState()) { 1.2221 + if (kYes_TriState != fHWDitherEnabled) { 1.2222 + GL_CALL(Enable(GR_GL_DITHER)); 1.2223 + fHWDitherEnabled = kYes_TriState; 1.2224 + } 1.2225 + } else { 1.2226 + if (kNo_TriState != fHWDitherEnabled) { 1.2227 + GL_CALL(Disable(GR_GL_DITHER)); 1.2228 + fHWDitherEnabled = kNo_TriState; 1.2229 + } 1.2230 + } 1.2231 + 1.2232 + if (drawState.isColorWriteDisabled()) { 1.2233 + if (kNo_TriState != fHWWriteToColor) { 1.2234 + GL_CALL(ColorMask(GR_GL_FALSE, GR_GL_FALSE, 1.2235 + GR_GL_FALSE, GR_GL_FALSE)); 1.2236 + fHWWriteToColor = kNo_TriState; 1.2237 + } 1.2238 + } else { 1.2239 + if (kYes_TriState != fHWWriteToColor) { 1.2240 + GL_CALL(ColorMask(GR_GL_TRUE, GR_GL_TRUE, GR_GL_TRUE, GR_GL_TRUE)); 1.2241 + fHWWriteToColor = kYes_TriState; 1.2242 + } 1.2243 + } 1.2244 + 1.2245 + if (fHWDrawFace != drawState.getDrawFace()) { 1.2246 + switch (this->getDrawState().getDrawFace()) { 
1.2247 + case GrDrawState::kCCW_DrawFace: 1.2248 + GL_CALL(Enable(GR_GL_CULL_FACE)); 1.2249 + GL_CALL(CullFace(GR_GL_BACK)); 1.2250 + break; 1.2251 + case GrDrawState::kCW_DrawFace: 1.2252 + GL_CALL(Enable(GR_GL_CULL_FACE)); 1.2253 + GL_CALL(CullFace(GR_GL_FRONT)); 1.2254 + break; 1.2255 + case GrDrawState::kBoth_DrawFace: 1.2256 + GL_CALL(Disable(GR_GL_CULL_FACE)); 1.2257 + break; 1.2258 + default: 1.2259 + GrCrash("Unknown draw face."); 1.2260 + } 1.2261 + fHWDrawFace = drawState.getDrawFace(); 1.2262 + } 1.2263 +} 1.2264 + 1.2265 +void GrGpuGL::notifyRenderTargetDelete(GrRenderTarget* renderTarget) { 1.2266 + SkASSERT(NULL != renderTarget); 1.2267 + if (fHWBoundRenderTarget == renderTarget) { 1.2268 + fHWBoundRenderTarget = NULL; 1.2269 + } 1.2270 +} 1.2271 + 1.2272 +void GrGpuGL::notifyTextureDelete(GrGLTexture* texture) { 1.2273 + for (int s = 0; s < fHWBoundTextures.count(); ++s) { 1.2274 + if (fHWBoundTextures[s] == texture) { 1.2275 + // deleting bound texture does implied bind to 0 1.2276 + fHWBoundTextures[s] = NULL; 1.2277 + } 1.2278 + } 1.2279 +} 1.2280 + 1.2281 +bool GrGpuGL::configToGLFormats(GrPixelConfig config, 1.2282 + bool getSizedInternalFormat, 1.2283 + GrGLenum* internalFormat, 1.2284 + GrGLenum* externalFormat, 1.2285 + GrGLenum* externalType) { 1.2286 + GrGLenum dontCare; 1.2287 + if (NULL == internalFormat) { 1.2288 + internalFormat = &dontCare; 1.2289 + } 1.2290 + if (NULL == externalFormat) { 1.2291 + externalFormat = &dontCare; 1.2292 + } 1.2293 + if (NULL == externalType) { 1.2294 + externalType = &dontCare; 1.2295 + } 1.2296 + 1.2297 + switch (config) { 1.2298 + case kRGBA_8888_GrPixelConfig: 1.2299 + *internalFormat = GR_GL_RGBA; 1.2300 + *externalFormat = GR_GL_RGBA; 1.2301 + if (getSizedInternalFormat) { 1.2302 + *internalFormat = GR_GL_RGBA8; 1.2303 + } else { 1.2304 + *internalFormat = GR_GL_RGBA; 1.2305 + } 1.2306 + *externalType = GR_GL_UNSIGNED_BYTE; 1.2307 + break; 1.2308 + case kBGRA_8888_GrPixelConfig: 1.2309 + if (!this->glCaps().bgraFormatSupport()) { 1.2310 + return false; 1.2311 + } 1.2312 + if (this->glCaps().bgraIsInternalFormat()) { 1.2313 + if (getSizedInternalFormat) { 1.2314 + *internalFormat = GR_GL_BGRA8; 1.2315 + } else { 1.2316 + *internalFormat = GR_GL_BGRA; 1.2317 + } 1.2318 + } else { 1.2319 + if (getSizedInternalFormat) { 1.2320 + *internalFormat = GR_GL_RGBA8; 1.2321 + } else { 1.2322 + *internalFormat = GR_GL_RGBA; 1.2323 + } 1.2324 + } 1.2325 + *externalFormat = GR_GL_BGRA; 1.2326 + *externalType = GR_GL_UNSIGNED_BYTE; 1.2327 + break; 1.2328 + case kRGB_565_GrPixelConfig: 1.2329 + *internalFormat = GR_GL_RGB; 1.2330 + *externalFormat = GR_GL_RGB; 1.2331 + if (getSizedInternalFormat) { 1.2332 + if (this->glStandard() == kGL_GrGLStandard) { 1.2333 + return false; 1.2334 + } else { 1.2335 + *internalFormat = GR_GL_RGB565; 1.2336 + } 1.2337 + } else { 1.2338 + *internalFormat = GR_GL_RGB; 1.2339 + } 1.2340 + *externalType = GR_GL_UNSIGNED_SHORT_5_6_5; 1.2341 + break; 1.2342 + case kRGBA_4444_GrPixelConfig: 1.2343 + *internalFormat = GR_GL_RGBA; 1.2344 + *externalFormat = GR_GL_RGBA; 1.2345 + if (getSizedInternalFormat) { 1.2346 + *internalFormat = GR_GL_RGBA4; 1.2347 + } else { 1.2348 + *internalFormat = GR_GL_RGBA; 1.2349 + } 1.2350 + *externalType = GR_GL_UNSIGNED_SHORT_4_4_4_4; 1.2351 + break; 1.2352 + case kIndex_8_GrPixelConfig: 1.2353 + if (this->caps()->eightBitPaletteSupport()) { 1.2354 + *internalFormat = GR_GL_PALETTE8_RGBA8; 1.2355 + // glCompressedTexImage doesn't take external params 1.2356 + *externalFormat = 
GR_GL_PALETTE8_RGBA8; 1.2357 + // no sized/unsized internal format distinction here 1.2358 + *internalFormat = GR_GL_PALETTE8_RGBA8; 1.2359 + // unused with CompressedTexImage 1.2360 + *externalType = GR_GL_UNSIGNED_BYTE; 1.2361 + } else { 1.2362 + return false; 1.2363 + } 1.2364 + break; 1.2365 + case kAlpha_8_GrPixelConfig: 1.2366 + if (this->glCaps().textureRedSupport()) { 1.2367 + *internalFormat = GR_GL_RED; 1.2368 + *externalFormat = GR_GL_RED; 1.2369 + if (getSizedInternalFormat) { 1.2370 + *internalFormat = GR_GL_R8; 1.2371 + } else { 1.2372 + *internalFormat = GR_GL_RED; 1.2373 + } 1.2374 + *externalType = GR_GL_UNSIGNED_BYTE; 1.2375 + } else { 1.2376 + *internalFormat = GR_GL_ALPHA; 1.2377 + *externalFormat = GR_GL_ALPHA; 1.2378 + if (getSizedInternalFormat) { 1.2379 + *internalFormat = GR_GL_ALPHA8; 1.2380 + } else { 1.2381 + *internalFormat = GR_GL_ALPHA; 1.2382 + } 1.2383 + *externalType = GR_GL_UNSIGNED_BYTE; 1.2384 + } 1.2385 + break; 1.2386 + default: 1.2387 + return false; 1.2388 + } 1.2389 + return true; 1.2390 +} 1.2391 + 1.2392 +void GrGpuGL::setTextureUnit(int unit) { 1.2393 + SkASSERT(unit >= 0 && unit < fHWBoundTextures.count()); 1.2394 + if (unit != fHWActiveTextureUnitIdx) { 1.2395 + GL_CALL(ActiveTexture(GR_GL_TEXTURE0 + unit)); 1.2396 + fHWActiveTextureUnitIdx = unit; 1.2397 + } 1.2398 +} 1.2399 + 1.2400 +void GrGpuGL::setScratchTextureUnit() { 1.2401 + // Bind the last texture unit since it is the least likely to be used by GrGLProgram. 1.2402 + int lastUnitIdx = fHWBoundTextures.count() - 1; 1.2403 + if (lastUnitIdx != fHWActiveTextureUnitIdx) { 1.2404 + GL_CALL(ActiveTexture(GR_GL_TEXTURE0 + lastUnitIdx)); 1.2405 + fHWActiveTextureUnitIdx = lastUnitIdx; 1.2406 + } 1.2407 + // clear out the this field so that if a program does use this unit it will rebind the correct 1.2408 + // texture. 1.2409 + fHWBoundTextures[lastUnitIdx] = NULL; 1.2410 +} 1.2411 + 1.2412 +namespace { 1.2413 +// Determines whether glBlitFramebuffer could be used between src and dst. 1.2414 +inline bool can_blit_framebuffer(const GrSurface* dst, 1.2415 + const GrSurface* src, 1.2416 + const GrGpuGL* gpu, 1.2417 + bool* wouldNeedTempFBO = NULL) { 1.2418 + if (gpu->glCaps().isConfigRenderable(dst->config(), dst->desc().fSampleCnt > 0) && 1.2419 + gpu->glCaps().isConfigRenderable(src->config(), src->desc().fSampleCnt > 0) && 1.2420 + gpu->glCaps().usesMSAARenderBuffers()) { 1.2421 + // ES3 doesn't allow framebuffer blits when the src has MSAA and the configs don't match 1.2422 + // or the rects are not the same (not just the same size but have the same edges). 1.2423 + if (GrGLCaps::kES_3_0_MSFBOType == gpu->glCaps().msFBOType() && 1.2424 + (src->desc().fSampleCnt > 0 || src->config() != dst->config())) { 1.2425 + return false; 1.2426 + } 1.2427 + if (NULL != wouldNeedTempFBO) { 1.2428 + *wouldNeedTempFBO = NULL == dst->asRenderTarget() || NULL == src->asRenderTarget(); 1.2429 + } 1.2430 + return true; 1.2431 + } else { 1.2432 + return false; 1.2433 + } 1.2434 +} 1.2435 + 1.2436 +inline bool can_copy_texsubimage(const GrSurface* dst, 1.2437 + const GrSurface* src, 1.2438 + const GrGpuGL* gpu, 1.2439 + bool* wouldNeedTempFBO = NULL) { 1.2440 + // Table 3.9 of the ES2 spec indicates the supported formats with CopyTexSubImage 1.2441 + // and BGRA isn't in the spec. There doesn't appear to be any extension that adds it. Perhaps 1.2442 + // many drivers would allow it to work, but ANGLE does not. 
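// [Illustrative sketch, not part of the patch] can_copy_texsubimage() above gates the fast path
// used later in onCopySurface(): with the source bound as the READ framebuffer and the destination
// texture bound, a region of the framebuffer is copied straight into the texture on the GPU.
// A raw-GL sketch; x/y are in GL's bottom-left framebuffer coordinates (loader assumed):
#include <GL/glew.h>   // assumption
static void copy_fbo_region_to_bound_texture(GLuint srcFBO, int dstX, int dstY,
                                             int srcX, int srcY, int w, int h) {
    glBindFramebuffer(GL_READ_FRAMEBUFFER, srcFBO);
    glCopyTexSubImage2D(GL_TEXTURE_2D, 0 /*level*/, dstX, dstY, srcX, srcY, w, h);
}
// (can_copy_texsubimage() continues directly below.)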
1.2443 + if (kGLES_GrGLStandard == gpu->glStandard() && gpu->glCaps().bgraIsInternalFormat() && 1.2444 + (kBGRA_8888_GrPixelConfig == dst->config() || kBGRA_8888_GrPixelConfig == src->config())) { 1.2445 + return false; 1.2446 + } 1.2447 + const GrGLRenderTarget* dstRT = static_cast<const GrGLRenderTarget*>(dst->asRenderTarget()); 1.2448 + // If dst is multisampled (and uses an extension where there is a separate MSAA renderbuffer) 1.2449 + // then we don't want to copy to the texture but to the MSAA buffer. 1.2450 + if (NULL != dstRT && dstRT->renderFBOID() != dstRT->textureFBOID()) { 1.2451 + return false; 1.2452 + } 1.2453 + const GrGLRenderTarget* srcRT = static_cast<const GrGLRenderTarget*>(src->asRenderTarget()); 1.2454 + // If the src is multisampled (and uses an extension where there is a separate MSAA 1.2455 + // renderbuffer) then it is an invalid operation to call CopyTexSubImage 1.2456 + if (NULL != srcRT && srcRT->renderFBOID() != srcRT->textureFBOID()) { 1.2457 + return false; 1.2458 + } 1.2459 + if (gpu->glCaps().isConfigRenderable(src->config(), src->desc().fSampleCnt > 0) && 1.2460 + NULL != dst->asTexture() && 1.2461 + dst->origin() == src->origin() && 1.2462 + kIndex_8_GrPixelConfig != src->config()) { 1.2463 + if (NULL != wouldNeedTempFBO) { 1.2464 + *wouldNeedTempFBO = NULL == src->asRenderTarget(); 1.2465 + } 1.2466 + return true; 1.2467 + } else { 1.2468 + return false; 1.2469 + } 1.2470 +} 1.2471 + 1.2472 +// If a temporary FBO was created, its non-zero ID is returned. The viewport that the copy rect is 1.2473 +// relative to is output. 1.2474 +inline GrGLuint bind_surface_as_fbo(const GrGLInterface* gl, 1.2475 + GrSurface* surface, 1.2476 + GrGLenum fboTarget, 1.2477 + GrGLIRect* viewport) { 1.2478 + GrGLRenderTarget* rt = static_cast<GrGLRenderTarget*>(surface->asRenderTarget()); 1.2479 + GrGLuint tempFBOID; 1.2480 + if (NULL == rt) { 1.2481 + SkASSERT(NULL != surface->asTexture()); 1.2482 + GrGLuint texID = static_cast<GrGLTexture*>(surface->asTexture())->textureID(); 1.2483 + GR_GL_CALL(gl, GenFramebuffers(1, &tempFBOID)); 1.2484 + GR_GL_CALL(gl, BindFramebuffer(fboTarget, tempFBOID)); 1.2485 + GR_GL_CALL(gl, FramebufferTexture2D(fboTarget, 1.2486 + GR_GL_COLOR_ATTACHMENT0, 1.2487 + GR_GL_TEXTURE_2D, 1.2488 + texID, 1.2489 + 0)); 1.2490 + viewport->fLeft = 0; 1.2491 + viewport->fBottom = 0; 1.2492 + viewport->fWidth = surface->width(); 1.2493 + viewport->fHeight = surface->height(); 1.2494 + } else { 1.2495 + tempFBOID = 0; 1.2496 + GR_GL_CALL(gl, BindFramebuffer(fboTarget, rt->renderFBOID())); 1.2497 + *viewport = rt->getViewport(); 1.2498 + } 1.2499 + return tempFBOID; 1.2500 +} 1.2501 + 1.2502 +} 1.2503 + 1.2504 +void GrGpuGL::initCopySurfaceDstDesc(const GrSurface* src, GrTextureDesc* desc) { 1.2505 + // Check for format issues with glCopyTexSubImage2D 1.2506 + if (kGLES_GrGLStandard == this->glStandard() && this->glCaps().bgraIsInternalFormat() && 1.2507 + kBGRA_8888_GrPixelConfig == src->config()) { 1.2508 + // glCopyTexSubImage2D doesn't work with this config. We'll want to make it a render target 1.2509 + // in order to call glBlitFramebuffer or to copy to it by rendering. 1.2510 + INHERITED::initCopySurfaceDstDesc(src, desc); 1.2511 + return; 1.2512 + } else if (NULL == src->asRenderTarget()) { 1.2513 + // We don't want to have to create an FBO just to use glCopyTexSubImage2D. Let the base 1.2514 + // class handle it by rendering. 
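// [Illustrative sketch, not part of the patch] bind_surface_as_fbo() above wraps a bare texture in
// a throwaway FBO so it can serve as a blit/copy endpoint; the non-zero ID it returns tells the
// caller to delete the FBO once the copy is done. A raw-GL sketch of the same wrap (loader assumed):
#include <GL/glew.h>   // assumption
static GLuint wrap_texture_in_temp_fbo(GLuint texID, GLenum fboTarget) {
    GLuint tempFBO = 0;
    glGenFramebuffers(1, &tempFBO);
    glBindFramebuffer(fboTarget, tempFBO);
    glFramebufferTexture2D(fboTarget, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, texID, 0 /*level*/);
    return tempFBO;                            // caller: glDeleteFramebuffers(1, &tempFBO) afterwards
}
// (initCopySurfaceDstDesc() continues directly below.)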
1.2515 + INHERITED::initCopySurfaceDstDesc(src, desc); 1.2516 + return; 1.2517 + } 1.2518 + 1.2519 + const GrGLRenderTarget* srcRT = static_cast<const GrGLRenderTarget*>(src->asRenderTarget()); 1.2520 + if (NULL != srcRT && srcRT->renderFBOID() != srcRT->textureFBOID()) { 1.2521 + // It's illegal to call CopyTexSubImage2D on a MSAA renderbuffer. 1.2522 + INHERITED::initCopySurfaceDstDesc(src, desc); 1.2523 + } else { 1.2524 + desc->fConfig = src->config(); 1.2525 + desc->fOrigin = src->origin(); 1.2526 + desc->fFlags = kNone_GrTextureFlags; 1.2527 + } 1.2528 +} 1.2529 + 1.2530 +bool GrGpuGL::onCopySurface(GrSurface* dst, 1.2531 + GrSurface* src, 1.2532 + const SkIRect& srcRect, 1.2533 + const SkIPoint& dstPoint) { 1.2534 + bool inheritedCouldCopy = INHERITED::onCanCopySurface(dst, src, srcRect, dstPoint); 1.2535 + bool copied = false; 1.2536 + bool wouldNeedTempFBO = false; 1.2537 + if (can_copy_texsubimage(dst, src, this, &wouldNeedTempFBO) && 1.2538 + (!wouldNeedTempFBO || !inheritedCouldCopy)) { 1.2539 + GrGLuint srcFBO; 1.2540 + GrGLIRect srcVP; 1.2541 + srcFBO = bind_surface_as_fbo(this->glInterface(), src, GR_GL_FRAMEBUFFER, &srcVP); 1.2542 + GrGLTexture* dstTex = static_cast<GrGLTexture*>(dst->asTexture()); 1.2543 + SkASSERT(NULL != dstTex); 1.2544 + // We modified the bound FBO 1.2545 + fHWBoundRenderTarget = NULL; 1.2546 + GrGLIRect srcGLRect; 1.2547 + srcGLRect.setRelativeTo(srcVP, 1.2548 + srcRect.fLeft, 1.2549 + srcRect.fTop, 1.2550 + srcRect.width(), 1.2551 + srcRect.height(), 1.2552 + src->origin()); 1.2553 + 1.2554 + this->setScratchTextureUnit(); 1.2555 + GL_CALL(BindTexture(GR_GL_TEXTURE_2D, dstTex->textureID())); 1.2556 + GrGLint dstY; 1.2557 + if (kBottomLeft_GrSurfaceOrigin == dst->origin()) { 1.2558 + dstY = dst->height() - (dstPoint.fY + srcGLRect.fHeight); 1.2559 + } else { 1.2560 + dstY = dstPoint.fY; 1.2561 + } 1.2562 + GL_CALL(CopyTexSubImage2D(GR_GL_TEXTURE_2D, 0, 1.2563 + dstPoint.fX, dstY, 1.2564 + srcGLRect.fLeft, srcGLRect.fBottom, 1.2565 + srcGLRect.fWidth, srcGLRect.fHeight)); 1.2566 + copied = true; 1.2567 + if (srcFBO) { 1.2568 + GL_CALL(DeleteFramebuffers(1, &srcFBO)); 1.2569 + } 1.2570 + } else if (can_blit_framebuffer(dst, src, this, &wouldNeedTempFBO) && 1.2571 + (!wouldNeedTempFBO || !inheritedCouldCopy)) { 1.2572 + SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY, 1.2573 + srcRect.width(), srcRect.height()); 1.2574 + bool selfOverlap = false; 1.2575 + if (dst->isSameAs(src)) { 1.2576 + selfOverlap = SkIRect::IntersectsNoEmptyCheck(dstRect, srcRect); 1.2577 + } 1.2578 + 1.2579 + if (!selfOverlap) { 1.2580 + GrGLuint dstFBO; 1.2581 + GrGLuint srcFBO; 1.2582 + GrGLIRect dstVP; 1.2583 + GrGLIRect srcVP; 1.2584 + dstFBO = bind_surface_as_fbo(this->glInterface(), dst, GR_GL_DRAW_FRAMEBUFFER, &dstVP); 1.2585 + srcFBO = bind_surface_as_fbo(this->glInterface(), src, GR_GL_READ_FRAMEBUFFER, &srcVP); 1.2586 + // We modified the bound FBO 1.2587 + fHWBoundRenderTarget = NULL; 1.2588 + GrGLIRect srcGLRect; 1.2589 + GrGLIRect dstGLRect; 1.2590 + srcGLRect.setRelativeTo(srcVP, 1.2591 + srcRect.fLeft, 1.2592 + srcRect.fTop, 1.2593 + srcRect.width(), 1.2594 + srcRect.height(), 1.2595 + src->origin()); 1.2596 + dstGLRect.setRelativeTo(dstVP, 1.2597 + dstRect.fLeft, 1.2598 + dstRect.fTop, 1.2599 + dstRect.width(), 1.2600 + dstRect.height(), 1.2601 + dst->origin()); 1.2602 + 1.2603 + GrAutoTRestore<ScissorState> asr; 1.2604 + if (GrGLCaps::kDesktop_EXT_MSFBOType == this->glCaps().msFBOType()) { 1.2605 + // The EXT version applies the scissor during the 
blit, so disable it. 1.2606 + asr.reset(&fScissorState); 1.2607 + fScissorState.fEnabled = false; 1.2608 + this->flushScissor(); 1.2609 + } 1.2610 + GrGLint srcY0; 1.2611 + GrGLint srcY1; 1.2612 + // Does the blit need to y-mirror or not? 1.2613 + if (src->origin() == dst->origin()) { 1.2614 + srcY0 = srcGLRect.fBottom; 1.2615 + srcY1 = srcGLRect.fBottom + srcGLRect.fHeight; 1.2616 + } else { 1.2617 + srcY0 = srcGLRect.fBottom + srcGLRect.fHeight; 1.2618 + srcY1 = srcGLRect.fBottom; 1.2619 + } 1.2620 + GL_CALL(BlitFramebuffer(srcGLRect.fLeft, 1.2621 + srcY0, 1.2622 + srcGLRect.fLeft + srcGLRect.fWidth, 1.2623 + srcY1, 1.2624 + dstGLRect.fLeft, 1.2625 + dstGLRect.fBottom, 1.2626 + dstGLRect.fLeft + dstGLRect.fWidth, 1.2627 + dstGLRect.fBottom + dstGLRect.fHeight, 1.2628 + GR_GL_COLOR_BUFFER_BIT, GR_GL_NEAREST)); 1.2629 + if (dstFBO) { 1.2630 + GL_CALL(DeleteFramebuffers(1, &dstFBO)); 1.2631 + } 1.2632 + if (srcFBO) { 1.2633 + GL_CALL(DeleteFramebuffers(1, &srcFBO)); 1.2634 + } 1.2635 + copied = true; 1.2636 + } 1.2637 + } 1.2638 + if (!copied && inheritedCouldCopy) { 1.2639 + copied = INHERITED::onCopySurface(dst, src, srcRect, dstPoint); 1.2640 + SkASSERT(copied); 1.2641 + } 1.2642 + return copied; 1.2643 +} 1.2644 + 1.2645 +bool GrGpuGL::onCanCopySurface(GrSurface* dst, 1.2646 + GrSurface* src, 1.2647 + const SkIRect& srcRect, 1.2648 + const SkIPoint& dstPoint) { 1.2649 + // This mirrors the logic in onCopySurface. 1.2650 + if (can_copy_texsubimage(dst, src, this)) { 1.2651 + return true; 1.2652 + } 1.2653 + if (can_blit_framebuffer(dst, src, this)) { 1.2654 + if (dst->isSameAs(src)) { 1.2655 + SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY, 1.2656 + srcRect.width(), srcRect.height()); 1.2657 + if(!SkIRect::IntersectsNoEmptyCheck(dstRect, srcRect)) { 1.2658 + return true; 1.2659 + } 1.2660 + } else { 1.2661 + return true; 1.2662 + } 1.2663 + } 1.2664 + return INHERITED::onCanCopySurface(dst, src, srcRect, dstPoint); 1.2665 +} 1.2666 + 1.2667 +void GrGpuGL::onInstantGpuTraceEvent(const char* marker) { 1.2668 + if (this->caps()->gpuTracingSupport()) { 1.2669 + // GL_CALL(InsertEventMarker(0, marker)); 1.2670 + } 1.2671 +} 1.2672 + 1.2673 +void GrGpuGL::onPushGpuTraceEvent(const char* marker) { 1.2674 + if (this->caps()->gpuTracingSupport()) { 1.2675 + // GL_CALL(PushGroupMarker(0, marker)); 1.2676 + } 1.2677 +} 1.2678 + 1.2679 +void GrGpuGL::onPopGpuTraceEvent() { 1.2680 + if (this->caps()->gpuTracingSupport()) { 1.2681 + // GL_CALL(PopGroupMarker()); 1.2682 + } 1.2683 +} 1.2684 + 1.2685 +/////////////////////////////////////////////////////////////////////////////// 1.2686 + 1.2687 +GrGLAttribArrayState* GrGpuGL::HWGeometryState::bindArrayAndBuffersToDraw( 1.2688 + GrGpuGL* gpu, 1.2689 + const GrGLVertexBuffer* vbuffer, 1.2690 + const GrGLIndexBuffer* ibuffer) { 1.2691 + SkASSERT(NULL != vbuffer); 1.2692 + GrGLAttribArrayState* attribState; 1.2693 + 1.2694 + // We use a vertex array if we're on a core profile and the verts are in a VBO. 
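// [Illustrative sketch, not part of the patch] Core-profile GL has no default vertex array object,
// so vertex attribute state must live in a VAO; the code below creates one lazily and reuses it.
// A raw-GL sketch of that lazy creation; gVAO is a hypothetical stand-in for the cached
// fVBOVertexArray (loader assumed):
#include <GL/glew.h>   // assumption
static GLuint gVAO = 0;
static void bind_lazy_vao() {
    if (0 == gVAO) {
        glGenVertexArrays(1, &gVAO);
    }
    glBindVertexArray(gVAO);                   // subsequent attribute-pointer calls record into it
}
// (The patch's VAO path follows below.)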
1.2695 + if (gpu->glCaps().isCoreProfile() && !vbuffer->isCPUBacked()) { 1.2696 + if (NULL == fVBOVertexArray || !fVBOVertexArray->isValid()) { 1.2697 + SkSafeUnref(fVBOVertexArray); 1.2698 + GrGLuint arrayID; 1.2699 + GR_GL_CALL(gpu->glInterface(), GenVertexArrays(1, &arrayID)); 1.2700 + int attrCount = gpu->glCaps().maxVertexAttributes(); 1.2701 + fVBOVertexArray = SkNEW_ARGS(GrGLVertexArray, (gpu, arrayID, attrCount)); 1.2702 + } 1.2703 + attribState = fVBOVertexArray->bindWithIndexBuffer(ibuffer); 1.2704 + } else { 1.2705 + if (NULL != ibuffer) { 1.2706 + this->setIndexBufferIDOnDefaultVertexArray(gpu, ibuffer->bufferID()); 1.2707 + } else { 1.2708 + this->setVertexArrayID(gpu, 0); 1.2709 + } 1.2710 + int attrCount = gpu->glCaps().maxVertexAttributes(); 1.2711 + if (fDefaultVertexArrayAttribState.count() != attrCount) { 1.2712 + fDefaultVertexArrayAttribState.resize(attrCount); 1.2713 + } 1.2714 + attribState = &fDefaultVertexArrayAttribState; 1.2715 + } 1.2716 + return attribState; 1.2717 +}