michael@0: /* michael@0: * Copyright 2011 Google Inc. michael@0: * michael@0: * Use of this source code is governed by a BSD-style license that can be michael@0: * found in the LICENSE file. michael@0: */ michael@0: michael@0: michael@0: #include "GrGpuGL.h" michael@0: #include "GrGLStencilBuffer.h" michael@0: #include "GrGLPath.h" michael@0: #include "GrGLShaderBuilder.h" michael@0: #include "GrTemplates.h" michael@0: #include "GrTypes.h" michael@0: #include "SkStrokeRec.h" michael@0: #include "SkTemplates.h" michael@0: michael@0: #define GL_CALL(X) GR_GL_CALL(this->glInterface(), X) michael@0: #define GL_CALL_RET(RET, X) GR_GL_CALL_RET(this->glInterface(), RET, X) michael@0: michael@0: #define SKIP_CACHE_CHECK true michael@0: michael@0: #if GR_GL_CHECK_ALLOC_WITH_GET_ERROR michael@0: #define CLEAR_ERROR_BEFORE_ALLOC(iface) GrGLClearErr(iface) michael@0: #define GL_ALLOC_CALL(iface, call) GR_GL_CALL_NOERRCHECK(iface, call) michael@0: #define CHECK_ALLOC_ERROR(iface) GR_GL_GET_ERROR(iface) michael@0: #else michael@0: #define CLEAR_ERROR_BEFORE_ALLOC(iface) michael@0: #define GL_ALLOC_CALL(iface, call) GR_GL_CALL(iface, call) michael@0: #define CHECK_ALLOC_ERROR(iface) GR_GL_NO_ERROR michael@0: #endif michael@0: michael@0: michael@0: /////////////////////////////////////////////////////////////////////////////// michael@0: michael@0: static const GrGLenum gXfermodeCoeff2Blend[] = { michael@0: GR_GL_ZERO, michael@0: GR_GL_ONE, michael@0: GR_GL_SRC_COLOR, michael@0: GR_GL_ONE_MINUS_SRC_COLOR, michael@0: GR_GL_DST_COLOR, michael@0: GR_GL_ONE_MINUS_DST_COLOR, michael@0: GR_GL_SRC_ALPHA, michael@0: GR_GL_ONE_MINUS_SRC_ALPHA, michael@0: GR_GL_DST_ALPHA, michael@0: GR_GL_ONE_MINUS_DST_ALPHA, michael@0: GR_GL_CONSTANT_COLOR, michael@0: GR_GL_ONE_MINUS_CONSTANT_COLOR, michael@0: GR_GL_CONSTANT_ALPHA, michael@0: GR_GL_ONE_MINUS_CONSTANT_ALPHA, michael@0: michael@0: // extended blend coeffs michael@0: GR_GL_SRC1_COLOR, michael@0: GR_GL_ONE_MINUS_SRC1_COLOR, michael@0: GR_GL_SRC1_ALPHA, michael@0: GR_GL_ONE_MINUS_SRC1_ALPHA, michael@0: }; michael@0: michael@0: bool GrGpuGL::BlendCoeffReferencesConstant(GrBlendCoeff coeff) { michael@0: static const bool gCoeffReferencesBlendConst[] = { michael@0: false, michael@0: false, michael@0: false, michael@0: false, michael@0: false, michael@0: false, michael@0: false, michael@0: false, michael@0: false, michael@0: false, michael@0: true, michael@0: true, michael@0: true, michael@0: true, michael@0: michael@0: // extended blend coeffs michael@0: false, michael@0: false, michael@0: false, michael@0: false, michael@0: }; michael@0: return gCoeffReferencesBlendConst[coeff]; michael@0: GR_STATIC_ASSERT(kTotalGrBlendCoeffCount == michael@0: GR_ARRAY_COUNT(gCoeffReferencesBlendConst)); michael@0: michael@0: GR_STATIC_ASSERT(0 == kZero_GrBlendCoeff); michael@0: GR_STATIC_ASSERT(1 == kOne_GrBlendCoeff); michael@0: GR_STATIC_ASSERT(2 == kSC_GrBlendCoeff); michael@0: GR_STATIC_ASSERT(3 == kISC_GrBlendCoeff); michael@0: GR_STATIC_ASSERT(4 == kDC_GrBlendCoeff); michael@0: GR_STATIC_ASSERT(5 == kIDC_GrBlendCoeff); michael@0: GR_STATIC_ASSERT(6 == kSA_GrBlendCoeff); michael@0: GR_STATIC_ASSERT(7 == kISA_GrBlendCoeff); michael@0: GR_STATIC_ASSERT(8 == kDA_GrBlendCoeff); michael@0: GR_STATIC_ASSERT(9 == kIDA_GrBlendCoeff); michael@0: GR_STATIC_ASSERT(10 == kConstC_GrBlendCoeff); michael@0: GR_STATIC_ASSERT(11 == kIConstC_GrBlendCoeff); michael@0: GR_STATIC_ASSERT(12 == kConstA_GrBlendCoeff); michael@0: GR_STATIC_ASSERT(13 == kIConstA_GrBlendCoeff); michael@0: michael@0: 
    GR_STATIC_ASSERT(14 == kS2C_GrBlendCoeff);
    GR_STATIC_ASSERT(15 == kIS2C_GrBlendCoeff);
    GR_STATIC_ASSERT(16 == kS2A_GrBlendCoeff);
    GR_STATIC_ASSERT(17 == kIS2A_GrBlendCoeff);

    // assertion for gXfermodeCoeff2Blend has to be in GrGpu scope
    GR_STATIC_ASSERT(kTotalGrBlendCoeffCount ==
                     GR_ARRAY_COUNT(gXfermodeCoeff2Blend));
}

///////////////////////////////////////////////////////////////////////////////

static bool gPrintStartupSpew;

GrGpuGL::GrGpuGL(const GrGLContext& ctx, GrContext* context)
    : GrGpu(context)
    , fGLContext(ctx) {

    SkASSERT(ctx.isInitialized());
    fCaps.reset(SkRef(ctx.caps()));

    fHWBoundTextures.reset(this->glCaps().maxFragmentTextureUnits());
    fHWTexGenSettings.reset(this->glCaps().maxFixedFunctionTextureCoords());

    GrGLClearErr(fGLContext.interface());
    if (gPrintStartupSpew) {
        const GrGLubyte* vendor;
        const GrGLubyte* renderer;
        const GrGLubyte* version;
        GL_CALL_RET(vendor, GetString(GR_GL_VENDOR));
        GL_CALL_RET(renderer, GetString(GR_GL_RENDERER));
        GL_CALL_RET(version, GetString(GR_GL_VERSION));
        GrPrintf("------------------------- create GrGpuGL %p --------------\n",
                 this);
        GrPrintf("------ VENDOR %s\n", vendor);
        GrPrintf("------ RENDERER %s\n", renderer);
        GrPrintf("------ VERSION %s\n", version);
        GrPrintf("------ EXTENSIONS\n");
#if 0  // TODO: Reenable this after GrGLInterface's extensions can be accessed safely.
        ctx.extensions().print();
#endif
        GrPrintf("\n");
        GrPrintf(this->glCaps().dump().c_str());
    }

    fProgramCache = SkNEW_ARGS(ProgramCache, (this));

    SkASSERT(this->glCaps().maxVertexAttributes() >= GrDrawState::kMaxVertexAttribCnt);

    fLastSuccessfulStencilFmtIdx = 0;
    fHWProgramID = 0;
}

GrGpuGL::~GrGpuGL() {
    if (0 != fHWProgramID) {
        // detach the current program so there is no confusion on OpenGL's part
        // that we want it to be deleted
        SkASSERT(fHWProgramID == fCurrentProgram->programID());
        GL_CALL(UseProgram(0));
    }

    delete fProgramCache;

    // This must be called before the GrDrawTarget destructor
    this->releaseGeometry();
    // This subclass must do this before the base class destructor runs
    // since we will unref the GrGLInterface.
    this->releaseResources();
}

///////////////////////////////////////////////////////////////////////////////

GrPixelConfig GrGpuGL::preferredReadPixelsConfig(GrPixelConfig readConfig,
                                                 GrPixelConfig surfaceConfig) const {
    if (GR_GL_RGBA_8888_PIXEL_OPS_SLOW && kRGBA_8888_GrPixelConfig == readConfig) {
        return kBGRA_8888_GrPixelConfig;
    } else if (this->glContext().isMesa() &&
               GrBytesPerPixel(readConfig) == 4 &&
               GrPixelConfigSwapRAndB(readConfig) == surfaceConfig) {
        // Mesa 3D takes a slow path when reading back BGRA from an RGBA surface and vice-versa.
        // Perhaps this should be guarded by some compile-time or runtime check.
        return surfaceConfig;
    } else if (readConfig == kBGRA_8888_GrPixelConfig &&
               !this->glCaps().readPixelsSupported(this->glInterface(),
                                                   GR_GL_BGRA, GR_GL_UNSIGNED_BYTE)) {
        return kRGBA_8888_GrPixelConfig;
    } else {
        return readConfig;
    }
}

GrPixelConfig GrGpuGL::preferredWritePixelsConfig(GrPixelConfig writeConfig,
                                                  GrPixelConfig surfaceConfig) const {
    if (GR_GL_RGBA_8888_PIXEL_OPS_SLOW && kRGBA_8888_GrPixelConfig == writeConfig) {
        return kBGRA_8888_GrPixelConfig;
    } else {
        return writeConfig;
    }
}

bool GrGpuGL::canWriteTexturePixels(const GrTexture* texture, GrPixelConfig srcConfig) const {
    if (kIndex_8_GrPixelConfig == srcConfig || kIndex_8_GrPixelConfig == texture->config()) {
        return false;
    }
    if (srcConfig != texture->config() && kGLES_GrGLStandard == this->glStandard()) {
        // In general ES2 requires the internal format of the texture and the format of the src
        // pixels to match. However, it may or may not be possible to upload BGRA data to an RGBA
        // texture. It depends upon which extension added BGRA. The Apple extension allows it
        // (BGRA's internal format is RGBA) while the EXT extension does not (BGRA is its own
        // internal format).
        if (this->glCaps().bgraFormatSupport() &&
            !this->glCaps().bgraIsInternalFormat() &&
            kBGRA_8888_GrPixelConfig == srcConfig &&
            kRGBA_8888_GrPixelConfig == texture->config()) {
            return true;
        } else {
            return false;
        }
    } else {
        return true;
    }
}

bool GrGpuGL::fullReadPixelsIsFasterThanPartial() const {
    return SkToBool(GR_GL_FULL_READPIXELS_FASTER_THAN_PARTIAL);
}

void GrGpuGL::onResetContext(uint32_t resetBits) {
    // we don't use the zb at all
    if (resetBits & kMisc_GrGLBackendState) {
        GL_CALL(Disable(GR_GL_DEPTH_TEST));
        GL_CALL(DepthMask(GR_GL_FALSE));

        fHWDrawFace = GrDrawState::kInvalid_DrawFace;
        fHWDitherEnabled = kUnknown_TriState;

        if (kGL_GrGLStandard == this->glStandard()) {
            // Desktop-only state that we never change
            if (!this->glCaps().isCoreProfile()) {
                GL_CALL(Disable(GR_GL_POINT_SMOOTH));
                GL_CALL(Disable(GR_GL_LINE_SMOOTH));
                GL_CALL(Disable(GR_GL_POLYGON_SMOOTH));
                GL_CALL(Disable(GR_GL_POLYGON_STIPPLE));
                GL_CALL(Disable(GR_GL_COLOR_LOGIC_OP));
                GL_CALL(Disable(GR_GL_INDEX_LOGIC_OP));
            }
            // The Windows NVIDIA driver has GL_ARB_imaging in the extension string when using a
            // core profile. This seems like a bug since the core spec removes any mention of
            // GL_ARB_imaging.
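            // Hence the additional !isCoreProfile() guard below before touching GL_COLOR_TABLE.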
            if (this->glCaps().imagingSupport() && !this->glCaps().isCoreProfile()) {
                GL_CALL(Disable(GR_GL_COLOR_TABLE));
            }
            GL_CALL(Disable(GR_GL_POLYGON_OFFSET_FILL));
            // Since ES doesn't support glPointSize at all we always use the VS to
            // set the point size
            GL_CALL(Enable(GR_GL_VERTEX_PROGRAM_POINT_SIZE));

            // We should set glPolygonMode(FRONT_AND_BACK,FILL) here, too. It isn't
            // currently part of our gl interface. There are probably others as
            // well.
        }
        fHWWriteToColor = kUnknown_TriState;
        // we only ever use lines in hairline mode
        GL_CALL(LineWidth(1));
    }

    if (resetBits & kAA_GrGLBackendState) {
        fHWAAState.invalidate();
    }

    fHWActiveTextureUnitIdx = -1; // invalid

    if (resetBits & kTextureBinding_GrGLBackendState) {
        for (int s = 0; s < fHWBoundTextures.count(); ++s) {
            fHWBoundTextures[s] = NULL;
        }
    }

    if (resetBits & kBlend_GrGLBackendState) {
        fHWBlendState.invalidate();
    }

    if (resetBits & kView_GrGLBackendState) {
        fHWScissorSettings.invalidate();
        fHWViewport.invalidate();
    }

    if (resetBits & kStencil_GrGLBackendState) {
        fHWStencilSettings.invalidate();
        fHWStencilTestEnabled = kUnknown_TriState;
    }

    // Vertex
    if (resetBits & kVertex_GrGLBackendState) {
        fHWGeometryState.invalidate();
    }

    if (resetBits & kRenderTarget_GrGLBackendState) {
        fHWBoundRenderTarget = NULL;
    }

    if (resetBits & (kFixedFunction_GrGLBackendState | kPathRendering_GrGLBackendState)) {
        if (this->glCaps().fixedFunctionSupport()) {
            fHWProjectionMatrixState.invalidate();
            // we don't use the model view matrix.
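            // It is reset to identity once here and then never changed.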
michael@0: GL_CALL(MatrixMode(GR_GL_MODELVIEW)); michael@0: GL_CALL(LoadIdentity()); michael@0: michael@0: for (int i = 0; i < this->glCaps().maxFixedFunctionTextureCoords(); ++i) { michael@0: GL_CALL(ActiveTexture(GR_GL_TEXTURE0 + i)); michael@0: GL_CALL(Disable(GR_GL_TEXTURE_GEN_S)); michael@0: GL_CALL(Disable(GR_GL_TEXTURE_GEN_T)); michael@0: GL_CALL(Disable(GR_GL_TEXTURE_GEN_Q)); michael@0: GL_CALL(Disable(GR_GL_TEXTURE_GEN_R)); michael@0: if (this->caps()->pathRenderingSupport()) { michael@0: GL_CALL(PathTexGen(GR_GL_TEXTURE0 + i, GR_GL_NONE, 0, NULL)); michael@0: } michael@0: fHWTexGenSettings[i].fMode = GR_GL_NONE; michael@0: fHWTexGenSettings[i].fNumComponents = 0; michael@0: } michael@0: fHWActiveTexGenSets = 0; michael@0: } michael@0: if (this->caps()->pathRenderingSupport()) { michael@0: fHWPathStencilSettings.invalidate(); michael@0: } michael@0: } michael@0: michael@0: // we assume these values michael@0: if (resetBits & kPixelStore_GrGLBackendState) { michael@0: if (this->glCaps().unpackRowLengthSupport()) { michael@0: GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH, 0)); michael@0: } michael@0: if (this->glCaps().packRowLengthSupport()) { michael@0: GL_CALL(PixelStorei(GR_GL_PACK_ROW_LENGTH, 0)); michael@0: } michael@0: if (this->glCaps().unpackFlipYSupport()) { michael@0: GL_CALL(PixelStorei(GR_GL_UNPACK_FLIP_Y, GR_GL_FALSE)); michael@0: } michael@0: if (this->glCaps().packFlipYSupport()) { michael@0: GL_CALL(PixelStorei(GR_GL_PACK_REVERSE_ROW_ORDER, GR_GL_FALSE)); michael@0: } michael@0: } michael@0: michael@0: if (resetBits & kProgram_GrGLBackendState) { michael@0: fHWProgramID = 0; michael@0: fSharedGLProgramState.invalidate(); michael@0: } michael@0: } michael@0: michael@0: namespace { michael@0: michael@0: GrSurfaceOrigin resolve_origin(GrSurfaceOrigin origin, bool renderTarget) { michael@0: // By default, GrRenderTargets are GL's normal orientation so that they michael@0: // can be drawn to by the outside world without the client having michael@0: // to render upside down. michael@0: if (kDefault_GrSurfaceOrigin == origin) { michael@0: return renderTarget ? kBottomLeft_GrSurfaceOrigin : kTopLeft_GrSurfaceOrigin; michael@0: } else { michael@0: return origin; michael@0: } michael@0: } michael@0: michael@0: } michael@0: michael@0: GrTexture* GrGpuGL::onWrapBackendTexture(const GrBackendTextureDesc& desc) { michael@0: if (!this->configToGLFormats(desc.fConfig, false, NULL, NULL, NULL)) { michael@0: return NULL; michael@0: } michael@0: michael@0: if (0 == desc.fTextureHandle) { michael@0: return NULL; michael@0: } michael@0: michael@0: int maxSize = this->caps()->maxTextureSize(); michael@0: if (desc.fWidth > maxSize || desc.fHeight > maxSize) { michael@0: return NULL; michael@0: } michael@0: michael@0: GrGLTexture::Desc glTexDesc; michael@0: // next line relies on GrBackendTextureDesc's flags matching GrTexture's michael@0: glTexDesc.fFlags = (GrTextureFlags) desc.fFlags; michael@0: glTexDesc.fWidth = desc.fWidth; michael@0: glTexDesc.fHeight = desc.fHeight; michael@0: glTexDesc.fConfig = desc.fConfig; michael@0: glTexDesc.fSampleCnt = desc.fSampleCnt; michael@0: glTexDesc.fTextureID = static_cast(desc.fTextureHandle); michael@0: glTexDesc.fIsWrapped = true; michael@0: bool renderTarget = SkToBool(desc.fFlags & kRenderTarget_GrBackendTextureFlag); michael@0: // FIXME: this should be calling resolve_origin(), but Chrome code is currently michael@0: // assuming the old behaviour, which is that backend textures are always michael@0: // BottomLeft, even for non-RT's. 
    // Once Chrome is fixed, change this to:
    // glTexDesc.fOrigin = resolve_origin(desc.fOrigin, renderTarget);
    if (kDefault_GrSurfaceOrigin == desc.fOrigin) {
        glTexDesc.fOrigin = kBottomLeft_GrSurfaceOrigin;
    } else {
        glTexDesc.fOrigin = desc.fOrigin;
    }

    GrGLTexture* texture = NULL;
    if (renderTarget) {
        GrGLRenderTarget::Desc glRTDesc;
        glRTDesc.fRTFBOID = 0;
        glRTDesc.fTexFBOID = 0;
        glRTDesc.fMSColorRenderbufferID = 0;
        glRTDesc.fConfig = desc.fConfig;
        glRTDesc.fSampleCnt = desc.fSampleCnt;
        glRTDesc.fOrigin = glTexDesc.fOrigin;
        glRTDesc.fCheckAllocation = false;
        if (!this->createRenderTargetObjects(glTexDesc.fWidth,
                                             glTexDesc.fHeight,
                                             glTexDesc.fTextureID,
                                             &glRTDesc)) {
            return NULL;
        }
        texture = SkNEW_ARGS(GrGLTexture, (this, glTexDesc, glRTDesc));
    } else {
        texture = SkNEW_ARGS(GrGLTexture, (this, glTexDesc));
    }
    if (NULL == texture) {
        return NULL;
    }

    return texture;
}

GrRenderTarget* GrGpuGL::onWrapBackendRenderTarget(const GrBackendRenderTargetDesc& desc) {
    GrGLRenderTarget::Desc glDesc;
    glDesc.fConfig = desc.fConfig;
    glDesc.fRTFBOID = static_cast<GrGLuint>(desc.fRenderTargetHandle);
    glDesc.fMSColorRenderbufferID = 0;
    glDesc.fTexFBOID = GrGLRenderTarget::kUnresolvableFBOID;
    glDesc.fSampleCnt = desc.fSampleCnt;
    glDesc.fIsWrapped = true;
    glDesc.fCheckAllocation = false;

    glDesc.fOrigin = resolve_origin(desc.fOrigin, true);
    GrGLIRect viewport;
    viewport.fLeft   = 0;
    viewport.fBottom = 0;
    viewport.fWidth  = desc.fWidth;
    viewport.fHeight = desc.fHeight;

    GrRenderTarget* tgt = SkNEW_ARGS(GrGLRenderTarget,
                                     (this, glDesc, viewport));
    if (desc.fStencilBits) {
        GrGLStencilBuffer::Format format;
        format.fInternalFormat = GrGLStencilBuffer::kUnknownInternalFormat;
        format.fPacked = false;
        format.fStencilBits = desc.fStencilBits;
        format.fTotalBits = desc.fStencilBits;
        static const bool kIsSBWrapped = false;
        GrGLStencilBuffer* sb = SkNEW_ARGS(GrGLStencilBuffer,
                                           (this,
                                            kIsSBWrapped,
                                            0,
                                            desc.fWidth,
                                            desc.fHeight,
                                            desc.fSampleCnt,
                                            format));
        tgt->setStencilBuffer(sb);
        sb->unref();
    }
    return tgt;
}

////////////////////////////////////////////////////////////////////////////////

bool GrGpuGL::onWriteTexturePixels(GrTexture* texture,
                                   int left, int top, int width, int height,
                                   GrPixelConfig config, const void* buffer,
                                   size_t rowBytes) {
    if (NULL == buffer) {
        return false;
    }
    GrGLTexture* glTex = static_cast<GrGLTexture*>(texture);

    this->setScratchTextureUnit();
    GL_CALL(BindTexture(GR_GL_TEXTURE_2D, glTex->textureID()));
    GrGLTexture::Desc desc;
    desc.fFlags = glTex->desc().fFlags;
    desc.fWidth = glTex->width();
    desc.fHeight = glTex->height();
    desc.fConfig = glTex->config();
    desc.fSampleCnt = glTex->desc().fSampleCnt;
    desc.fTextureID = glTex->textureID();
    desc.fOrigin = glTex->origin();

    if (this->uploadTexData(desc, false,
                            left, top, width, height,
                            config, buffer, rowBytes)) {
        texture->dirtyMipMaps(true);
        return true;
    } else {
        return false;
    }
}

namespace {
bool adjust_pixel_ops_params(int surfaceWidth,
                             int surfaceHeight,
                             size_t bpp,
                             int* left, int* top, int* width, int* height,
                             const void** data,
                             size_t* rowBytes) {
    if (!*rowBytes) {
        *rowBytes = *width * bpp;
    }

    SkIRect subRect = SkIRect::MakeXYWH(*left, *top, *width, *height);
    SkIRect bounds = SkIRect::MakeWH(surfaceWidth, surfaceHeight);

    if (!subRect.intersect(bounds)) {
        return false;
    }
    *data = reinterpret_cast<const void*>(reinterpret_cast<intptr_t>(*data) +
            (subRect.fTop - *top) * *rowBytes + (subRect.fLeft - *left) * bpp);

    *left = subRect.fLeft;
    *top = subRect.fTop;
    *width = subRect.width();
    *height = subRect.height();
    return true;
}

GrGLenum check_alloc_error(const GrTextureDesc& desc, const GrGLInterface* interface) {
    if (SkToBool(desc.fFlags & kCheckAllocation_GrTextureFlagBit)) {
        return GR_GL_GET_ERROR(interface);
    } else {
        return CHECK_ALLOC_ERROR(interface);
    }
}

}

bool GrGpuGL::uploadTexData(const GrGLTexture::Desc& desc,
                            bool isNewTexture,
                            int left, int top, int width, int height,
                            GrPixelConfig dataConfig,
                            const void* data,
                            size_t rowBytes) {
    SkASSERT(NULL != data || isNewTexture);

    size_t bpp = GrBytesPerPixel(dataConfig);
    if (!adjust_pixel_ops_params(desc.fWidth, desc.fHeight, bpp, &left, &top,
                                 &width, &height, &data, &rowBytes)) {
        return false;
    }
    size_t trimRowBytes = width * bpp;

    // in case we need a temporary, trimmed copy of the src pixels
    SkAutoSMalloc<128 * 128> tempStorage;

    // paletted textures cannot be partially updated
    // We currently lazily create MIPMAPs when we see a draw with
    // GrTextureParams::kMipMap_FilterMode. Using texture storage requires that the
    // MIP levels are all created when the texture is created. So for now we don't use
    // texture storage.
    bool useTexStorage = false &&
                         isNewTexture &&
                         desc.fConfig != kIndex_8_GrPixelConfig &&
                         this->glCaps().texStorageSupport();

    if (useTexStorage && kGL_GrGLStandard == this->glStandard()) {
        // 565 is not a sized internal format on desktop GL. So on desktop with
        // 565 we always use an unsized internal format to let the system pick
        // the best sized format to convert the 565 data to. Since TexStorage
        // only allows sized internal formats we will instead use TexImage2D.
        useTexStorage = desc.fConfig != kRGB_565_GrPixelConfig;
    }

    GrGLenum internalFormat;
    GrGLenum externalFormat;
    GrGLenum externalType;
    // glTexStorage requires sized internal formats on both desktop and ES. ES2 requires an unsized
    // format for glTexImage, unlike ES3 and desktop. However, we allow the driver to decide the
    // size of the internal format whenever possible and so only use a sized internal format when
    // using texture storage.
    if (!this->configToGLFormats(dataConfig, useTexStorage, &internalFormat,
                                 &externalFormat, &externalType)) {
        return false;
    }

    if (!isNewTexture && GR_GL_PALETTE8_RGBA8 == internalFormat) {
        // paletted textures cannot be updated
        return false;
    }

    /*
     * check whether to allocate a temporary buffer for flipping y or
     * because our srcData has extra bytes past each row. If so, we need
     * to trim those off here, since GL ES may not let us specify
     * GL_UNPACK_ROW_LENGTH.
     */
    bool restoreGLRowLength = false;
    bool swFlipY = false;
    bool glFlipY = false;
    if (NULL != data) {
        if (kBottomLeft_GrSurfaceOrigin == desc.fOrigin) {
            if (this->glCaps().unpackFlipYSupport()) {
                glFlipY = true;
            } else {
                swFlipY = true;
            }
        }
        if (this->glCaps().unpackRowLengthSupport() && !swFlipY) {
            // can't use this for flipping, only non-neg values allowed. :(
            if (rowBytes != trimRowBytes) {
                GrGLint rowLength = static_cast<GrGLint>(rowBytes / bpp);
                GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH, rowLength));
                restoreGLRowLength = true;
            }
        } else {
            if (trimRowBytes != rowBytes || swFlipY) {
                // copy data into our new storage, skipping the trailing bytes
                size_t trimSize = height * trimRowBytes;
                const char* src = (const char*)data;
                if (swFlipY) {
                    src += (height - 1) * rowBytes;
                }
                char* dst = (char*)tempStorage.reset(trimSize);
                for (int y = 0; y < height; y++) {
                    memcpy(dst, src, trimRowBytes);
                    if (swFlipY) {
                        src -= rowBytes;
                    } else {
                        src += rowBytes;
                    }
                    dst += trimRowBytes;
                }
                // now point data to our copied version
                data = tempStorage.get();
            }
        }
        if (glFlipY) {
            GL_CALL(PixelStorei(GR_GL_UNPACK_FLIP_Y, GR_GL_TRUE));
        }
        GL_CALL(PixelStorei(GR_GL_UNPACK_ALIGNMENT, static_cast<GrGLint>(bpp)));
    }
    bool succeeded = true;
    if (isNewTexture &&
        0 == left && 0 == top &&
        desc.fWidth == width && desc.fHeight == height) {
        CLEAR_ERROR_BEFORE_ALLOC(this->glInterface());
        if (useTexStorage) {
            // We never resize or change formats of textures.
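            // A single immutable level allocated with TexStorage2D is therefore sufficient.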
            GL_ALLOC_CALL(this->glInterface(),
                          TexStorage2D(GR_GL_TEXTURE_2D,
                                       1, // levels
                                       internalFormat,
                                       desc.fWidth, desc.fHeight));
        } else {
            if (GR_GL_PALETTE8_RGBA8 == internalFormat) {
                GrGLsizei imageSize = desc.fWidth * desc.fHeight +
                                      kGrColorTableSize;
                GL_ALLOC_CALL(this->glInterface(),
                              CompressedTexImage2D(GR_GL_TEXTURE_2D,
                                                   0, // level
                                                   internalFormat,
                                                   desc.fWidth, desc.fHeight,
                                                   0, // border
                                                   imageSize,
                                                   data));
            } else {
                GL_ALLOC_CALL(this->glInterface(),
                              TexImage2D(GR_GL_TEXTURE_2D,
                                         0, // level
                                         internalFormat,
                                         desc.fWidth, desc.fHeight,
                                         0, // border
                                         externalFormat, externalType,
                                         data));
            }
        }
        GrGLenum error = check_alloc_error(desc, this->glInterface());
        if (error != GR_GL_NO_ERROR) {
            succeeded = false;
        } else {
            // if we have data and we used TexStorage to create the texture, we
            // now upload with TexSubImage.
            if (NULL != data && useTexStorage) {
                GL_CALL(TexSubImage2D(GR_GL_TEXTURE_2D,
                                      0, // level
                                      left, top,
                                      width, height,
                                      externalFormat, externalType,
                                      data));
            }
        }
    } else {
        if (swFlipY || glFlipY) {
            top = desc.fHeight - (top + height);
        }
        GL_CALL(TexSubImage2D(GR_GL_TEXTURE_2D,
                              0, // level
                              left, top,
                              width, height,
                              externalFormat, externalType, data));
    }

    if (restoreGLRowLength) {
        SkASSERT(this->glCaps().unpackRowLengthSupport());
        GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH, 0));
    }
    if (glFlipY) {
        GL_CALL(PixelStorei(GR_GL_UNPACK_FLIP_Y, GR_GL_FALSE));
    }
    return succeeded;
}

static bool renderbuffer_storage_msaa(GrGLContext& ctx,
                                      int sampleCount,
                                      GrGLenum format,
                                      int width, int height) {
    CLEAR_ERROR_BEFORE_ALLOC(ctx.interface());
    SkASSERT(GrGLCaps::kNone_MSFBOType != ctx.caps()->msFBOType());
    switch (ctx.caps()->msFBOType()) {
        case GrGLCaps::kDesktop_ARB_MSFBOType:
        case GrGLCaps::kDesktop_EXT_MSFBOType:
        case GrGLCaps::kES_3_0_MSFBOType:
            GL_ALLOC_CALL(ctx.interface(),
                          RenderbufferStorageMultisample(GR_GL_RENDERBUFFER,
                                                         sampleCount,
                                                         format,
                                                         width, height));
            break;
        case GrGLCaps::kES_Apple_MSFBOType:
            GL_ALLOC_CALL(ctx.interface(),
                          RenderbufferStorageMultisampleES2APPLE(GR_GL_RENDERBUFFER,
                                                                 sampleCount,
                                                                 format,
                                                                 width, height));
            break;
        case GrGLCaps::kES_EXT_MsToTexture_MSFBOType:
        case GrGLCaps::kES_IMG_MsToTexture_MSFBOType:
            GL_ALLOC_CALL(ctx.interface(),
                          RenderbufferStorageMultisampleES2EXT(GR_GL_RENDERBUFFER,
                                                               sampleCount,
                                                               format,
                                                               width, height));
            break;
        case GrGLCaps::kNone_MSFBOType:
            GrCrash("Shouldn't be here if we don't support multisampled renderbuffers.");
            break;
    }
    return (GR_GL_NO_ERROR == CHECK_ALLOC_ERROR(ctx.interface()));
}

bool GrGpuGL::createRenderTargetObjects(int width, int height,
                                        GrGLuint texID,
                                        GrGLRenderTarget::Desc* desc) {
    desc->fMSColorRenderbufferID = 0;
    desc->fRTFBOID = 0;
    desc->fTexFBOID = 0;
    desc->fIsWrapped = false;

    GrGLenum status;

    GrGLenum msColorFormat = 0; // suppress warning

    if (desc->fSampleCnt > 0 && GrGLCaps::kNone_MSFBOType == this->glCaps().msFBOType()) {
        goto FAILED;
    }

    GL_CALL(GenFramebuffers(1, &desc->fTexFBOID));
    if (!desc->fTexFBOID) {
        goto FAILED;
    }

    // If we are using multisampling we will create two FBOs. We render to one and then resolve to
    // the texture bound to the other. The exception is the IMG multisample extension. With this
    // extension the texture is multisampled when rendered to and then auto-resolves it when it is
    // rendered from.
    if (desc->fSampleCnt > 0 && this->glCaps().usesMSAARenderBuffers()) {
        GL_CALL(GenFramebuffers(1, &desc->fRTFBOID));
        GL_CALL(GenRenderbuffers(1, &desc->fMSColorRenderbufferID));
        if (!desc->fRTFBOID ||
            !desc->fMSColorRenderbufferID ||
            !this->configToGLFormats(desc->fConfig,
                                     // ES2 and ES3 require sized internal formats for rb storage.
                                     kGLES_GrGLStandard == this->glStandard(),
                                     &msColorFormat,
                                     NULL,
                                     NULL)) {
            goto FAILED;
        }
    } else {
        desc->fRTFBOID = desc->fTexFBOID;
    }

    // below here we may bind the FBO
    fHWBoundRenderTarget = NULL;
    if (desc->fRTFBOID != desc->fTexFBOID) {
        SkASSERT(desc->fSampleCnt > 0);
        GL_CALL(BindRenderbuffer(GR_GL_RENDERBUFFER,
                                 desc->fMSColorRenderbufferID));
        if (!renderbuffer_storage_msaa(fGLContext,
                                       desc->fSampleCnt,
                                       msColorFormat,
                                       width, height)) {
            goto FAILED;
        }
        GL_CALL(BindFramebuffer(GR_GL_FRAMEBUFFER, desc->fRTFBOID));
        GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
                                        GR_GL_COLOR_ATTACHMENT0,
                                        GR_GL_RENDERBUFFER,
                                        desc->fMSColorRenderbufferID));
        if (desc->fCheckAllocation ||
            !this->glCaps().isConfigVerifiedColorAttachment(desc->fConfig)) {
            GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER));
            if (status != GR_GL_FRAMEBUFFER_COMPLETE) {
                goto FAILED;
            }
            fGLContext.caps()->markConfigAsValidColorAttachment(desc->fConfig);
        }
    }
    GL_CALL(BindFramebuffer(GR_GL_FRAMEBUFFER, desc->fTexFBOID));

    if (this->glCaps().usesImplicitMSAAResolve() && desc->fSampleCnt > 0) {
        GL_CALL(FramebufferTexture2DMultisample(GR_GL_FRAMEBUFFER,
                                                GR_GL_COLOR_ATTACHMENT0,
                                                GR_GL_TEXTURE_2D,
                                                texID, 0, desc->fSampleCnt));
    } else {
        GL_CALL(FramebufferTexture2D(GR_GL_FRAMEBUFFER,
                                     GR_GL_COLOR_ATTACHMENT0,
                                     GR_GL_TEXTURE_2D,
                                     texID, 0));
    }
    if (desc->fCheckAllocation ||
        !this->glCaps().isConfigVerifiedColorAttachment(desc->fConfig)) {
        GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER));
        if (status != GR_GL_FRAMEBUFFER_COMPLETE) {
            goto FAILED;
        }
        fGLContext.caps()->markConfigAsValidColorAttachment(desc->fConfig);
    }

    return true;

FAILED:
    if (desc->fMSColorRenderbufferID) {
        GL_CALL(DeleteRenderbuffers(1, &desc->fMSColorRenderbufferID));
    }
    if (desc->fRTFBOID != desc->fTexFBOID) {
        GL_CALL(DeleteFramebuffers(1, &desc->fRTFBOID));
    }
    if (desc->fTexFBOID) {
        GL_CALL(DeleteFramebuffers(1, &desc->fTexFBOID));
    }
    return false;
}

// good to set a break-point here to know when createTexture fails
static GrTexture* return_null_texture() {
    // SkDEBUGFAIL("null texture");
    return NULL;
}

#if 0 && defined(SK_DEBUG)
static size_t as_size_t(int x) {
    return x;
}
#endif

GrTexture* GrGpuGL::onCreateTexture(const GrTextureDesc& desc,
                                    const void* srcData,
                                    size_t rowBytes) {

    GrGLTexture::Desc glTexDesc;
    GrGLRenderTarget::Desc glRTDesc;

    // Attempt to catch un- or wrongly initialized sample counts.
    SkASSERT(desc.fSampleCnt >= 0 && desc.fSampleCnt <= 64);
    // We fail if MSAA was requested and is not available.
    if (GrGLCaps::kNone_MSFBOType == this->glCaps().msFBOType() && desc.fSampleCnt) {
        //GrPrintf("MSAA RT requested but not supported on this platform.");
        return return_null_texture();
    }
    // If the sample count exceeds the max then we clamp it.
    glTexDesc.fSampleCnt = GrMin(desc.fSampleCnt, this->caps()->maxSampleCount());

    glTexDesc.fFlags = desc.fFlags;
    glTexDesc.fWidth = desc.fWidth;
    glTexDesc.fHeight = desc.fHeight;
    glTexDesc.fConfig = desc.fConfig;
    glTexDesc.fIsWrapped = false;

    glRTDesc.fMSColorRenderbufferID = 0;
    glRTDesc.fRTFBOID = 0;
    glRTDesc.fTexFBOID = 0;
    glRTDesc.fIsWrapped = false;
    glRTDesc.fConfig = glTexDesc.fConfig;
    glRTDesc.fCheckAllocation = SkToBool(desc.fFlags & kCheckAllocation_GrTextureFlagBit);

    bool renderTarget = SkToBool(desc.fFlags & kRenderTarget_GrTextureFlagBit);

    glTexDesc.fOrigin = resolve_origin(desc.fOrigin, renderTarget);
    glRTDesc.fOrigin = glTexDesc.fOrigin;

    glRTDesc.fSampleCnt = glTexDesc.fSampleCnt;
    if (GrGLCaps::kNone_MSFBOType == this->glCaps().msFBOType() &&
        desc.fSampleCnt) {
        //GrPrintf("MSAA RT requested but not supported on this platform.");
        return return_null_texture();
    }

    if (renderTarget) {
        int maxRTSize = this->caps()->maxRenderTargetSize();
        if (glTexDesc.fWidth > maxRTSize || glTexDesc.fHeight > maxRTSize) {
            return return_null_texture();
        }
    } else {
        int maxSize = this->caps()->maxTextureSize();
        if (glTexDesc.fWidth > maxSize || glTexDesc.fHeight > maxSize) {
            return return_null_texture();
        }
    }

    GL_CALL(GenTextures(1, &glTexDesc.fTextureID));

    if (!glTexDesc.fTextureID) {
        return return_null_texture();
    }

    this->setScratchTextureUnit();
    GL_CALL(BindTexture(GR_GL_TEXTURE_2D, glTexDesc.fTextureID));

    if (renderTarget && this->glCaps().textureUsageSupport()) {
        // provides a hint about how this texture will be used
        GL_CALL(TexParameteri(GR_GL_TEXTURE_2D,
                              GR_GL_TEXTURE_USAGE,
                              GR_GL_FRAMEBUFFER_ATTACHMENT));
    }

    // Some drivers like to know filter/wrap before seeing glTexImage2D. Some
    // drivers have a bug where an FBO won't be complete if it includes a
    // texture that is not mipmap complete (considering the filter in use).
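    // So set conservative NEAREST/CLAMP_TO_EDGE params up front, before uploadTexData
    // makes any TexImage2D calls.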
    GrGLTexture::TexParams initialTexParams;
    // we only set a subset here so invalidate first
    initialTexParams.invalidate();
    initialTexParams.fMinFilter = GR_GL_NEAREST;
    initialTexParams.fMagFilter = GR_GL_NEAREST;
    initialTexParams.fWrapS = GR_GL_CLAMP_TO_EDGE;
    initialTexParams.fWrapT = GR_GL_CLAMP_TO_EDGE;
    GL_CALL(TexParameteri(GR_GL_TEXTURE_2D,
                          GR_GL_TEXTURE_MAG_FILTER,
                          initialTexParams.fMagFilter));
    GL_CALL(TexParameteri(GR_GL_TEXTURE_2D,
                          GR_GL_TEXTURE_MIN_FILTER,
                          initialTexParams.fMinFilter));
    GL_CALL(TexParameteri(GR_GL_TEXTURE_2D,
                          GR_GL_TEXTURE_WRAP_S,
                          initialTexParams.fWrapS));
    GL_CALL(TexParameteri(GR_GL_TEXTURE_2D,
                          GR_GL_TEXTURE_WRAP_T,
                          initialTexParams.fWrapT));
    if (!this->uploadTexData(glTexDesc, true, 0, 0,
                             glTexDesc.fWidth, glTexDesc.fHeight,
                             desc.fConfig, srcData, rowBytes)) {
        GL_CALL(DeleteTextures(1, &glTexDesc.fTextureID));
        return return_null_texture();
    }

    GrGLTexture* tex;
    if (renderTarget) {
        // unbind the texture from the texture unit before binding it to the frame buffer
        GL_CALL(BindTexture(GR_GL_TEXTURE_2D, 0));

        if (!this->createRenderTargetObjects(glTexDesc.fWidth,
                                             glTexDesc.fHeight,
                                             glTexDesc.fTextureID,
                                             &glRTDesc)) {
            GL_CALL(DeleteTextures(1, &glTexDesc.fTextureID));
            return return_null_texture();
        }
        tex = SkNEW_ARGS(GrGLTexture, (this, glTexDesc, glRTDesc));
    } else {
        tex = SkNEW_ARGS(GrGLTexture, (this, glTexDesc));
    }
    tex->setCachedTexParams(initialTexParams, this->getResetTimestamp());
#ifdef TRACE_TEXTURE_CREATION
    GrPrintf("--- new texture [%d] size=(%d %d) config=%d\n",
             glTexDesc.fTextureID, desc.fWidth, desc.fHeight, desc.fConfig);
#endif
    return tex;
}

namespace {

const GrGLuint kUnknownBitCount = GrGLStencilBuffer::kUnknownBitCount;

void inline get_stencil_rb_sizes(const GrGLInterface* gl,
                                 GrGLStencilBuffer::Format* format) {

    // we shouldn't ever know one size and not the other
    SkASSERT((kUnknownBitCount == format->fStencilBits) ==
             (kUnknownBitCount == format->fTotalBits));
    if (kUnknownBitCount == format->fStencilBits) {
        GR_GL_GetRenderbufferParameteriv(gl, GR_GL_RENDERBUFFER,
                                         GR_GL_RENDERBUFFER_STENCIL_SIZE,
                                         (GrGLint*)&format->fStencilBits);
        if (format->fPacked) {
            GR_GL_GetRenderbufferParameteriv(gl, GR_GL_RENDERBUFFER,
                                             GR_GL_RENDERBUFFER_DEPTH_SIZE,
                                             (GrGLint*)&format->fTotalBits);
            format->fTotalBits += format->fStencilBits;
        } else {
            format->fTotalBits = format->fStencilBits;
        }
    }
}
}

bool GrGpuGL::createStencilBufferForRenderTarget(GrRenderTarget* rt,
                                                 int width, int height) {

    // All internally created RTs are also textures. We don't create
    // SBs for a client's standalone RT (that is a RT that isn't also a texture).
    SkASSERT(rt->asTexture());
    SkASSERT(width >= rt->width());
    SkASSERT(height >= rt->height());

    int samples = rt->numSamples();
    GrGLuint sbID;
    GL_CALL(GenRenderbuffers(1, &sbID));
    if (!sbID) {
        return false;
    }

    int stencilFmtCnt = this->glCaps().stencilFormats().count();
    for (int i = 0; i < stencilFmtCnt; ++i) {
        GL_CALL(BindRenderbuffer(GR_GL_RENDERBUFFER, sbID));
        // we start with the last stencil format that succeeded in hopes
        // that we won't go through this loop more than once after the
        // first (painful) stencil creation.
        int sIdx = (i + fLastSuccessfulStencilFmtIdx) % stencilFmtCnt;
        const GrGLCaps::StencilFormat& sFmt =
            this->glCaps().stencilFormats()[sIdx];
        CLEAR_ERROR_BEFORE_ALLOC(this->glInterface());
        // we do this "if" so that we don't call the multisample
        // version on a GL that doesn't have an MSAA extension.
        bool created;
        if (samples > 0) {
            created = renderbuffer_storage_msaa(fGLContext,
                                                samples,
                                                sFmt.fInternalFormat,
                                                width, height);
        } else {
            GL_ALLOC_CALL(this->glInterface(),
                          RenderbufferStorage(GR_GL_RENDERBUFFER,
                                              sFmt.fInternalFormat,
                                              width, height));
            created =
                (GR_GL_NO_ERROR == check_alloc_error(rt->desc(), this->glInterface()));
        }
        if (created) {
            // After sized formats we attempt an unsized format and take
            // whatever sizes GL gives us. In that case we query for the size.
            GrGLStencilBuffer::Format format = sFmt;
            get_stencil_rb_sizes(this->glInterface(), &format);
            static const bool kIsWrapped = false;
            SkAutoTUnref<GrGLStencilBuffer> sb(SkNEW_ARGS(GrGLStencilBuffer,
                                                          (this, kIsWrapped, sbID, width, height,
                                                           samples, format)));
            if (this->attachStencilBufferToRenderTarget(sb, rt)) {
                fLastSuccessfulStencilFmtIdx = sIdx;
                sb->transferToCache();
                rt->setStencilBuffer(sb);
                return true;
            }
            sb->abandon(); // otherwise we lose sbID
        }
    }
    GL_CALL(DeleteRenderbuffers(1, &sbID));
    return false;
}

bool GrGpuGL::attachStencilBufferToRenderTarget(GrStencilBuffer* sb, GrRenderTarget* rt) {
    GrGLRenderTarget* glrt = (GrGLRenderTarget*) rt;

    GrGLuint fbo = glrt->renderFBOID();

    if (NULL == sb) {
        if (NULL != rt->getStencilBuffer()) {
            GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
                                            GR_GL_STENCIL_ATTACHMENT,
                                            GR_GL_RENDERBUFFER, 0));
            GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
                                            GR_GL_DEPTH_ATTACHMENT,
                                            GR_GL_RENDERBUFFER, 0));
#ifdef SK_DEBUG
            GrGLenum status;
            GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER));
            SkASSERT(GR_GL_FRAMEBUFFER_COMPLETE == status);
#endif
        }
        return true;
    } else {
        GrGLStencilBuffer* glsb = static_cast<GrGLStencilBuffer*>(sb);
        GrGLuint rb = glsb->renderbufferID();

        fHWBoundRenderTarget = NULL;
        GL_CALL(BindFramebuffer(GR_GL_FRAMEBUFFER, fbo));
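        // Attach the renderbuffer to the stencil attachment; packed depth-stencil
        // formats must also be bound to the depth attachment.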
        GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
                                        GR_GL_STENCIL_ATTACHMENT,
                                        GR_GL_RENDERBUFFER, rb));
        if (glsb->format().fPacked) {
            GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
                                            GR_GL_DEPTH_ATTACHMENT,
                                            GR_GL_RENDERBUFFER, rb));
        } else {
            GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
                                            GR_GL_DEPTH_ATTACHMENT,
                                            GR_GL_RENDERBUFFER, 0));
        }

        GrGLenum status;
        if (!this->glCaps().isColorConfigAndStencilFormatVerified(rt->config(), glsb->format())) {
            GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER));
            if (status != GR_GL_FRAMEBUFFER_COMPLETE) {
                GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
                                                GR_GL_STENCIL_ATTACHMENT,
                                                GR_GL_RENDERBUFFER, 0));
                if (glsb->format().fPacked) {
                    GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
                                                    GR_GL_DEPTH_ATTACHMENT,
                                                    GR_GL_RENDERBUFFER, 0));
                }
                return false;
            } else {
                fGLContext.caps()->markColorConfigAndStencilFormatAsVerified(
                    rt->config(),
                    glsb->format());
            }
        }
        return true;
    }
}

////////////////////////////////////////////////////////////////////////////////

GrVertexBuffer* GrGpuGL::onCreateVertexBuffer(size_t size, bool dynamic) {
    GrGLVertexBuffer::Desc desc;
    desc.fDynamic = dynamic;
    desc.fSizeInBytes = size;
    desc.fIsWrapped = false;

    if (this->glCaps().useNonVBOVertexAndIndexDynamicData() && desc.fDynamic) {
        desc.fID = 0;
        GrGLVertexBuffer* vertexBuffer = SkNEW_ARGS(GrGLVertexBuffer, (this, desc));
        return vertexBuffer;
    } else {
        GL_CALL(GenBuffers(1, &desc.fID));
        if (desc.fID) {
            fHWGeometryState.setVertexBufferID(this, desc.fID);
            CLEAR_ERROR_BEFORE_ALLOC(this->glInterface());
            // make sure driver can allocate memory for this buffer
            GL_ALLOC_CALL(this->glInterface(),
                          BufferData(GR_GL_ARRAY_BUFFER,
                                     (GrGLsizeiptr) desc.fSizeInBytes,
                                     NULL,   // data ptr
                                     desc.fDynamic ? GR_GL_DYNAMIC_DRAW : GR_GL_STATIC_DRAW));
            if (CHECK_ALLOC_ERROR(this->glInterface()) != GR_GL_NO_ERROR) {
                GL_CALL(DeleteBuffers(1, &desc.fID));
                this->notifyVertexBufferDelete(desc.fID);
                return NULL;
            }
            GrGLVertexBuffer* vertexBuffer = SkNEW_ARGS(GrGLVertexBuffer, (this, desc));
            return vertexBuffer;
        }
        return NULL;
    }
}

GrIndexBuffer* GrGpuGL::onCreateIndexBuffer(size_t size, bool dynamic) {
    GrGLIndexBuffer::Desc desc;
    desc.fDynamic = dynamic;
    desc.fSizeInBytes = size;
    desc.fIsWrapped = false;

    if (this->glCaps().useNonVBOVertexAndIndexDynamicData() && desc.fDynamic) {
        desc.fID = 0;
        GrIndexBuffer* indexBuffer = SkNEW_ARGS(GrGLIndexBuffer, (this, desc));
        return indexBuffer;
    } else {
        GL_CALL(GenBuffers(1, &desc.fID));
        if (desc.fID) {
            fHWGeometryState.setIndexBufferIDOnDefaultVertexArray(this, desc.fID);
            CLEAR_ERROR_BEFORE_ALLOC(this->glInterface());
            // make sure driver can allocate memory for this buffer
            GL_ALLOC_CALL(this->glInterface(),
                          BufferData(GR_GL_ELEMENT_ARRAY_BUFFER,
                                     (GrGLsizeiptr) desc.fSizeInBytes,
                                     NULL,   // data ptr
                                     desc.fDynamic ? GR_GL_DYNAMIC_DRAW : GR_GL_STATIC_DRAW));
            if (CHECK_ALLOC_ERROR(this->glInterface()) != GR_GL_NO_ERROR) {
                GL_CALL(DeleteBuffers(1, &desc.fID));
                this->notifyIndexBufferDelete(desc.fID);
                return NULL;
            }
            GrIndexBuffer* indexBuffer = SkNEW_ARGS(GrGLIndexBuffer, (this, desc));
            return indexBuffer;
        }
        return NULL;
    }
}

GrPath* GrGpuGL::onCreatePath(const SkPath& inPath, const SkStrokeRec& stroke) {
    SkASSERT(this->caps()->pathRenderingSupport());
    return SkNEW_ARGS(GrGLPath, (this, inPath, stroke));
}

void GrGpuGL::flushScissor() {
    if (fScissorState.fEnabled) {
        // Only access the RT if scissoring is being enabled. We can call this before performing
        // a glBlitFramebuffer for a surface->surface copy, which requires no RT to be bound to the
        // GrDrawState.
        const GrDrawState& drawState = this->getDrawState();
        const GrGLRenderTarget* rt =
            static_cast<const GrGLRenderTarget*>(drawState.getRenderTarget());

        SkASSERT(NULL != rt);
        const GrGLIRect& vp = rt->getViewport();
        GrGLIRect scissor;
        scissor.setRelativeTo(vp,
                              fScissorState.fRect.fLeft,
                              fScissorState.fRect.fTop,
                              fScissorState.fRect.width(),
                              fScissorState.fRect.height(),
                              rt->origin());
        // if the scissor fully contains the viewport then we fall through and
        // disable the scissor test.
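        // (the GL scissor test only needs to be enabled when the scissor rect
        // actually clips the viewport)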
        if (!scissor.contains(vp)) {
            if (fHWScissorSettings.fRect != scissor) {
                scissor.pushToGLScissor(this->glInterface());
                fHWScissorSettings.fRect = scissor;
            }
            if (kYes_TriState != fHWScissorSettings.fEnabled) {
                GL_CALL(Enable(GR_GL_SCISSOR_TEST));
                fHWScissorSettings.fEnabled = kYes_TriState;
            }
            return;
        }
    }
    if (kNo_TriState != fHWScissorSettings.fEnabled) {
        GL_CALL(Disable(GR_GL_SCISSOR_TEST));
        fHWScissorSettings.fEnabled = kNo_TriState;
        return;
    }
}

void GrGpuGL::onClear(const SkIRect* rect, GrColor color, bool canIgnoreRect) {
    const GrDrawState& drawState = this->getDrawState();
    const GrRenderTarget* rt = drawState.getRenderTarget();
    // parent class should never let us get here with no RT
    SkASSERT(NULL != rt);

    if (canIgnoreRect && this->glCaps().fullClearIsFree()) {
        rect = NULL;
    }

    SkIRect clippedRect;
    if (NULL != rect) {
        // flushScissor expects rect to be clipped to the target.
        clippedRect = *rect;
        SkIRect rtRect = SkIRect::MakeWH(rt->width(), rt->height());
        if (clippedRect.intersect(rtRect)) {
            rect = &clippedRect;
        } else {
            return;
        }
    }

    this->flushRenderTarget(rect);
    GrAutoTRestore<ScissorState> asr(&fScissorState);
    fScissorState.fEnabled = (NULL != rect);
    if (fScissorState.fEnabled) {
        fScissorState.fRect = *rect;
    }
    this->flushScissor();

    GrGLfloat r, g, b, a;
    static const GrGLfloat scale255 = 1.f / 255.f;
    a = GrColorUnpackA(color) * scale255;
    GrGLfloat scaleRGB = scale255;
    r = GrColorUnpackR(color) * scaleRGB;
    g = GrColorUnpackG(color) * scaleRGB;
    b = GrColorUnpackB(color) * scaleRGB;

    GL_CALL(ColorMask(GR_GL_TRUE, GR_GL_TRUE, GR_GL_TRUE, GR_GL_TRUE));
    fHWWriteToColor = kYes_TriState;
    GL_CALL(ClearColor(r, g, b, a));
    GL_CALL(Clear(GR_GL_COLOR_BUFFER_BIT));
}

void GrGpuGL::clearStencil() {
    if (NULL == this->getDrawState().getRenderTarget()) {
        return;
    }

    this->flushRenderTarget(&SkIRect::EmptyIRect());

    GrAutoTRestore<ScissorState> asr(&fScissorState);
    fScissorState.fEnabled = false;
    this->flushScissor();

    GL_CALL(StencilMask(0xffffffff));
    GL_CALL(ClearStencil(0));
    GL_CALL(Clear(GR_GL_STENCIL_BUFFER_BIT));
    fHWStencilSettings.invalidate();
}

void GrGpuGL::clearStencilClip(const SkIRect& rect, bool insideClip) {
    const GrDrawState& drawState = this->getDrawState();
    const GrRenderTarget* rt = drawState.getRenderTarget();
    SkASSERT(NULL != rt);

    // this should only be called internally when we know we have a
    // stencil buffer.
    SkASSERT(NULL != rt->getStencilBuffer());
    GrGLint stencilBitCount = rt->getStencilBuffer()->bits();
#if 0
    SkASSERT(stencilBitCount > 0);
    GrGLint clipStencilMask = (1 << (stencilBitCount - 1));
#else
    // we could just clear the clip bit but when we go through
    // ANGLE a partial stencil mask will cause clears to be
    // turned into draws. Our contract on GrDrawTarget says that
    // changing the clip between stencil passes may or may not
    // zero the client's clip bits. So we just clear the whole thing.
    static const GrGLint clipStencilMask = ~0;
#endif
    GrGLint value;
    if (insideClip) {
        value = (1 << (stencilBitCount - 1));
    } else {
        value = 0;
    }
    this->flushRenderTarget(&SkIRect::EmptyIRect());

    GrAutoTRestore<ScissorState> asr(&fScissorState);
    fScissorState.fEnabled = true;
    fScissorState.fRect = rect;
    this->flushScissor();

    GL_CALL(StencilMask((uint32_t) clipStencilMask));
    GL_CALL(ClearStencil(value));
    GL_CALL(Clear(GR_GL_STENCIL_BUFFER_BIT));
    fHWStencilSettings.invalidate();
}

void GrGpuGL::onForceRenderTargetFlush() {
    this->flushRenderTarget(&SkIRect::EmptyIRect());
}

bool GrGpuGL::readPixelsWillPayForYFlip(GrRenderTarget* renderTarget,
                                        int left, int top,
                                        int width, int height,
                                        GrPixelConfig config,
                                        size_t rowBytes) const {
    // If this rendertarget is already TopLeft, we don't need to flip.
    if (kTopLeft_GrSurfaceOrigin == renderTarget->origin()) {
        return false;
    }

    // if GL can do the flip then we'll never pay for it.
    if (this->glCaps().packFlipYSupport()) {
        return false;
    }

    // If we have to do memcpy to handle non-trim rowBytes then we
    // get the flip for free. Otherwise it costs.
    if (this->glCaps().packRowLengthSupport()) {
        return true;
    }
    // If we have to do memcpys to handle rowBytes then y-flip is free
    // Note the rowBytes might be tight to the passed in data, but if data
    // gets clipped in x to the target the rowBytes will no longer be tight.
    if (left >= 0 && (left + width) < renderTarget->width()) {
        return 0 == rowBytes ||
               GrBytesPerPixel(config) * width == rowBytes;
    } else {
        return false;
    }
}

bool GrGpuGL::onReadPixels(GrRenderTarget* target,
                           int left, int top,
                           int width, int height,
                           GrPixelConfig config,
                           void* buffer,
                           size_t rowBytes) {
    GrGLenum format;
    GrGLenum type;
    bool flipY = kBottomLeft_GrSurfaceOrigin == target->origin();
    if (!this->configToGLFormats(config, false, NULL, &format, &type)) {
        return false;
    }
    size_t bpp = GrBytesPerPixel(config);
    if (!adjust_pixel_ops_params(target->width(), target->height(), bpp,
                                 &left, &top, &width, &height,
                                 const_cast<const void**>(&buffer),
                                 &rowBytes)) {
        return false;
    }

    // resolve the render target if necessary
    GrGLRenderTarget* tgt = static_cast<GrGLRenderTarget*>(target);
    GrDrawState::AutoRenderTargetRestore artr;
    switch (tgt->getResolveType()) {
        case GrGLRenderTarget::kCantResolve_ResolveType:
            return false;
        case GrGLRenderTarget::kAutoResolves_ResolveType:
            artr.set(this->drawState(), target);
            this->flushRenderTarget(&SkIRect::EmptyIRect());
            break;
        case GrGLRenderTarget::kCanResolve_ResolveType:
            this->onResolveRenderTarget(tgt);
            // we don't track the state of the READ FBO ID.
            GL_CALL(BindFramebuffer(GR_GL_READ_FRAMEBUFFER,
                                    tgt->textureFBOID()));
            break;
        default:
            GrCrash("Unknown resolve type");
    }

    const GrGLIRect& glvp = tgt->getViewport();

    // the read rect is viewport-relative
    GrGLIRect readRect;
    readRect.setRelativeTo(glvp, left, top, width, height, target->origin());

    size_t tightRowBytes = bpp * width;
    if (0 == rowBytes) {
        rowBytes = tightRowBytes;
    }
    size_t readDstRowBytes = tightRowBytes;
    void* readDst = buffer;

    // determine if GL can read using the passed rowBytes or if we need
    // a scratch buffer.
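    // With GR_GL_PACK_ROW_LENGTH support GL packs rows at the caller's pitch directly;
    // otherwise rows are read tightly packed into scratch and copied out below.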
michael@0: SkAutoSMalloc<32 * sizeof(GrColor)> scratch; michael@0: if (rowBytes != tightRowBytes) { michael@0: if (this->glCaps().packRowLengthSupport()) { michael@0: SkASSERT(!(rowBytes % sizeof(GrColor))); michael@0: GL_CALL(PixelStorei(GR_GL_PACK_ROW_LENGTH, michael@0: static_cast(rowBytes / sizeof(GrColor)))); michael@0: readDstRowBytes = rowBytes; michael@0: } else { michael@0: scratch.reset(tightRowBytes * height); michael@0: readDst = scratch.get(); michael@0: } michael@0: } michael@0: if (flipY && this->glCaps().packFlipYSupport()) { michael@0: GL_CALL(PixelStorei(GR_GL_PACK_REVERSE_ROW_ORDER, 1)); michael@0: } michael@0: GL_CALL(ReadPixels(readRect.fLeft, readRect.fBottom, michael@0: readRect.fWidth, readRect.fHeight, michael@0: format, type, readDst)); michael@0: if (readDstRowBytes != tightRowBytes) { michael@0: SkASSERT(this->glCaps().packRowLengthSupport()); michael@0: GL_CALL(PixelStorei(GR_GL_PACK_ROW_LENGTH, 0)); michael@0: } michael@0: if (flipY && this->glCaps().packFlipYSupport()) { michael@0: GL_CALL(PixelStorei(GR_GL_PACK_REVERSE_ROW_ORDER, 0)); michael@0: flipY = false; michael@0: } michael@0: michael@0: // now reverse the order of the rows, since GL's are bottom-to-top, but our michael@0: // API presents top-to-bottom. We must preserve the padding contents. Note michael@0: // that the above readPixels did not overwrite the padding. michael@0: if (readDst == buffer) { michael@0: SkASSERT(rowBytes == readDstRowBytes); michael@0: if (flipY) { michael@0: scratch.reset(tightRowBytes); michael@0: void* tmpRow = scratch.get(); michael@0: // flip y in-place by rows michael@0: const int halfY = height >> 1; michael@0: char* top = reinterpret_cast(buffer); michael@0: char* bottom = top + (height - 1) * rowBytes; michael@0: for (int y = 0; y < halfY; y++) { michael@0: memcpy(tmpRow, top, tightRowBytes); michael@0: memcpy(top, bottom, tightRowBytes); michael@0: memcpy(bottom, tmpRow, tightRowBytes); michael@0: top += rowBytes; michael@0: bottom -= rowBytes; michael@0: } michael@0: } michael@0: } else { michael@0: SkASSERT(readDst != buffer); SkASSERT(rowBytes != tightRowBytes); michael@0: // copy from readDst to buffer while flipping y michael@0: // const int halfY = height >> 1; michael@0: const char* src = reinterpret_cast(readDst); michael@0: char* dst = reinterpret_cast(buffer); michael@0: if (flipY) { michael@0: dst += (height-1) * rowBytes; michael@0: } michael@0: for (int y = 0; y < height; y++) { michael@0: memcpy(dst, src, tightRowBytes); michael@0: src += readDstRowBytes; michael@0: if (!flipY) { michael@0: dst += rowBytes; michael@0: } else { michael@0: dst -= rowBytes; michael@0: } michael@0: } michael@0: } michael@0: return true; michael@0: } michael@0: michael@0: void GrGpuGL::flushRenderTarget(const SkIRect* bound) { michael@0: michael@0: GrGLRenderTarget* rt = michael@0: static_cast(this->drawState()->getRenderTarget()); michael@0: SkASSERT(NULL != rt); michael@0: michael@0: if (fHWBoundRenderTarget != rt) { michael@0: GL_CALL(BindFramebuffer(GR_GL_FRAMEBUFFER, rt->renderFBOID())); michael@0: #ifdef SK_DEBUG michael@0: // don't do this check in Chromium -- this is causing michael@0: // lots of repeated command buffer flushes when the compositor is michael@0: // rendering with Ganesh, which is really slow; even too slow for michael@0: // Debug mode. 
michael@0: if (!this->glContext().isChromium()) { michael@0: GrGLenum status; michael@0: GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER)); michael@0: if (status != GR_GL_FRAMEBUFFER_COMPLETE) { michael@0: GrPrintf("GrGpuGL::flushRenderTarget glCheckFramebufferStatus %x\n", status); michael@0: } michael@0: } michael@0: #endif michael@0: fHWBoundRenderTarget = rt; michael@0: const GrGLIRect& vp = rt->getViewport(); michael@0: if (fHWViewport != vp) { michael@0: vp.pushToGLViewport(this->glInterface()); michael@0: fHWViewport = vp; michael@0: } michael@0: } michael@0: if (NULL == bound || !bound->isEmpty()) { michael@0: rt->flagAsNeedingResolve(bound); michael@0: } michael@0: michael@0: GrTexture *texture = rt->asTexture(); michael@0: if (texture) { michael@0: texture->dirtyMipMaps(true); michael@0: } michael@0: } michael@0: michael@0: GrGLenum gPrimitiveType2GLMode[] = { michael@0: GR_GL_TRIANGLES, michael@0: GR_GL_TRIANGLE_STRIP, michael@0: GR_GL_TRIANGLE_FAN, michael@0: GR_GL_POINTS, michael@0: GR_GL_LINES, michael@0: GR_GL_LINE_STRIP michael@0: }; michael@0: michael@0: #define SWAP_PER_DRAW 0 michael@0: michael@0: #if SWAP_PER_DRAW michael@0: #if defined(SK_BUILD_FOR_MAC) michael@0: #include michael@0: #elif defined(SK_BUILD_FOR_WIN32) michael@0: #include michael@0: void SwapBuf() { michael@0: DWORD procID = GetCurrentProcessId(); michael@0: HWND hwnd = GetTopWindow(GetDesktopWindow()); michael@0: while(hwnd) { michael@0: DWORD wndProcID = 0; michael@0: GetWindowThreadProcessId(hwnd, &wndProcID); michael@0: if(wndProcID == procID) { michael@0: SwapBuffers(GetDC(hwnd)); michael@0: } michael@0: hwnd = GetNextWindow(hwnd, GW_HWNDNEXT); michael@0: } michael@0: } michael@0: #endif michael@0: #endif michael@0: michael@0: void GrGpuGL::onGpuDraw(const DrawInfo& info) { michael@0: size_t indexOffsetInBytes; michael@0: this->setupGeometry(info, &indexOffsetInBytes); michael@0: michael@0: SkASSERT((size_t)info.primitiveType() < GR_ARRAY_COUNT(gPrimitiveType2GLMode)); michael@0: michael@0: if (info.isIndexed()) { michael@0: GrGLvoid* indices = michael@0: reinterpret_cast(indexOffsetInBytes + sizeof(uint16_t) * info.startIndex()); michael@0: // info.startVertex() was accounted for by setupGeometry. michael@0: GL_CALL(DrawElements(gPrimitiveType2GLMode[info.primitiveType()], michael@0: info.indexCount(), michael@0: GR_GL_UNSIGNED_SHORT, michael@0: indices)); michael@0: } else { michael@0: // Pass 0 for parameter first. We have to adjust glVertexAttribPointer() to account for michael@0: // startVertex in the DrawElements case. So we always rely on setupGeometry to have michael@0: // accounted for startVertex. 
michael@0: GL_CALL(DrawArrays(gPrimitiveType2GLMode[info.primitiveType()], 0, info.vertexCount())); michael@0: } michael@0: #if SWAP_PER_DRAW michael@0: glFlush(); michael@0: #if defined(SK_BUILD_FOR_MAC) michael@0: aglSwapBuffers(aglGetCurrentContext()); michael@0: int set_a_break_pt_here = 9; michael@0: aglSwapBuffers(aglGetCurrentContext()); michael@0: #elif defined(SK_BUILD_FOR_WIN32) michael@0: SwapBuf(); michael@0: int set_a_break_pt_here = 9; michael@0: SwapBuf(); michael@0: #endif michael@0: #endif michael@0: } michael@0: michael@0: static GrGLenum gr_stencil_op_to_gl_path_rendering_fill_mode(GrStencilOp op) { michael@0: switch (op) { michael@0: default: michael@0: GrCrash("Unexpected path fill."); michael@0: /* fallthrough */; michael@0: case kIncClamp_StencilOp: michael@0: return GR_GL_COUNT_UP; michael@0: case kInvert_StencilOp: michael@0: return GR_GL_INVERT; michael@0: } michael@0: } michael@0: michael@0: void GrGpuGL::onGpuStencilPath(const GrPath* path, SkPath::FillType fill) { michael@0: SkASSERT(this->caps()->pathRenderingSupport()); michael@0: michael@0: GrGLuint id = static_cast(path)->pathID(); michael@0: SkASSERT(NULL != this->drawState()->getRenderTarget()); michael@0: SkASSERT(NULL != this->drawState()->getRenderTarget()->getStencilBuffer()); michael@0: michael@0: flushPathStencilSettings(fill); michael@0: michael@0: // Decide how to manipulate the stencil buffer based on the fill rule. michael@0: SkASSERT(!fHWPathStencilSettings.isTwoSided()); michael@0: michael@0: GrGLenum fillMode = michael@0: gr_stencil_op_to_gl_path_rendering_fill_mode(fHWPathStencilSettings.passOp(GrStencilSettings::kFront_Face)); michael@0: GrGLint writeMask = fHWPathStencilSettings.writeMask(GrStencilSettings::kFront_Face); michael@0: GL_CALL(StencilFillPath(id, fillMode, writeMask)); michael@0: } michael@0: michael@0: void GrGpuGL::onGpuDrawPath(const GrPath* path, SkPath::FillType fill) { michael@0: SkASSERT(this->caps()->pathRenderingSupport()); michael@0: michael@0: GrGLuint id = static_cast(path)->pathID(); michael@0: SkASSERT(NULL != this->drawState()->getRenderTarget()); michael@0: SkASSERT(NULL != this->drawState()->getRenderTarget()->getStencilBuffer()); michael@0: SkASSERT(!fCurrentProgram->hasVertexShader()); michael@0: michael@0: flushPathStencilSettings(fill); michael@0: const SkStrokeRec& stroke = path->getStroke(); michael@0: michael@0: SkPath::FillType nonInvertedFill = SkPath::ConvertToNonInverseFillType(fill); michael@0: SkASSERT(!fHWPathStencilSettings.isTwoSided()); michael@0: GrGLenum fillMode = michael@0: gr_stencil_op_to_gl_path_rendering_fill_mode(fHWPathStencilSettings.passOp(GrStencilSettings::kFront_Face)); michael@0: GrGLint writeMask = fHWPathStencilSettings.writeMask(GrStencilSettings::kFront_Face); michael@0: michael@0: if (stroke.isFillStyle() || SkStrokeRec::kStrokeAndFill_Style == stroke.getStyle()) { michael@0: GL_CALL(StencilFillPath(id, fillMode, writeMask)); michael@0: } michael@0: if (stroke.needToApply()) { michael@0: GL_CALL(StencilStrokePath(id, 0xffff, writeMask)); michael@0: } michael@0: michael@0: if (nonInvertedFill == fill) { michael@0: if (stroke.needToApply()) { michael@0: GL_CALL(CoverStrokePath(id, GR_GL_BOUNDING_BOX)); michael@0: } else { michael@0: GL_CALL(CoverFillPath(id, GR_GL_BOUNDING_BOX)); michael@0: } michael@0: } else { michael@0: GrDrawState* drawState = this->drawState(); michael@0: GrDrawState::AutoViewMatrixRestore avmr; michael@0: SkRect bounds = SkRect::MakeLTRB(0, 0, michael@0: 
SkIntToScalar(drawState->getRenderTarget()->width()), michael@0: SkIntToScalar(drawState->getRenderTarget()->height())); michael@0: SkMatrix vmi; michael@0: // mapRect through persp matrix may not be correct michael@0: if (!drawState->getViewMatrix().hasPerspective() && drawState->getViewInverse(&vmi)) { michael@0: vmi.mapRect(&bounds); michael@0: // theoretically could set bloat = 0, instead leave it because of matrix inversion michael@0: // precision. michael@0: SkScalar bloat = drawState->getViewMatrix().getMaxStretch() * SK_ScalarHalf; michael@0: bounds.outset(bloat, bloat); michael@0: } else { michael@0: avmr.setIdentity(drawState); michael@0: } michael@0: michael@0: this->drawSimpleRect(bounds, NULL); michael@0: } michael@0: } michael@0: michael@0: void GrGpuGL::onResolveRenderTarget(GrRenderTarget* target) { michael@0: GrGLRenderTarget* rt = static_cast(target); michael@0: if (rt->needsResolve()) { michael@0: // Some extensions automatically resolves the texture when it is read. michael@0: if (this->glCaps().usesMSAARenderBuffers()) { michael@0: SkASSERT(rt->textureFBOID() != rt->renderFBOID()); michael@0: GL_CALL(BindFramebuffer(GR_GL_READ_FRAMEBUFFER, rt->renderFBOID())); michael@0: GL_CALL(BindFramebuffer(GR_GL_DRAW_FRAMEBUFFER, rt->textureFBOID())); michael@0: // make sure we go through flushRenderTarget() since we've modified michael@0: // the bound DRAW FBO ID. michael@0: fHWBoundRenderTarget = NULL; michael@0: const GrGLIRect& vp = rt->getViewport(); michael@0: const SkIRect dirtyRect = rt->getResolveRect(); michael@0: GrGLIRect r; michael@0: r.setRelativeTo(vp, dirtyRect.fLeft, dirtyRect.fTop, michael@0: dirtyRect.width(), dirtyRect.height(), target->origin()); michael@0: michael@0: GrAutoTRestore asr; michael@0: if (GrGLCaps::kES_Apple_MSFBOType == this->glCaps().msFBOType()) { michael@0: // Apple's extension uses the scissor as the blit bounds. michael@0: asr.reset(&fScissorState); michael@0: fScissorState.fEnabled = true; michael@0: fScissorState.fRect = dirtyRect; michael@0: this->flushScissor(); michael@0: GL_CALL(ResolveMultisampleFramebuffer()); michael@0: } else { michael@0: if (GrGLCaps::kDesktop_EXT_MSFBOType == this->glCaps().msFBOType()) { michael@0: // this respects the scissor during the blit, so disable it. 
michael@0: asr.reset(&fScissorState); michael@0: fScissorState.fEnabled = false; michael@0: this->flushScissor(); michael@0: } michael@0: int right = r.fLeft + r.fWidth; michael@0: int top = r.fBottom + r.fHeight; michael@0: GL_CALL(BlitFramebuffer(r.fLeft, r.fBottom, right, top, michael@0: r.fLeft, r.fBottom, right, top, michael@0: GR_GL_COLOR_BUFFER_BIT, GR_GL_NEAREST)); michael@0: } michael@0: } michael@0: rt->flagAsResolved(); michael@0: } michael@0: } michael@0: michael@0: namespace { michael@0: michael@0: GrGLenum gr_to_gl_stencil_func(GrStencilFunc basicFunc) { michael@0: static const GrGLenum gTable[] = { michael@0: GR_GL_ALWAYS, // kAlways_StencilFunc michael@0: GR_GL_NEVER, // kNever_StencilFunc michael@0: GR_GL_GREATER, // kGreater_StencilFunc michael@0: GR_GL_GEQUAL, // kGEqual_StencilFunc michael@0: GR_GL_LESS, // kLess_StencilFunc michael@0: GR_GL_LEQUAL, // kLEqual_StencilFunc, michael@0: GR_GL_EQUAL, // kEqual_StencilFunc, michael@0: GR_GL_NOTEQUAL, // kNotEqual_StencilFunc, michael@0: }; michael@0: GR_STATIC_ASSERT(GR_ARRAY_COUNT(gTable) == kBasicStencilFuncCount); michael@0: GR_STATIC_ASSERT(0 == kAlways_StencilFunc); michael@0: GR_STATIC_ASSERT(1 == kNever_StencilFunc); michael@0: GR_STATIC_ASSERT(2 == kGreater_StencilFunc); michael@0: GR_STATIC_ASSERT(3 == kGEqual_StencilFunc); michael@0: GR_STATIC_ASSERT(4 == kLess_StencilFunc); michael@0: GR_STATIC_ASSERT(5 == kLEqual_StencilFunc); michael@0: GR_STATIC_ASSERT(6 == kEqual_StencilFunc); michael@0: GR_STATIC_ASSERT(7 == kNotEqual_StencilFunc); michael@0: SkASSERT((unsigned) basicFunc < kBasicStencilFuncCount); michael@0: michael@0: return gTable[basicFunc]; michael@0: } michael@0: michael@0: GrGLenum gr_to_gl_stencil_op(GrStencilOp op) { michael@0: static const GrGLenum gTable[] = { michael@0: GR_GL_KEEP, // kKeep_StencilOp michael@0: GR_GL_REPLACE, // kReplace_StencilOp michael@0: GR_GL_INCR_WRAP, // kIncWrap_StencilOp michael@0: GR_GL_INCR, // kIncClamp_StencilOp michael@0: GR_GL_DECR_WRAP, // kDecWrap_StencilOp michael@0: GR_GL_DECR, // kDecClamp_StencilOp michael@0: GR_GL_ZERO, // kZero_StencilOp michael@0: GR_GL_INVERT, // kInvert_StencilOp michael@0: }; michael@0: GR_STATIC_ASSERT(GR_ARRAY_COUNT(gTable) == kStencilOpCount); michael@0: GR_STATIC_ASSERT(0 == kKeep_StencilOp); michael@0: GR_STATIC_ASSERT(1 == kReplace_StencilOp); michael@0: GR_STATIC_ASSERT(2 == kIncWrap_StencilOp); michael@0: GR_STATIC_ASSERT(3 == kIncClamp_StencilOp); michael@0: GR_STATIC_ASSERT(4 == kDecWrap_StencilOp); michael@0: GR_STATIC_ASSERT(5 == kDecClamp_StencilOp); michael@0: GR_STATIC_ASSERT(6 == kZero_StencilOp); michael@0: GR_STATIC_ASSERT(7 == kInvert_StencilOp); michael@0: SkASSERT((unsigned) op < kStencilOpCount); michael@0: return gTable[op]; michael@0: } michael@0: michael@0: void set_gl_stencil(const GrGLInterface* gl, michael@0: const GrStencilSettings& settings, michael@0: GrGLenum glFace, michael@0: GrStencilSettings::Face grFace) { michael@0: GrGLenum glFunc = gr_to_gl_stencil_func(settings.func(grFace)); michael@0: GrGLenum glFailOp = gr_to_gl_stencil_op(settings.failOp(grFace)); michael@0: GrGLenum glPassOp = gr_to_gl_stencil_op(settings.passOp(grFace)); michael@0: michael@0: GrGLint ref = settings.funcRef(grFace); michael@0: GrGLint mask = settings.funcMask(grFace); michael@0: GrGLint writeMask = settings.writeMask(grFace); michael@0: michael@0: if (GR_GL_FRONT_AND_BACK == glFace) { michael@0: // we call the combined func just in case separate stencil is not michael@0: // supported. 
michael@0: GR_GL_CALL(gl, StencilFunc(glFunc, ref, mask)); michael@0: GR_GL_CALL(gl, StencilMask(writeMask)); michael@0: GR_GL_CALL(gl, StencilOp(glFailOp, glPassOp, glPassOp)); michael@0: } else { michael@0: GR_GL_CALL(gl, StencilFuncSeparate(glFace, glFunc, ref, mask)); michael@0: GR_GL_CALL(gl, StencilMaskSeparate(glFace, writeMask)); michael@0: GR_GL_CALL(gl, StencilOpSeparate(glFace, glFailOp, glPassOp, glPassOp)); michael@0: } michael@0: } michael@0: } michael@0: michael@0: void GrGpuGL::flushStencil(DrawType type) { michael@0: if (kStencilPath_DrawType != type && fHWStencilSettings != fStencilSettings) { michael@0: if (fStencilSettings.isDisabled()) { michael@0: if (kNo_TriState != fHWStencilTestEnabled) { michael@0: GL_CALL(Disable(GR_GL_STENCIL_TEST)); michael@0: fHWStencilTestEnabled = kNo_TriState; michael@0: } michael@0: } else { michael@0: if (kYes_TriState != fHWStencilTestEnabled) { michael@0: GL_CALL(Enable(GR_GL_STENCIL_TEST)); michael@0: fHWStencilTestEnabled = kYes_TriState; michael@0: } michael@0: } michael@0: if (!fStencilSettings.isDisabled()) { michael@0: if (this->caps()->twoSidedStencilSupport()) { michael@0: set_gl_stencil(this->glInterface(), michael@0: fStencilSettings, michael@0: GR_GL_FRONT, michael@0: GrStencilSettings::kFront_Face); michael@0: set_gl_stencil(this->glInterface(), michael@0: fStencilSettings, michael@0: GR_GL_BACK, michael@0: GrStencilSettings::kBack_Face); michael@0: } else { michael@0: set_gl_stencil(this->glInterface(), michael@0: fStencilSettings, michael@0: GR_GL_FRONT_AND_BACK, michael@0: GrStencilSettings::kFront_Face); michael@0: } michael@0: } michael@0: fHWStencilSettings = fStencilSettings; michael@0: } michael@0: } michael@0: michael@0: void GrGpuGL::flushAAState(DrawType type) { michael@0: // At least some ATI linux drivers will render GL_LINES incorrectly when MSAA state is enabled but michael@0: // the target is not multisampled. Single pixel wide lines are rendered thicker than 1 pixel wide. michael@0: #if 0 michael@0: // Replace RT_HAS_MSAA with this definition once this driver bug is no longer a relevant concern michael@0: #define RT_HAS_MSAA rt->isMultisampled() michael@0: #else michael@0: #define RT_HAS_MSAA (rt->isMultisampled() || kDrawLines_DrawType == type) michael@0: #endif michael@0: michael@0: const GrRenderTarget* rt = this->getDrawState().getRenderTarget(); michael@0: if (kGL_GrGLStandard == this->glStandard()) { michael@0: // ES doesn't support toggling GL_MULTISAMPLE and doesn't have michael@0: // smooth lines. michael@0: // we prefer smooth lines over multisampled lines michael@0: bool smoothLines = false; michael@0: michael@0: if (kDrawLines_DrawType == type) { michael@0: smoothLines = this->willUseHWAALines(); michael@0: if (smoothLines) { michael@0: if (kYes_TriState != fHWAAState.fSmoothLineEnabled) { michael@0: GL_CALL(Enable(GR_GL_LINE_SMOOTH)); michael@0: fHWAAState.fSmoothLineEnabled = kYes_TriState; michael@0: // must disable msaa to use line smoothing michael@0: if (RT_HAS_MSAA && michael@0: kNo_TriState != fHWAAState.fMSAAEnabled) { michael@0: GL_CALL(Disable(GR_GL_MULTISAMPLE)); michael@0: fHWAAState.fMSAAEnabled = kNo_TriState; michael@0: } michael@0: } michael@0: } else { michael@0: if (kNo_TriState != fHWAAState.fSmoothLineEnabled) { michael@0: GL_CALL(Disable(GR_GL_LINE_SMOOTH)); michael@0: fHWAAState.fSmoothLineEnabled = kNo_TriState; michael@0: } michael@0: } michael@0: } michael@0: if (!smoothLines && RT_HAS_MSAA) { michael@0: // FIXME: GL_NV_pr doesn't seem to like MSAA disabled. 
The paths michael@0: // convex hulls of each segment appear to get filled. michael@0: bool enableMSAA = kStencilPath_DrawType == type || michael@0: this->getDrawState().isHWAntialiasState(); michael@0: if (enableMSAA) { michael@0: if (kYes_TriState != fHWAAState.fMSAAEnabled) { michael@0: GL_CALL(Enable(GR_GL_MULTISAMPLE)); michael@0: fHWAAState.fMSAAEnabled = kYes_TriState; michael@0: } michael@0: } else { michael@0: if (kNo_TriState != fHWAAState.fMSAAEnabled) { michael@0: GL_CALL(Disable(GR_GL_MULTISAMPLE)); michael@0: fHWAAState.fMSAAEnabled = kNo_TriState; michael@0: } michael@0: } michael@0: } michael@0: } michael@0: } michael@0: michael@0: void GrGpuGL::flushPathStencilSettings(SkPath::FillType fill) { michael@0: GrStencilSettings pathStencilSettings; michael@0: this->getPathStencilSettingsForFillType(fill, &pathStencilSettings); michael@0: if (fHWPathStencilSettings != pathStencilSettings) { michael@0: // Just the func, ref, and mask is set here. The op and write mask are params to the call michael@0: // that draws the path to the SB (glStencilFillPath) michael@0: GrGLenum func = michael@0: gr_to_gl_stencil_func(pathStencilSettings.func(GrStencilSettings::kFront_Face)); michael@0: GL_CALL(PathStencilFunc(func, michael@0: pathStencilSettings.funcRef(GrStencilSettings::kFront_Face), michael@0: pathStencilSettings.funcMask(GrStencilSettings::kFront_Face))); michael@0: michael@0: fHWPathStencilSettings = pathStencilSettings; michael@0: } michael@0: } michael@0: michael@0: void GrGpuGL::flushBlend(bool isLines, michael@0: GrBlendCoeff srcCoeff, michael@0: GrBlendCoeff dstCoeff) { michael@0: if (isLines && this->willUseHWAALines()) { michael@0: if (kYes_TriState != fHWBlendState.fEnabled) { michael@0: GL_CALL(Enable(GR_GL_BLEND)); michael@0: fHWBlendState.fEnabled = kYes_TriState; michael@0: } michael@0: if (kSA_GrBlendCoeff != fHWBlendState.fSrcCoeff || michael@0: kISA_GrBlendCoeff != fHWBlendState.fDstCoeff) { michael@0: GL_CALL(BlendFunc(gXfermodeCoeff2Blend[kSA_GrBlendCoeff], michael@0: gXfermodeCoeff2Blend[kISA_GrBlendCoeff])); michael@0: fHWBlendState.fSrcCoeff = kSA_GrBlendCoeff; michael@0: fHWBlendState.fDstCoeff = kISA_GrBlendCoeff; michael@0: } michael@0: } else { michael@0: // any optimization to disable blending should michael@0: // have already been applied and tweaked the coeffs michael@0: // to (1, 0). 
michael@0: bool blendOff = kOne_GrBlendCoeff == srcCoeff && michael@0: kZero_GrBlendCoeff == dstCoeff; michael@0: if (blendOff) { michael@0: if (kNo_TriState != fHWBlendState.fEnabled) { michael@0: GL_CALL(Disable(GR_GL_BLEND)); michael@0: fHWBlendState.fEnabled = kNo_TriState; michael@0: } michael@0: } else { michael@0: if (kYes_TriState != fHWBlendState.fEnabled) { michael@0: GL_CALL(Enable(GR_GL_BLEND)); michael@0: fHWBlendState.fEnabled = kYes_TriState; michael@0: } michael@0: if (fHWBlendState.fSrcCoeff != srcCoeff || michael@0: fHWBlendState.fDstCoeff != dstCoeff) { michael@0: GL_CALL(BlendFunc(gXfermodeCoeff2Blend[srcCoeff], michael@0: gXfermodeCoeff2Blend[dstCoeff])); michael@0: fHWBlendState.fSrcCoeff = srcCoeff; michael@0: fHWBlendState.fDstCoeff = dstCoeff; michael@0: } michael@0: GrColor blendConst = this->getDrawState().getBlendConstant(); michael@0: if ((BlendCoeffReferencesConstant(srcCoeff) || michael@0: BlendCoeffReferencesConstant(dstCoeff)) && michael@0: (!fHWBlendState.fConstColorValid || michael@0: fHWBlendState.fConstColor != blendConst)) { michael@0: GrGLfloat c[4]; michael@0: GrColorToRGBAFloat(blendConst, c); michael@0: GL_CALL(BlendColor(c[0], c[1], c[2], c[3])); michael@0: fHWBlendState.fConstColor = blendConst; michael@0: fHWBlendState.fConstColorValid = true; michael@0: } michael@0: } michael@0: } michael@0: } michael@0: michael@0: static inline GrGLenum tile_to_gl_wrap(SkShader::TileMode tm) { michael@0: static const GrGLenum gWrapModes[] = { michael@0: GR_GL_CLAMP_TO_EDGE, michael@0: GR_GL_REPEAT, michael@0: GR_GL_MIRRORED_REPEAT michael@0: }; michael@0: GR_STATIC_ASSERT(SkShader::kTileModeCount == SK_ARRAY_COUNT(gWrapModes)); michael@0: GR_STATIC_ASSERT(0 == SkShader::kClamp_TileMode); michael@0: GR_STATIC_ASSERT(1 == SkShader::kRepeat_TileMode); michael@0: GR_STATIC_ASSERT(2 == SkShader::kMirror_TileMode); michael@0: return gWrapModes[tm]; michael@0: } michael@0: michael@0: void GrGpuGL::bindTexture(int unitIdx, const GrTextureParams& params, GrGLTexture* texture) { michael@0: SkASSERT(NULL != texture); michael@0: michael@0: // If we created a rt/tex and rendered to it without using a texture and now we're texturing michael@0: // from the rt it will still be the last bound texture, but it needs resolving. So keep this michael@0: // out of the "last != next" check. 
michael@0: GrGLRenderTarget* texRT = static_cast(texture->asRenderTarget()); michael@0: if (NULL != texRT) { michael@0: this->onResolveRenderTarget(texRT); michael@0: } michael@0: michael@0: if (fHWBoundTextures[unitIdx] != texture) { michael@0: this->setTextureUnit(unitIdx); michael@0: GL_CALL(BindTexture(GR_GL_TEXTURE_2D, texture->textureID())); michael@0: fHWBoundTextures[unitIdx] = texture; michael@0: } michael@0: michael@0: ResetTimestamp timestamp; michael@0: const GrGLTexture::TexParams& oldTexParams = texture->getCachedTexParams(×tamp); michael@0: bool setAll = timestamp < this->getResetTimestamp(); michael@0: GrGLTexture::TexParams newTexParams; michael@0: michael@0: static GrGLenum glMinFilterModes[] = { michael@0: GR_GL_NEAREST, michael@0: GR_GL_LINEAR, michael@0: GR_GL_LINEAR_MIPMAP_LINEAR michael@0: }; michael@0: static GrGLenum glMagFilterModes[] = { michael@0: GR_GL_NEAREST, michael@0: GR_GL_LINEAR, michael@0: GR_GL_LINEAR michael@0: }; michael@0: GrTextureParams::FilterMode filterMode = params.filterMode(); michael@0: if (!this->caps()->mipMapSupport() && GrTextureParams::kMipMap_FilterMode == filterMode) { michael@0: filterMode = GrTextureParams::kBilerp_FilterMode; michael@0: } michael@0: newTexParams.fMinFilter = glMinFilterModes[filterMode]; michael@0: newTexParams.fMagFilter = glMagFilterModes[filterMode]; michael@0: michael@0: if (GrTextureParams::kMipMap_FilterMode == filterMode && texture->mipMapsAreDirty()) { michael@0: // GL_CALL(Hint(GR_GL_GENERATE_MIPMAP_HINT,GR_GL_NICEST)); michael@0: GL_CALL(GenerateMipmap(GR_GL_TEXTURE_2D)); michael@0: texture->dirtyMipMaps(false); michael@0: } michael@0: michael@0: newTexParams.fWrapS = tile_to_gl_wrap(params.getTileModeX()); michael@0: newTexParams.fWrapT = tile_to_gl_wrap(params.getTileModeY()); michael@0: memcpy(newTexParams.fSwizzleRGBA, michael@0: GrGLShaderBuilder::GetTexParamSwizzle(texture->config(), this->glCaps()), michael@0: sizeof(newTexParams.fSwizzleRGBA)); michael@0: if (setAll || newTexParams.fMagFilter != oldTexParams.fMagFilter) { michael@0: this->setTextureUnit(unitIdx); michael@0: GL_CALL(TexParameteri(GR_GL_TEXTURE_2D, michael@0: GR_GL_TEXTURE_MAG_FILTER, michael@0: newTexParams.fMagFilter)); michael@0: } michael@0: if (setAll || newTexParams.fMinFilter != oldTexParams.fMinFilter) { michael@0: this->setTextureUnit(unitIdx); michael@0: GL_CALL(TexParameteri(GR_GL_TEXTURE_2D, michael@0: GR_GL_TEXTURE_MIN_FILTER, michael@0: newTexParams.fMinFilter)); michael@0: } michael@0: if (setAll || newTexParams.fWrapS != oldTexParams.fWrapS) { michael@0: this->setTextureUnit(unitIdx); michael@0: GL_CALL(TexParameteri(GR_GL_TEXTURE_2D, michael@0: GR_GL_TEXTURE_WRAP_S, michael@0: newTexParams.fWrapS)); michael@0: } michael@0: if (setAll || newTexParams.fWrapT != oldTexParams.fWrapT) { michael@0: this->setTextureUnit(unitIdx); michael@0: GL_CALL(TexParameteri(GR_GL_TEXTURE_2D, michael@0: GR_GL_TEXTURE_WRAP_T, michael@0: newTexParams.fWrapT)); michael@0: } michael@0: if (this->glCaps().textureSwizzleSupport() && michael@0: (setAll || memcmp(newTexParams.fSwizzleRGBA, michael@0: oldTexParams.fSwizzleRGBA, michael@0: sizeof(newTexParams.fSwizzleRGBA)))) { michael@0: this->setTextureUnit(unitIdx); michael@0: if (this->glStandard() == kGLES_GrGLStandard) { michael@0: // ES3 added swizzle support but not GL_TEXTURE_SWIZZLE_RGBA. 
michael@0: const GrGLenum* swizzle = newTexParams.fSwizzleRGBA; michael@0: GL_CALL(TexParameteri(GR_GL_TEXTURE_2D, GR_GL_TEXTURE_SWIZZLE_R, swizzle[0])); michael@0: GL_CALL(TexParameteri(GR_GL_TEXTURE_2D, GR_GL_TEXTURE_SWIZZLE_G, swizzle[1])); michael@0: GL_CALL(TexParameteri(GR_GL_TEXTURE_2D, GR_GL_TEXTURE_SWIZZLE_B, swizzle[2])); michael@0: GL_CALL(TexParameteri(GR_GL_TEXTURE_2D, GR_GL_TEXTURE_SWIZZLE_A, swizzle[3])); michael@0: } else { michael@0: GR_STATIC_ASSERT(sizeof(newTexParams.fSwizzleRGBA[0]) == sizeof(GrGLint)); michael@0: const GrGLint* swizzle = reinterpret_cast(newTexParams.fSwizzleRGBA); michael@0: GL_CALL(TexParameteriv(GR_GL_TEXTURE_2D, GR_GL_TEXTURE_SWIZZLE_RGBA, swizzle)); michael@0: } michael@0: } michael@0: texture->setCachedTexParams(newTexParams, this->getResetTimestamp()); michael@0: } michael@0: michael@0: void GrGpuGL::setProjectionMatrix(const SkMatrix& matrix, michael@0: const SkISize& renderTargetSize, michael@0: GrSurfaceOrigin renderTargetOrigin) { michael@0: michael@0: SkASSERT(this->glCaps().fixedFunctionSupport()); michael@0: michael@0: if (renderTargetOrigin == fHWProjectionMatrixState.fRenderTargetOrigin && michael@0: renderTargetSize == fHWProjectionMatrixState.fRenderTargetSize && michael@0: matrix.cheapEqualTo(fHWProjectionMatrixState.fViewMatrix)) { michael@0: return; michael@0: } michael@0: michael@0: fHWProjectionMatrixState.fViewMatrix = matrix; michael@0: fHWProjectionMatrixState.fRenderTargetSize = renderTargetSize; michael@0: fHWProjectionMatrixState.fRenderTargetOrigin = renderTargetOrigin; michael@0: michael@0: GrGLfloat glMatrix[4 * 4]; michael@0: fHWProjectionMatrixState.getGLMatrix<4>(glMatrix); michael@0: GL_CALL(MatrixMode(GR_GL_PROJECTION)); michael@0: GL_CALL(LoadMatrixf(glMatrix)); michael@0: } michael@0: michael@0: void GrGpuGL::enableTexGen(int unitIdx, michael@0: TexGenComponents components, michael@0: const GrGLfloat* coefficients) { michael@0: SkASSERT(this->glCaps().fixedFunctionSupport()); michael@0: SkASSERT(components >= kS_TexGenComponents && components <= kSTR_TexGenComponents); michael@0: SkASSERT(this->glCaps().maxFixedFunctionTextureCoords() >= unitIdx); michael@0: michael@0: if (GR_GL_OBJECT_LINEAR == fHWTexGenSettings[unitIdx].fMode && michael@0: components == fHWTexGenSettings[unitIdx].fNumComponents && michael@0: !memcmp(coefficients, fHWTexGenSettings[unitIdx].fCoefficients, michael@0: 3 * components * sizeof(GrGLfloat))) { michael@0: return; michael@0: } michael@0: michael@0: this->setTextureUnit(unitIdx); michael@0: michael@0: if (GR_GL_OBJECT_LINEAR != fHWTexGenSettings[unitIdx].fMode) { michael@0: for (int i = 0; i < 4; i++) { michael@0: GL_CALL(TexGeni(GR_GL_S + i, GR_GL_TEXTURE_GEN_MODE, GR_GL_OBJECT_LINEAR)); michael@0: } michael@0: fHWTexGenSettings[unitIdx].fMode = GR_GL_OBJECT_LINEAR; michael@0: } michael@0: michael@0: for (int i = fHWTexGenSettings[unitIdx].fNumComponents; i < components; i++) { michael@0: GL_CALL(Enable(GR_GL_TEXTURE_GEN_S + i)); michael@0: } michael@0: for (int i = components; i < fHWTexGenSettings[unitIdx].fNumComponents; i++) { michael@0: GL_CALL(Disable(GR_GL_TEXTURE_GEN_S + i)); michael@0: } michael@0: fHWTexGenSettings[unitIdx].fNumComponents = components; michael@0: michael@0: for (int i = 0; i < components; i++) { michael@0: GrGLfloat plane[] = {coefficients[0 + 3 * i], michael@0: coefficients[1 + 3 * i], michael@0: 0, michael@0: coefficients[2 + 3 * i]}; michael@0: GL_CALL(TexGenfv(GR_GL_S + i, GR_GL_OBJECT_PLANE, plane)); michael@0: } michael@0: michael@0: if 
(this->caps()->pathRenderingSupport()) { michael@0: GL_CALL(PathTexGen(GR_GL_TEXTURE0 + unitIdx, michael@0: GR_GL_OBJECT_LINEAR, michael@0: components, michael@0: coefficients)); michael@0: } michael@0: michael@0: memcpy(fHWTexGenSettings[unitIdx].fCoefficients, coefficients, michael@0: 3 * components * sizeof(GrGLfloat)); michael@0: } michael@0: michael@0: void GrGpuGL::enableTexGen(int unitIdx, TexGenComponents components, const SkMatrix& matrix) { michael@0: GrGLfloat coefficients[3 * 3]; michael@0: SkASSERT(this->glCaps().fixedFunctionSupport()); michael@0: SkASSERT(components >= kS_TexGenComponents && components <= kSTR_TexGenComponents); michael@0: michael@0: coefficients[0] = SkScalarToFloat(matrix[SkMatrix::kMScaleX]); michael@0: coefficients[1] = SkScalarToFloat(matrix[SkMatrix::kMSkewX]); michael@0: coefficients[2] = SkScalarToFloat(matrix[SkMatrix::kMTransX]); michael@0: michael@0: if (components >= kST_TexGenComponents) { michael@0: coefficients[3] = SkScalarToFloat(matrix[SkMatrix::kMSkewY]); michael@0: coefficients[4] = SkScalarToFloat(matrix[SkMatrix::kMScaleY]); michael@0: coefficients[5] = SkScalarToFloat(matrix[SkMatrix::kMTransY]); michael@0: } michael@0: michael@0: if (components >= kSTR_TexGenComponents) { michael@0: coefficients[6] = SkScalarToFloat(matrix[SkMatrix::kMPersp0]); michael@0: coefficients[7] = SkScalarToFloat(matrix[SkMatrix::kMPersp1]); michael@0: coefficients[8] = SkScalarToFloat(matrix[SkMatrix::kMPersp2]); michael@0: } michael@0: michael@0: enableTexGen(unitIdx, components, coefficients); michael@0: } michael@0: michael@0: void GrGpuGL::flushTexGenSettings(int numUsedTexCoordSets) { michael@0: SkASSERT(this->glCaps().fixedFunctionSupport()); michael@0: SkASSERT(this->glCaps().maxFixedFunctionTextureCoords() >= numUsedTexCoordSets); michael@0: michael@0: // Only write the inactive tex gens, since active tex gens were written michael@0: // when they were enabled. 
michael@0: michael@0: SkDEBUGCODE( michael@0: for (int i = 0; i < numUsedTexCoordSets; i++) { michael@0: SkASSERT(0 != fHWTexGenSettings[i].fNumComponents); michael@0: } michael@0: ); michael@0: michael@0: for (int i = numUsedTexCoordSets; i < fHWActiveTexGenSets; i++) { michael@0: SkASSERT(0 != fHWTexGenSettings[i].fNumComponents); michael@0: michael@0: this->setTextureUnit(i); michael@0: for (int j = 0; j < fHWTexGenSettings[i].fNumComponents; j++) { michael@0: GL_CALL(Disable(GR_GL_TEXTURE_GEN_S + j)); michael@0: } michael@0: michael@0: if (this->caps()->pathRenderingSupport()) { michael@0: GL_CALL(PathTexGen(GR_GL_TEXTURE0 + i, GR_GL_NONE, 0, NULL)); michael@0: } michael@0: michael@0: fHWTexGenSettings[i].fNumComponents = 0; michael@0: } michael@0: michael@0: fHWActiveTexGenSets = numUsedTexCoordSets; michael@0: } michael@0: michael@0: void GrGpuGL::flushMiscFixedFunctionState() { michael@0: michael@0: const GrDrawState& drawState = this->getDrawState(); michael@0: michael@0: if (drawState.isDitherState()) { michael@0: if (kYes_TriState != fHWDitherEnabled) { michael@0: GL_CALL(Enable(GR_GL_DITHER)); michael@0: fHWDitherEnabled = kYes_TriState; michael@0: } michael@0: } else { michael@0: if (kNo_TriState != fHWDitherEnabled) { michael@0: GL_CALL(Disable(GR_GL_DITHER)); michael@0: fHWDitherEnabled = kNo_TriState; michael@0: } michael@0: } michael@0: michael@0: if (drawState.isColorWriteDisabled()) { michael@0: if (kNo_TriState != fHWWriteToColor) { michael@0: GL_CALL(ColorMask(GR_GL_FALSE, GR_GL_FALSE, michael@0: GR_GL_FALSE, GR_GL_FALSE)); michael@0: fHWWriteToColor = kNo_TriState; michael@0: } michael@0: } else { michael@0: if (kYes_TriState != fHWWriteToColor) { michael@0: GL_CALL(ColorMask(GR_GL_TRUE, GR_GL_TRUE, GR_GL_TRUE, GR_GL_TRUE)); michael@0: fHWWriteToColor = kYes_TriState; michael@0: } michael@0: } michael@0: michael@0: if (fHWDrawFace != drawState.getDrawFace()) { michael@0: switch (this->getDrawState().getDrawFace()) { michael@0: case GrDrawState::kCCW_DrawFace: michael@0: GL_CALL(Enable(GR_GL_CULL_FACE)); michael@0: GL_CALL(CullFace(GR_GL_BACK)); michael@0: break; michael@0: case GrDrawState::kCW_DrawFace: michael@0: GL_CALL(Enable(GR_GL_CULL_FACE)); michael@0: GL_CALL(CullFace(GR_GL_FRONT)); michael@0: break; michael@0: case GrDrawState::kBoth_DrawFace: michael@0: GL_CALL(Disable(GR_GL_CULL_FACE)); michael@0: break; michael@0: default: michael@0: GrCrash("Unknown draw face."); michael@0: } michael@0: fHWDrawFace = drawState.getDrawFace(); michael@0: } michael@0: } michael@0: michael@0: void GrGpuGL::notifyRenderTargetDelete(GrRenderTarget* renderTarget) { michael@0: SkASSERT(NULL != renderTarget); michael@0: if (fHWBoundRenderTarget == renderTarget) { michael@0: fHWBoundRenderTarget = NULL; michael@0: } michael@0: } michael@0: michael@0: void GrGpuGL::notifyTextureDelete(GrGLTexture* texture) { michael@0: for (int s = 0; s < fHWBoundTextures.count(); ++s) { michael@0: if (fHWBoundTextures[s] == texture) { michael@0: // deleting bound texture does implied bind to 0 michael@0: fHWBoundTextures[s] = NULL; michael@0: } michael@0: } michael@0: } michael@0: michael@0: bool GrGpuGL::configToGLFormats(GrPixelConfig config, michael@0: bool getSizedInternalFormat, michael@0: GrGLenum* internalFormat, michael@0: GrGLenum* externalFormat, michael@0: GrGLenum* externalType) { michael@0: GrGLenum dontCare; michael@0: if (NULL == internalFormat) { michael@0: internalFormat = &dontCare; michael@0: } michael@0: if (NULL == externalFormat) { michael@0: externalFormat = &dontCare; 
michael@0: } michael@0: if (NULL == externalType) { michael@0: externalType = &dontCare; michael@0: } michael@0: michael@0: switch (config) { michael@0: case kRGBA_8888_GrPixelConfig: michael@0: *internalFormat = GR_GL_RGBA; michael@0: *externalFormat = GR_GL_RGBA; michael@0: if (getSizedInternalFormat) { michael@0: *internalFormat = GR_GL_RGBA8; michael@0: } else { michael@0: *internalFormat = GR_GL_RGBA; michael@0: } michael@0: *externalType = GR_GL_UNSIGNED_BYTE; michael@0: break; michael@0: case kBGRA_8888_GrPixelConfig: michael@0: if (!this->glCaps().bgraFormatSupport()) { michael@0: return false; michael@0: } michael@0: if (this->glCaps().bgraIsInternalFormat()) { michael@0: if (getSizedInternalFormat) { michael@0: *internalFormat = GR_GL_BGRA8; michael@0: } else { michael@0: *internalFormat = GR_GL_BGRA; michael@0: } michael@0: } else { michael@0: if (getSizedInternalFormat) { michael@0: *internalFormat = GR_GL_RGBA8; michael@0: } else { michael@0: *internalFormat = GR_GL_RGBA; michael@0: } michael@0: } michael@0: *externalFormat = GR_GL_BGRA; michael@0: *externalType = GR_GL_UNSIGNED_BYTE; michael@0: break; michael@0: case kRGB_565_GrPixelConfig: michael@0: *internalFormat = GR_GL_RGB; michael@0: *externalFormat = GR_GL_RGB; michael@0: if (getSizedInternalFormat) { michael@0: if (this->glStandard() == kGL_GrGLStandard) { michael@0: return false; michael@0: } else { michael@0: *internalFormat = GR_GL_RGB565; michael@0: } michael@0: } else { michael@0: *internalFormat = GR_GL_RGB; michael@0: } michael@0: *externalType = GR_GL_UNSIGNED_SHORT_5_6_5; michael@0: break; michael@0: case kRGBA_4444_GrPixelConfig: michael@0: *internalFormat = GR_GL_RGBA; michael@0: *externalFormat = GR_GL_RGBA; michael@0: if (getSizedInternalFormat) { michael@0: *internalFormat = GR_GL_RGBA4; michael@0: } else { michael@0: *internalFormat = GR_GL_RGBA; michael@0: } michael@0: *externalType = GR_GL_UNSIGNED_SHORT_4_4_4_4; michael@0: break; michael@0: case kIndex_8_GrPixelConfig: michael@0: if (this->caps()->eightBitPaletteSupport()) { michael@0: *internalFormat = GR_GL_PALETTE8_RGBA8; michael@0: // glCompressedTexImage doesn't take external params michael@0: *externalFormat = GR_GL_PALETTE8_RGBA8; michael@0: // no sized/unsized internal format distinction here michael@0: *internalFormat = GR_GL_PALETTE8_RGBA8; michael@0: // unused with CompressedTexImage michael@0: *externalType = GR_GL_UNSIGNED_BYTE; michael@0: } else { michael@0: return false; michael@0: } michael@0: break; michael@0: case kAlpha_8_GrPixelConfig: michael@0: if (this->glCaps().textureRedSupport()) { michael@0: *internalFormat = GR_GL_RED; michael@0: *externalFormat = GR_GL_RED; michael@0: if (getSizedInternalFormat) { michael@0: *internalFormat = GR_GL_R8; michael@0: } else { michael@0: *internalFormat = GR_GL_RED; michael@0: } michael@0: *externalType = GR_GL_UNSIGNED_BYTE; michael@0: } else { michael@0: *internalFormat = GR_GL_ALPHA; michael@0: *externalFormat = GR_GL_ALPHA; michael@0: if (getSizedInternalFormat) { michael@0: *internalFormat = GR_GL_ALPHA8; michael@0: } else { michael@0: *internalFormat = GR_GL_ALPHA; michael@0: } michael@0: *externalType = GR_GL_UNSIGNED_BYTE; michael@0: } michael@0: break; michael@0: default: michael@0: return false; michael@0: } michael@0: return true; michael@0: } michael@0: michael@0: void GrGpuGL::setTextureUnit(int unit) { michael@0: SkASSERT(unit >= 0 && unit < fHWBoundTextures.count()); michael@0: if (unit != fHWActiveTextureUnitIdx) { michael@0: GL_CALL(ActiveTexture(GR_GL_TEXTURE0 + unit)); 
michael@0: fHWActiveTextureUnitIdx = unit; michael@0: } michael@0: } michael@0: michael@0: void GrGpuGL::setScratchTextureUnit() { michael@0: // Bind the last texture unit since it is the least likely to be used by GrGLProgram. michael@0: int lastUnitIdx = fHWBoundTextures.count() - 1; michael@0: if (lastUnitIdx != fHWActiveTextureUnitIdx) { michael@0: GL_CALL(ActiveTexture(GR_GL_TEXTURE0 + lastUnitIdx)); michael@0: fHWActiveTextureUnitIdx = lastUnitIdx; michael@0: } michael@0: // clear out the this field so that if a program does use this unit it will rebind the correct michael@0: // texture. michael@0: fHWBoundTextures[lastUnitIdx] = NULL; michael@0: } michael@0: michael@0: namespace { michael@0: // Determines whether glBlitFramebuffer could be used between src and dst. michael@0: inline bool can_blit_framebuffer(const GrSurface* dst, michael@0: const GrSurface* src, michael@0: const GrGpuGL* gpu, michael@0: bool* wouldNeedTempFBO = NULL) { michael@0: if (gpu->glCaps().isConfigRenderable(dst->config(), dst->desc().fSampleCnt > 0) && michael@0: gpu->glCaps().isConfigRenderable(src->config(), src->desc().fSampleCnt > 0) && michael@0: gpu->glCaps().usesMSAARenderBuffers()) { michael@0: // ES3 doesn't allow framebuffer blits when the src has MSAA and the configs don't match michael@0: // or the rects are not the same (not just the same size but have the same edges). michael@0: if (GrGLCaps::kES_3_0_MSFBOType == gpu->glCaps().msFBOType() && michael@0: (src->desc().fSampleCnt > 0 || src->config() != dst->config())) { michael@0: return false; michael@0: } michael@0: if (NULL != wouldNeedTempFBO) { michael@0: *wouldNeedTempFBO = NULL == dst->asRenderTarget() || NULL == src->asRenderTarget(); michael@0: } michael@0: return true; michael@0: } else { michael@0: return false; michael@0: } michael@0: } michael@0: michael@0: inline bool can_copy_texsubimage(const GrSurface* dst, michael@0: const GrSurface* src, michael@0: const GrGpuGL* gpu, michael@0: bool* wouldNeedTempFBO = NULL) { michael@0: // Table 3.9 of the ES2 spec indicates the supported formats with CopyTexSubImage michael@0: // and BGRA isn't in the spec. There doesn't appear to be any extension that adds it. Perhaps michael@0: // many drivers would allow it to work, but ANGLE does not. michael@0: if (kGLES_GrGLStandard == gpu->glStandard() && gpu->glCaps().bgraIsInternalFormat() && michael@0: (kBGRA_8888_GrPixelConfig == dst->config() || kBGRA_8888_GrPixelConfig == src->config())) { michael@0: return false; michael@0: } michael@0: const GrGLRenderTarget* dstRT = static_cast(dst->asRenderTarget()); michael@0: // If dst is multisampled (and uses an extension where there is a separate MSAA renderbuffer) michael@0: // then we don't want to copy to the texture but to the MSAA buffer. 
michael@0: if (NULL != dstRT && dstRT->renderFBOID() != dstRT->textureFBOID()) { michael@0: return false; michael@0: } michael@0: const GrGLRenderTarget* srcRT = static_cast(src->asRenderTarget()); michael@0: // If the src is multisampled (and uses an extension where there is a separate MSAA michael@0: // renderbuffer) then it is an invalid operation to call CopyTexSubImage michael@0: if (NULL != srcRT && srcRT->renderFBOID() != srcRT->textureFBOID()) { michael@0: return false; michael@0: } michael@0: if (gpu->glCaps().isConfigRenderable(src->config(), src->desc().fSampleCnt > 0) && michael@0: NULL != dst->asTexture() && michael@0: dst->origin() == src->origin() && michael@0: kIndex_8_GrPixelConfig != src->config()) { michael@0: if (NULL != wouldNeedTempFBO) { michael@0: *wouldNeedTempFBO = NULL == src->asRenderTarget(); michael@0: } michael@0: return true; michael@0: } else { michael@0: return false; michael@0: } michael@0: } michael@0: michael@0: // If a temporary FBO was created, its non-zero ID is returned. The viewport that the copy rect is michael@0: // relative to is output. michael@0: inline GrGLuint bind_surface_as_fbo(const GrGLInterface* gl, michael@0: GrSurface* surface, michael@0: GrGLenum fboTarget, michael@0: GrGLIRect* viewport) { michael@0: GrGLRenderTarget* rt = static_cast(surface->asRenderTarget()); michael@0: GrGLuint tempFBOID; michael@0: if (NULL == rt) { michael@0: SkASSERT(NULL != surface->asTexture()); michael@0: GrGLuint texID = static_cast(surface->asTexture())->textureID(); michael@0: GR_GL_CALL(gl, GenFramebuffers(1, &tempFBOID)); michael@0: GR_GL_CALL(gl, BindFramebuffer(fboTarget, tempFBOID)); michael@0: GR_GL_CALL(gl, FramebufferTexture2D(fboTarget, michael@0: GR_GL_COLOR_ATTACHMENT0, michael@0: GR_GL_TEXTURE_2D, michael@0: texID, michael@0: 0)); michael@0: viewport->fLeft = 0; michael@0: viewport->fBottom = 0; michael@0: viewport->fWidth = surface->width(); michael@0: viewport->fHeight = surface->height(); michael@0: } else { michael@0: tempFBOID = 0; michael@0: GR_GL_CALL(gl, BindFramebuffer(fboTarget, rt->renderFBOID())); michael@0: *viewport = rt->getViewport(); michael@0: } michael@0: return tempFBOID; michael@0: } michael@0: michael@0: } michael@0: michael@0: void GrGpuGL::initCopySurfaceDstDesc(const GrSurface* src, GrTextureDesc* desc) { michael@0: // Check for format issues with glCopyTexSubImage2D michael@0: if (kGLES_GrGLStandard == this->glStandard() && this->glCaps().bgraIsInternalFormat() && michael@0: kBGRA_8888_GrPixelConfig == src->config()) { michael@0: // glCopyTexSubImage2D doesn't work with this config. We'll want to make it a render target michael@0: // in order to call glBlitFramebuffer or to copy to it by rendering. michael@0: INHERITED::initCopySurfaceDstDesc(src, desc); michael@0: return; michael@0: } else if (NULL == src->asRenderTarget()) { michael@0: // We don't want to have to create an FBO just to use glCopyTexSubImage2D. Let the base michael@0: // class handle it by rendering. michael@0: INHERITED::initCopySurfaceDstDesc(src, desc); michael@0: return; michael@0: } michael@0: michael@0: const GrGLRenderTarget* srcRT = static_cast(src->asRenderTarget()); michael@0: if (NULL != srcRT && srcRT->renderFBOID() != srcRT->textureFBOID()) { michael@0: // It's illegal to call CopyTexSubImage2D on a MSAA renderbuffer. 
michael@0: INHERITED::initCopySurfaceDstDesc(src, desc); michael@0: } else { michael@0: desc->fConfig = src->config(); michael@0: desc->fOrigin = src->origin(); michael@0: desc->fFlags = kNone_GrTextureFlags; michael@0: } michael@0: } michael@0: michael@0: bool GrGpuGL::onCopySurface(GrSurface* dst, michael@0: GrSurface* src, michael@0: const SkIRect& srcRect, michael@0: const SkIPoint& dstPoint) { michael@0: bool inheritedCouldCopy = INHERITED::onCanCopySurface(dst, src, srcRect, dstPoint); michael@0: bool copied = false; michael@0: bool wouldNeedTempFBO = false; michael@0: if (can_copy_texsubimage(dst, src, this, &wouldNeedTempFBO) && michael@0: (!wouldNeedTempFBO || !inheritedCouldCopy)) { michael@0: GrGLuint srcFBO; michael@0: GrGLIRect srcVP; michael@0: srcFBO = bind_surface_as_fbo(this->glInterface(), src, GR_GL_FRAMEBUFFER, &srcVP); michael@0: GrGLTexture* dstTex = static_cast(dst->asTexture()); michael@0: SkASSERT(NULL != dstTex); michael@0: // We modified the bound FBO michael@0: fHWBoundRenderTarget = NULL; michael@0: GrGLIRect srcGLRect; michael@0: srcGLRect.setRelativeTo(srcVP, michael@0: srcRect.fLeft, michael@0: srcRect.fTop, michael@0: srcRect.width(), michael@0: srcRect.height(), michael@0: src->origin()); michael@0: michael@0: this->setScratchTextureUnit(); michael@0: GL_CALL(BindTexture(GR_GL_TEXTURE_2D, dstTex->textureID())); michael@0: GrGLint dstY; michael@0: if (kBottomLeft_GrSurfaceOrigin == dst->origin()) { michael@0: dstY = dst->height() - (dstPoint.fY + srcGLRect.fHeight); michael@0: } else { michael@0: dstY = dstPoint.fY; michael@0: } michael@0: GL_CALL(CopyTexSubImage2D(GR_GL_TEXTURE_2D, 0, michael@0: dstPoint.fX, dstY, michael@0: srcGLRect.fLeft, srcGLRect.fBottom, michael@0: srcGLRect.fWidth, srcGLRect.fHeight)); michael@0: copied = true; michael@0: if (srcFBO) { michael@0: GL_CALL(DeleteFramebuffers(1, &srcFBO)); michael@0: } michael@0: } else if (can_blit_framebuffer(dst, src, this, &wouldNeedTempFBO) && michael@0: (!wouldNeedTempFBO || !inheritedCouldCopy)) { michael@0: SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY, michael@0: srcRect.width(), srcRect.height()); michael@0: bool selfOverlap = false; michael@0: if (dst->isSameAs(src)) { michael@0: selfOverlap = SkIRect::IntersectsNoEmptyCheck(dstRect, srcRect); michael@0: } michael@0: michael@0: if (!selfOverlap) { michael@0: GrGLuint dstFBO; michael@0: GrGLuint srcFBO; michael@0: GrGLIRect dstVP; michael@0: GrGLIRect srcVP; michael@0: dstFBO = bind_surface_as_fbo(this->glInterface(), dst, GR_GL_DRAW_FRAMEBUFFER, &dstVP); michael@0: srcFBO = bind_surface_as_fbo(this->glInterface(), src, GR_GL_READ_FRAMEBUFFER, &srcVP); michael@0: // We modified the bound FBO michael@0: fHWBoundRenderTarget = NULL; michael@0: GrGLIRect srcGLRect; michael@0: GrGLIRect dstGLRect; michael@0: srcGLRect.setRelativeTo(srcVP, michael@0: srcRect.fLeft, michael@0: srcRect.fTop, michael@0: srcRect.width(), michael@0: srcRect.height(), michael@0: src->origin()); michael@0: dstGLRect.setRelativeTo(dstVP, michael@0: dstRect.fLeft, michael@0: dstRect.fTop, michael@0: dstRect.width(), michael@0: dstRect.height(), michael@0: dst->origin()); michael@0: michael@0: GrAutoTRestore asr; michael@0: if (GrGLCaps::kDesktop_EXT_MSFBOType == this->glCaps().msFBOType()) { michael@0: // The EXT version applies the scissor during the blit, so disable it. 
michael@0: asr.reset(&fScissorState); michael@0: fScissorState.fEnabled = false; michael@0: this->flushScissor(); michael@0: } michael@0: GrGLint srcY0; michael@0: GrGLint srcY1; michael@0: // Does the blit need to y-mirror or not? michael@0: if (src->origin() == dst->origin()) { michael@0: srcY0 = srcGLRect.fBottom; michael@0: srcY1 = srcGLRect.fBottom + srcGLRect.fHeight; michael@0: } else { michael@0: srcY0 = srcGLRect.fBottom + srcGLRect.fHeight; michael@0: srcY1 = srcGLRect.fBottom; michael@0: } michael@0: GL_CALL(BlitFramebuffer(srcGLRect.fLeft, michael@0: srcY0, michael@0: srcGLRect.fLeft + srcGLRect.fWidth, michael@0: srcY1, michael@0: dstGLRect.fLeft, michael@0: dstGLRect.fBottom, michael@0: dstGLRect.fLeft + dstGLRect.fWidth, michael@0: dstGLRect.fBottom + dstGLRect.fHeight, michael@0: GR_GL_COLOR_BUFFER_BIT, GR_GL_NEAREST)); michael@0: if (dstFBO) { michael@0: GL_CALL(DeleteFramebuffers(1, &dstFBO)); michael@0: } michael@0: if (srcFBO) { michael@0: GL_CALL(DeleteFramebuffers(1, &srcFBO)); michael@0: } michael@0: copied = true; michael@0: } michael@0: } michael@0: if (!copied && inheritedCouldCopy) { michael@0: copied = INHERITED::onCopySurface(dst, src, srcRect, dstPoint); michael@0: SkASSERT(copied); michael@0: } michael@0: return copied; michael@0: } michael@0: michael@0: bool GrGpuGL::onCanCopySurface(GrSurface* dst, michael@0: GrSurface* src, michael@0: const SkIRect& srcRect, michael@0: const SkIPoint& dstPoint) { michael@0: // This mirrors the logic in onCopySurface. michael@0: if (can_copy_texsubimage(dst, src, this)) { michael@0: return true; michael@0: } michael@0: if (can_blit_framebuffer(dst, src, this)) { michael@0: if (dst->isSameAs(src)) { michael@0: SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY, michael@0: srcRect.width(), srcRect.height()); michael@0: if(!SkIRect::IntersectsNoEmptyCheck(dstRect, srcRect)) { michael@0: return true; michael@0: } michael@0: } else { michael@0: return true; michael@0: } michael@0: } michael@0: return INHERITED::onCanCopySurface(dst, src, srcRect, dstPoint); michael@0: } michael@0: michael@0: void GrGpuGL::onInstantGpuTraceEvent(const char* marker) { michael@0: if (this->caps()->gpuTracingSupport()) { michael@0: // GL_CALL(InsertEventMarker(0, marker)); michael@0: } michael@0: } michael@0: michael@0: void GrGpuGL::onPushGpuTraceEvent(const char* marker) { michael@0: if (this->caps()->gpuTracingSupport()) { michael@0: // GL_CALL(PushGroupMarker(0, marker)); michael@0: } michael@0: } michael@0: michael@0: void GrGpuGL::onPopGpuTraceEvent() { michael@0: if (this->caps()->gpuTracingSupport()) { michael@0: // GL_CALL(PopGroupMarker()); michael@0: } michael@0: } michael@0: michael@0: /////////////////////////////////////////////////////////////////////////////// michael@0: michael@0: GrGLAttribArrayState* GrGpuGL::HWGeometryState::bindArrayAndBuffersToDraw( michael@0: GrGpuGL* gpu, michael@0: const GrGLVertexBuffer* vbuffer, michael@0: const GrGLIndexBuffer* ibuffer) { michael@0: SkASSERT(NULL != vbuffer); michael@0: GrGLAttribArrayState* attribState; michael@0: michael@0: // We use a vertex array if we're on a core profile and the verts are in a VBO. 
michael@0: if (gpu->glCaps().isCoreProfile() && !vbuffer->isCPUBacked()) { michael@0: if (NULL == fVBOVertexArray || !fVBOVertexArray->isValid()) { michael@0: SkSafeUnref(fVBOVertexArray); michael@0: GrGLuint arrayID; michael@0: GR_GL_CALL(gpu->glInterface(), GenVertexArrays(1, &arrayID)); michael@0: int attrCount = gpu->glCaps().maxVertexAttributes(); michael@0: fVBOVertexArray = SkNEW_ARGS(GrGLVertexArray, (gpu, arrayID, attrCount)); michael@0: } michael@0: attribState = fVBOVertexArray->bindWithIndexBuffer(ibuffer); michael@0: } else { michael@0: if (NULL != ibuffer) { michael@0: this->setIndexBufferIDOnDefaultVertexArray(gpu, ibuffer->bufferID()); michael@0: } else { michael@0: this->setVertexArrayID(gpu, 0); michael@0: } michael@0: int attrCount = gpu->glCaps().maxVertexAttributes(); michael@0: if (fDefaultVertexArrayAttribState.count() != attrCount) { michael@0: fDefaultVertexArrayAttribState.resize(attrCount); michael@0: } michael@0: attribState = &fDefaultVertexArrayAttribState; michael@0: } michael@0: return attribState; michael@0: }