/*
 * Copyright (c) 2011 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */
#include <assert.h>
#include <stdlib.h>

#include "./vpx_config.h"
#include "vp9/common/vp9_common.h"
#include "vp9/encoder/vp9_lookahead.h"
#include "vp9/common/vp9_extend.h"

struct lookahead_ctx {
  unsigned int max_sz;         /* Absolute size of the queue */
  unsigned int sz;             /* Number of buffers currently in the queue */
  unsigned int read_idx;       /* Read index */
  unsigned int write_idx;      /* Write index */
  struct lookahead_entry *buf; /* Buffer list */
};


/* Return the buffer at the given absolute index and increment the index */
static struct lookahead_entry * pop(struct lookahead_ctx *ctx,
                                    unsigned int *idx) {
  unsigned int index = *idx;
  struct lookahead_entry *buf = ctx->buf + index;

  assert(index < ctx->max_sz);
  if (++index >= ctx->max_sz)
    index -= ctx->max_sz;
  *idx = index;
  return buf;
}


void vp9_lookahead_destroy(struct lookahead_ctx *ctx) {
  if (ctx) {
    if (ctx->buf) {
      unsigned int i;

      for (i = 0; i < ctx->max_sz; i++)
        vp9_free_frame_buffer(&ctx->buf[i].img);
      free(ctx->buf);
    }
    free(ctx);
  }
}


struct lookahead_ctx * vp9_lookahead_init(unsigned int width,
                                          unsigned int height,
                                          unsigned int subsampling_x,
                                          unsigned int subsampling_y,
                                          unsigned int depth) {
  struct lookahead_ctx *ctx = NULL;

  // Clamp the lookahead queue depth
  depth = clamp(depth, 1, MAX_LAG_BUFFERS);

  // Allocate the lookahead structures
  ctx = calloc(1, sizeof(*ctx));
  if (ctx) {
    unsigned int i;
    ctx->max_sz = depth;
    ctx->buf = calloc(depth, sizeof(*ctx->buf));
    if (!ctx->buf)
      goto bail;
    for (i = 0; i < depth; i++)
      if (vp9_alloc_frame_buffer(&ctx->buf[i].img,
                                 width, height, subsampling_x, subsampling_y,
                                 VP9BORDERINPIXELS))
        goto bail;
  }
  return ctx;
 bail:
  vp9_lookahead_destroy(ctx);
  return NULL;
}

#define USE_PARTIAL_COPY 0

int vp9_lookahead_push(struct lookahead_ctx *ctx, YV12_BUFFER_CONFIG *src,
                       int64_t ts_start, int64_t ts_end, unsigned int flags,
                       unsigned char *active_map) {
  struct lookahead_entry *buf;
#if USE_PARTIAL_COPY
  int row, col, active_end;
  int mb_rows = (src->y_height + 15) >> 4;
  int mb_cols = (src->y_width + 15) >> 4;
#endif

  if (ctx->sz + 1 > ctx->max_sz)
    return 1;
  ctx->sz++;
  buf = pop(ctx, &ctx->write_idx);

#if USE_PARTIAL_COPY
  // TODO(jkoleszar): This is disabled for now, as
  // vp9_copy_and_extend_frame_with_rect is not subsampling/alpha aware.

  // Only do this partial copy if the following conditions are all met:
  // 1. Lookahead queue has a size of 1.
  // 2. Active map is provided.
  // 3. This is not a key frame, golden frame or altref frame.
  if (ctx->max_sz == 1 && active_map && !flags) {
    for (row = 0; row < mb_rows; ++row) {
      col = 0;

      while (1) {
        // Find the first active macroblock in this row.
        for (; col < mb_cols; ++col) {
          if (active_map[col])
            break;
        }

        // No more active macroblocks in this row.
        if (col == mb_cols)
          break;

        // Find the end of the active region in this row.
        active_end = col;

        for (; active_end < mb_cols; ++active_end) {
          if (!active_map[active_end])
            break;
        }

        // Only copy this active region.
        vp9_copy_and_extend_frame_with_rect(src, &buf->img,
                                            row << 4,
                                            col << 4, 16,
                                            (active_end - col) << 4);

        // Start again from the end of this active region.
        col = active_end;
      }

      active_map += mb_cols;
    }
  } else {
    vp9_copy_and_extend_frame(src, &buf->img);
  }
#else
  // Partial copy not implemented yet
  vp9_copy_and_extend_frame(src, &buf->img);
#endif

  buf->ts_start = ts_start;
  buf->ts_end = ts_end;
  buf->flags = flags;
  return 0;
}


struct lookahead_entry * vp9_lookahead_pop(struct lookahead_ctx *ctx,
                                           int drain) {
  struct lookahead_entry *buf = NULL;

  if (ctx->sz && (drain || ctx->sz == ctx->max_sz)) {
    buf = pop(ctx, &ctx->read_idx);
    ctx->sz--;
  }
  return buf;
}


struct lookahead_entry * vp9_lookahead_peek(struct lookahead_ctx *ctx,
                                            int index) {
  struct lookahead_entry *buf = NULL;

  assert(index < (int)ctx->max_sz);
  if (index < (int)ctx->sz) {
    index += ctx->read_idx;
    if (index >= (int)ctx->max_sz)
      index -= ctx->max_sz;
    buf = ctx->buf + index;
  }
  return buf;
}

unsigned int vp9_lookahead_depth(struct lookahead_ctx *ctx) {
  return ctx->sz;
}
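
/*
 * Usage sketch (not part of the original file): a minimal illustration of how
 * a caller might drive the lookahead API defined above. The function name
 * example_lookahead_usage, the 640x360 4:2:0 dimensions, and the queue depth
 * of 4 are hypothetical; error handling is reduced to early cleanup. Only
 * functions and struct lookahead_entry fields declared in vp9_lookahead.h and
 * used in this file are referenced.
 */
#if 0  /* illustrative sketch only; not compiled into the library */
static int example_lookahead_usage(YV12_BUFFER_CONFIG *src_frame,
                                   int64_t ts_start, int64_t ts_end) {
  /* Queue up to 4 source frames of a hypothetical 640x360 4:2:0 stream. */
  struct lookahead_ctx *la = vp9_lookahead_init(640, 360, 1, 1, 4);
  struct lookahead_entry *entry;

  if (!la)
    return -1;

  /* Push a copy of the source frame: no frame flags, no active map. */
  if (vp9_lookahead_push(la, src_frame, ts_start, ts_end, 0, NULL)) {
    /* Non-zero return means the queue is full. */
    vp9_lookahead_destroy(la);
    return -1;
  }

  /* Drain the queue: with drain != 0, pop returns entries even while the
   * queue is not yet full (vp9_lookahead_depth(la) < max_sz). */
  while ((entry = vp9_lookahead_pop(la, 1)) != NULL) {
    /* entry->img, entry->ts_start, entry->ts_end and entry->flags describe
     * the buffered frame; they remain valid until the slot is reused by a
     * later push. */
  }

  vp9_lookahead_destroy(la);
  return 0;
}
#endif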