Thu, 22 Jan 2015 13:21:57 +0100
Incorporate changes requested in Mozilla's review:
https://bugzilla.mozilla.org/show_bug.cgi?id=1123480#c6
/*
 *  Copyright (c) 2011 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */
#include <assert.h>
#include <stdlib.h>

#include "./vpx_config.h"
#include "vp9/common/vp9_common.h"
#include "vp9/encoder/vp9_lookahead.h"
#include "vp9/common/vp9_extend.h"

struct lookahead_ctx {
  unsigned int max_sz;         /* Absolute size of the queue */
  unsigned int sz;             /* Number of buffers currently in the queue */
  unsigned int read_idx;       /* Read index */
  unsigned int write_idx;      /* Write index */
  struct lookahead_entry *buf; /* Buffer list */
};
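
/* The queue is a fixed-size ring buffer: read_idx and write_idx walk forward
 * through buf[] and wrap at max_sz, while sz counts the live entries.
 * Illustrative trace with max_sz = 3: two pushes leave write_idx = 2,
 * read_idx = 0, sz = 2; one pop advances read_idx to 1 and drops sz to 1;
 * a third push wraps write_idx from 2 back to 0. */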


/* Return the buffer at the given absolute index and increment the index */
static struct lookahead_entry *pop(struct lookahead_ctx *ctx,
                                   unsigned int *idx) {
  unsigned int index = *idx;
  struct lookahead_entry *buf = ctx->buf + index;

  assert(index < ctx->max_sz);
  if (++index >= ctx->max_sz)
    index -= ctx->max_sz;
  *idx = index;
  return buf;
}


void vp9_lookahead_destroy(struct lookahead_ctx *ctx) {
  if (ctx) {
    if (ctx->buf) {
      unsigned int i;

      for (i = 0; i < ctx->max_sz; i++)
        vp9_free_frame_buffer(&ctx->buf[i].img);
      free(ctx->buf);
    }
    free(ctx);
  }
}


struct lookahead_ctx *vp9_lookahead_init(unsigned int width,
                                         unsigned int height,
                                         unsigned int subsampling_x,
                                         unsigned int subsampling_y,
                                         unsigned int depth) {
  struct lookahead_ctx *ctx = NULL;

  // Clamp the lookahead queue depth
  depth = clamp(depth, 1, MAX_LAG_BUFFERS);

  // Allocate the lookahead structures
  ctx = calloc(1, sizeof(*ctx));
  if (ctx) {
    unsigned int i;
    ctx->max_sz = depth;
    ctx->buf = calloc(depth, sizeof(*ctx->buf));
    if (!ctx->buf)
      goto bail;
    for (i = 0; i < depth; i++)
      if (vp9_alloc_frame_buffer(&ctx->buf[i].img,
                                 width, height, subsampling_x, subsampling_y,
                                 VP9BORDERINPIXELS))
        goto bail;
  }
  return ctx;
 bail:
  vp9_lookahead_destroy(ctx);
  return NULL;
}
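
/* Illustrative lifecycle sketch (not in the original source): create a queue
 * for a 640x480, 4:2:0 stream with a 25-frame lag, then tear it down. The
 * dimensions and depth below are example values only. */
#if 0
static void lookahead_lifecycle_example(void) {
  struct lookahead_ctx *la = vp9_lookahead_init(640, 480, 1, 1, 25);
  if (!la)
    return;  /* The context or one of its frame buffers failed to allocate. */
  /* ... vp9_lookahead_push() / vp9_lookahead_pop() calls go here ... */
  vp9_lookahead_destroy(la);  /* Frees all frame buffers and the context. */
}
#endif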

#define USE_PARTIAL_COPY 0

int vp9_lookahead_push(struct lookahead_ctx *ctx, YV12_BUFFER_CONFIG *src,
                       int64_t ts_start, int64_t ts_end, unsigned int flags,
                       unsigned char *active_map) {
  struct lookahead_entry *buf;
#if USE_PARTIAL_COPY
  int row, col, active_end;
  int mb_rows = (src->y_height + 15) >> 4;
  int mb_cols = (src->y_width + 15) >> 4;
#endif

  if (ctx->sz + 1 > ctx->max_sz)
    return 1;
  ctx->sz++;
  buf = pop(ctx, &ctx->write_idx);

#if USE_PARTIAL_COPY
  // TODO(jkoleszar): This is disabled for now, as
  // vp9_copy_and_extend_frame_with_rect is not subsampling/alpha aware.

  // Only do this partial copy if the following conditions are all met:
  // 1. Lookahead queue has a size of 1.
  // 2. Active map is provided.
  // 3. This is not a key frame, golden frame, or altref frame.
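  //
  // Illustrative example (not in the original source): for a row that is
  // five macroblocks wide with active_map = {0, 1, 1, 0, 1}, the scan below
  // finds the active runs [1, 3) and [4, 5) and copies only those columns.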
  if (ctx->max_sz == 1 && active_map && !flags) {
    for (row = 0; row < mb_rows; ++row) {
      col = 0;

      while (1) {
        // Find the first active macroblock in this row.
        for (; col < mb_cols; ++col) {
          if (active_map[col])
            break;
        }

        // No more active macroblocks in this row.
        if (col == mb_cols)
          break;

        // Find the end of the active region in this row.
        active_end = col;

        for (; active_end < mb_cols; ++active_end) {
          if (!active_map[active_end])
            break;
        }

        // Only copy this active region.
        vp9_copy_and_extend_frame_with_rect(src, &buf->img,
                                            row << 4,
                                            col << 4, 16,
                                            (active_end - col) << 4);

        // Start again from the end of this active region.
        col = active_end;
      }

      active_map += mb_cols;
    }
  } else {
    vp9_copy_and_extend_frame(src, &buf->img);
  }
#else
  // Partial copy not implemented yet
  vp9_copy_and_extend_frame(src, &buf->img);
#endif

  buf->ts_start = ts_start;
  buf->ts_end = ts_end;
  buf->flags = flags;
  return 0;
}
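
/* Illustrative push sketch (not in the original source): `raw`, `pts`, and
 * `duration` are assumed to come from the caller's capture loop; no flags or
 * active map are used here. */
#if 0
static int push_example(struct lookahead_ctx *la, YV12_BUFFER_CONFIG *raw,
                        int64_t pts, int64_t duration) {
  /* Returns 1, copying nothing, when the queue is already full. */
  return vp9_lookahead_push(la, raw, pts, pts + duration, 0, NULL);
}
#endif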


struct lookahead_entry *vp9_lookahead_pop(struct lookahead_ctx *ctx,
                                          int drain) {
  struct lookahead_entry *buf = NULL;

  if (ctx->sz && (drain || ctx->sz == ctx->max_sz)) {
    buf = pop(ctx, &ctx->read_idx);
    ctx->sz--;
  }
  return buf;
}
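
/* Note on drain semantics: with drain == 0 a frame is returned only once the
 * queue is full, so the encoder always works with a full lookahead window;
 * passing drain != 0 (e.g. at end-of-stream) flushes buffered frames
 * immediately. */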


struct lookahead_entry *vp9_lookahead_peek(struct lookahead_ctx *ctx,
                                           int index) {
  struct lookahead_entry *buf = NULL;

  assert(index < (int)ctx->max_sz);
  if (index < (int)ctx->sz) {
    index += ctx->read_idx;
    if (index >= (int)ctx->max_sz)
      index -= ctx->max_sz;
    buf = ctx->buf + index;
  }
  return buf;
}
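
/* Peeking is relative to the read side: index 0 is the frame the next pop
 * would return, index 1 the one after it, and so on; NULL is returned when
 * index reaches beyond the frames currently queued. */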

unsigned int vp9_lookahead_depth(struct lookahead_ctx *ctx) {
  return ctx->sz;
}