media/libvpx/vp8/encoder/lookahead.c

author      Michael Schloh von Bennewitz <michael@schloh.com>
date        Wed, 31 Dec 2014 06:09:35 +0100
changeset   0:6474c204b198
permissions -rw-r--r--

Cloned upstream origin tor-browser at tor-browser-31.3.0esr-4.5-1-build1,
revision ID fc1c9ff7c1b2defdbc039f12214767608f46423f, for hacking purposes.

/*
 * Copyright (c) 2011 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */
#include <assert.h>
#include <stdlib.h>
#include "vpx_config.h"
#include "lookahead.h"
#include "vp8/common/extend.h"

#define MAX_LAG_BUFFERS (CONFIG_REALTIME_ONLY? 1 : 25)

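/* Lookahead queue, implemented as a fixed-size circular buffer of frame
 * entries with separate read and write positions.
 */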
struct lookahead_ctx
{
    unsigned int max_sz;         /* Absolute size of the queue */
    unsigned int sz;             /* Number of buffers currently in the queue */
    unsigned int read_idx;       /* Read index */
    unsigned int write_idx;      /* Write index */
    struct lookahead_entry *buf; /* Buffer list */
};


/* Return the buffer at the given absolute index and increment the index */
static struct lookahead_entry *
pop(struct lookahead_ctx *ctx,
    unsigned int *idx)
{
    unsigned int index = *idx;
    struct lookahead_entry *buf = ctx->buf + index;

    assert(index < ctx->max_sz);
    if(++index >= ctx->max_sz)
        index -= ctx->max_sz;
    *idx = index;
    return buf;
}


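/* Free every per-entry frame buffer, the entry array, and the context
 * itself. A NULL context is ignored.
 */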
void
vp8_lookahead_destroy(struct lookahead_ctx *ctx)
{
    if(ctx)
    {
        if(ctx->buf)
        {
            unsigned int i;

            for(i = 0; i < ctx->max_sz; i++)
                vp8_yv12_de_alloc_frame_buffer(&ctx->buf[i].img);
            free(ctx->buf);
        }
        free(ctx);
    }
}


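/* Allocate and initialize a lookahead queue. The requested depth is clamped
 * to [1, MAX_LAG_BUFFERS] and one extra slot is added so the most recently
 * popped frame stays available; frame dimensions are rounded up to a
 * multiple of 16 before allocation. Returns NULL on allocation failure.
 */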
struct lookahead_ctx*
vp8_lookahead_init(unsigned int width,
                   unsigned int height,
                   unsigned int depth)
{
    struct lookahead_ctx *ctx = NULL;
    unsigned int i;

    /* Clamp the lookahead queue depth */
    if(depth < 1)
        depth = 1;
    else if(depth > MAX_LAG_BUFFERS)
        depth = MAX_LAG_BUFFERS;

    /* Keep last frame in lookahead buffer by increasing depth by 1.*/
    depth += 1;

    /* Align the buffer dimensions */
    width = (width + 15) & ~15;
    height = (height + 15) & ~15;

    /* Allocate the lookahead structures */
    ctx = calloc(1, sizeof(*ctx));
    if(ctx)
    {
        ctx->max_sz = depth;
        ctx->buf = calloc(depth, sizeof(*ctx->buf));
        if(!ctx->buf)
            goto bail;
        for(i = 0; i < depth; i++)
            if (vp8_yv12_alloc_frame_buffer(&ctx->buf[i].img,
                                            width, height, VP8BORDERINPIXELS))
                goto bail;
    }
    return ctx;
bail:
    vp8_lookahead_destroy(ctx);
    return NULL;
}


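/* Copy a source frame into the next write slot and record its timestamps
 * and flags. Returns 1 without storing anything if no slot is free, 0 on
 * success. When the lookahead queue has size 1, an active map is supplied
 * and no frame-type flags are set, only the active macroblock regions are
 * copied.
 */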
int
vp8_lookahead_push(struct lookahead_ctx *ctx,
                   YV12_BUFFER_CONFIG *src,
                   int64_t ts_start,
                   int64_t ts_end,
                   unsigned int flags,
                   unsigned char *active_map)
{
    struct lookahead_entry* buf;
    int row, col, active_end;
    int mb_rows = (src->y_height + 15) >> 4;
    int mb_cols = (src->y_width + 15) >> 4;

    if(ctx->sz + 2 > ctx->max_sz)
        return 1;
    ctx->sz++;
    buf = pop(ctx, &ctx->write_idx);

    /* Only do this partial copy if the following conditions are all met:
     * 1. Lookahead queue has size of 1.
     * 2. Active map is provided.
     * 3. This is not a key frame, golden frame, or altref frame.
     */
    if (ctx->max_sz == 1 && active_map && !flags)
    {
        for (row = 0; row < mb_rows; ++row)
        {
            col = 0;

            while (1)
            {
                /* Find the first active macroblock in this row. */
                for (; col < mb_cols; ++col)
                {
                    if (active_map[col])
                        break;
                }

                /* No more active macroblocks in this row. */
                if (col == mb_cols)
                    break;

                /* Find the end of the active region in this row. */
                active_end = col;

                for (; active_end < mb_cols; ++active_end)
                {
                    if (!active_map[active_end])
                        break;
                }

                /* Only copy this active region. */
                vp8_copy_and_extend_frame_with_rect(src, &buf->img,
                                                    row << 4,
                                                    col << 4, 16,
                                                    (active_end - col) << 4);

                /* Start again from the end of this active region. */
                col = active_end;
            }

            active_map += mb_cols;
        }
    }
    else
    {
        vp8_copy_and_extend_frame(src, &buf->img);
    }
    buf->ts_start = ts_start;
    buf->ts_end = ts_end;
    buf->flags = flags;
    return 0;
}


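/* Remove and return the next frame from the queue, or NULL if none is
 * available. Unless 'drain' is set, a frame is only returned once the queue
 * has filled to its working depth.
 */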
struct lookahead_entry*
vp8_lookahead_pop(struct lookahead_ctx *ctx,
                  int drain)
{
    struct lookahead_entry* buf = NULL;

    if(ctx->sz && (drain || ctx->sz == ctx->max_sz - 1))
    {
        buf = pop(ctx, &ctx->read_idx);
        ctx->sz--;
    }
    return buf;
}


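/* Return a pointer to an enqueued frame without removing it. With
 * PEEK_FORWARD, 'index' counts forward from the next frame to be popped;
 * with PEEK_BACKWARD, only index 1 is supported and the entry just behind
 * the read position (the frame most recently popped) is returned.
 */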
struct lookahead_entry*
vp8_lookahead_peek(struct lookahead_ctx *ctx,
                   unsigned int index,
                   int direction)
{
    struct lookahead_entry* buf = NULL;

    if (direction == PEEK_FORWARD)
    {
        assert(index < ctx->max_sz - 1);
        if(index < ctx->sz)
        {
            index += ctx->read_idx;
            if(index >= ctx->max_sz)
                index -= ctx->max_sz;
            buf = ctx->buf + index;
        }
    }
    else if (direction == PEEK_BACKWARD)
    {
        assert(index == 1);

        if(ctx->read_idx == 0)
            index = ctx->max_sz - 1;
        else
            index = ctx->read_idx - index;
        buf = ctx->buf + index;
    }

    return buf;
}


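/* Return the number of frames currently held in the queue. */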
unsigned int
vp8_lookahead_depth(struct lookahead_ctx *ctx)
{
    return ctx->sz;
}
