|
1 /* |
|
2 * Copyright (c) 2011 The WebM project authors. All Rights Reserved. |
|
3 * |
|
4 * Use of this source code is governed by a BSD-style license |
|
5 * that can be found in the LICENSE file in the root of the source |
|
6 * tree. An additional intellectual property rights grant can be found |
|
7 * in the file PATENTS. All contributing project authors may |
|
8 * be found in the AUTHORS file in the root of the source tree. |
|
9 */ |
|
10 #include <assert.h> |
|
11 #include <stdlib.h> |
|
12 |
|
13 #include "./vpx_config.h" |
|
14 #include "vp9/common/vp9_common.h" |
|
15 #include "vp9/encoder/vp9_lookahead.h" |
|
16 #include "vp9/common/vp9_extend.h" |
|
17 |
|
/* Ring buffer of raw source frames queued ahead of the encoder so it can
 * look into the future (e.g. for alt-ref selection / rate control).
 * read_idx/write_idx index into buf and wrap modulo max_sz.
 */
struct lookahead_ctx {
  unsigned int max_sz;         /* Absolute size of the queue */
  unsigned int sz;             /* Number of buffers currently in the queue */
  unsigned int read_idx;       /* Read index */
  unsigned int write_idx;      /* Write index */
  struct lookahead_entry *buf; /* Buffer list */
};
|
25 |
|
26 |
|
27 /* Return the buffer at the given absolute index and increment the index */ |
|
28 static struct lookahead_entry * pop(struct lookahead_ctx *ctx, |
|
29 unsigned int *idx) { |
|
30 unsigned int index = *idx; |
|
31 struct lookahead_entry *buf = ctx->buf + index; |
|
32 |
|
33 assert(index < ctx->max_sz); |
|
34 if (++index >= ctx->max_sz) |
|
35 index -= ctx->max_sz; |
|
36 *idx = index; |
|
37 return buf; |
|
38 } |
|
39 |
|
40 |
|
41 void vp9_lookahead_destroy(struct lookahead_ctx *ctx) { |
|
42 if (ctx) { |
|
43 if (ctx->buf) { |
|
44 unsigned int i; |
|
45 |
|
46 for (i = 0; i < ctx->max_sz; i++) |
|
47 vp9_free_frame_buffer(&ctx->buf[i].img); |
|
48 free(ctx->buf); |
|
49 } |
|
50 free(ctx); |
|
51 } |
|
52 } |
|
53 |
|
54 |
|
55 struct lookahead_ctx * vp9_lookahead_init(unsigned int width, |
|
56 unsigned int height, |
|
57 unsigned int subsampling_x, |
|
58 unsigned int subsampling_y, |
|
59 unsigned int depth) { |
|
60 struct lookahead_ctx *ctx = NULL; |
|
61 |
|
62 // Clamp the lookahead queue depth |
|
63 depth = clamp(depth, 1, MAX_LAG_BUFFERS); |
|
64 |
|
65 // Allocate the lookahead structures |
|
66 ctx = calloc(1, sizeof(*ctx)); |
|
67 if (ctx) { |
|
68 unsigned int i; |
|
69 ctx->max_sz = depth; |
|
70 ctx->buf = calloc(depth, sizeof(*ctx->buf)); |
|
71 if (!ctx->buf) |
|
72 goto bail; |
|
73 for (i = 0; i < depth; i++) |
|
74 if (vp9_alloc_frame_buffer(&ctx->buf[i].img, |
|
75 width, height, subsampling_x, subsampling_y, |
|
76 VP9BORDERINPIXELS)) |
|
77 goto bail; |
|
78 } |
|
79 return ctx; |
|
80 bail: |
|
81 vp9_lookahead_destroy(ctx); |
|
82 return NULL; |
|
83 } |
|
84 |
|
#define USE_PARTIAL_COPY 0

/* Enqueue a copy of `src` (border-extended) into the next write slot and
 * record its timestamps and frame flags.
 * Returns 1 if the queue is already full, 0 on success.
 * NOTE(review): `active_map` is only consumed by the compiled-out
 * USE_PARTIAL_COPY path below; with the macro at 0 it is ignored.
 */
int vp9_lookahead_push(struct lookahead_ctx *ctx, YV12_BUFFER_CONFIG *src,
                       int64_t ts_start, int64_t ts_end, unsigned int flags,
                       unsigned char *active_map) {
  struct lookahead_entry *buf;
#if USE_PARTIAL_COPY
  int row, col, active_end;
  /* Frame dimensions in 16x16 macroblock units, rounded up. */
  int mb_rows = (src->y_height + 15) >> 4;
  int mb_cols = (src->y_width + 15) >> 4;
#endif

  /* Reject the frame when no slot is free. */
  if (ctx->sz + 1 > ctx->max_sz)
    return 1;
  ctx->sz++;
  buf = pop(ctx, &ctx->write_idx);

#if USE_PARTIAL_COPY
  // TODO(jkoleszar): This is disabled for now, as
  // vp9_copy_and_extend_frame_with_rect is not subsampling/alpha aware.

  // Only do this partial copy if the following conditions are all met:
  // 1. Lookahead queue has a size of 1.
  // 2. Active map is provided.
  // 3. This is not a key frame, golden frame, nor altref frame.
  if (ctx->max_sz == 1 && active_map && !flags) {
    for (row = 0; row < mb_rows; ++row) {
      col = 0;

      while (1) {
        // Find the first active macroblock in this row.
        for (; col < mb_cols; ++col) {
          if (active_map[col])
            break;
        }

        // No more active macroblocks in this row.
        if (col == mb_cols)
          break;

        // Find the end of the active region in this row.
        active_end = col;

        for (; active_end < mb_cols; ++active_end) {
          if (!active_map[active_end])
            break;
        }

        // Only copy this active region.
        vp9_copy_and_extend_frame_with_rect(src, &buf->img,
                                            row << 4,
                                            col << 4, 16,
                                            (active_end - col) << 4);

        // Start again from the end of this active region.
        col = active_end;
      }

      // Advance to the next row of the active map.
      active_map += mb_cols;
    }
  } else {
    vp9_copy_and_extend_frame(src, &buf->img);
  }
#else
  // Partial copy not implemented yet
  vp9_copy_and_extend_frame(src, &buf->img);
#endif

  buf->ts_start = ts_start;
  buf->ts_end = ts_end;
  buf->flags = flags;
  return 0;
}
|
158 |
|
159 |
|
160 struct lookahead_entry * vp9_lookahead_pop(struct lookahead_ctx *ctx, |
|
161 int drain) { |
|
162 struct lookahead_entry *buf = NULL; |
|
163 |
|
164 if (ctx->sz && (drain || ctx->sz == ctx->max_sz)) { |
|
165 buf = pop(ctx, &ctx->read_idx); |
|
166 ctx->sz--; |
|
167 } |
|
168 return buf; |
|
169 } |
|
170 |
|
171 |
|
172 struct lookahead_entry * vp9_lookahead_peek(struct lookahead_ctx *ctx, |
|
173 int index) { |
|
174 struct lookahead_entry *buf = NULL; |
|
175 |
|
176 assert(index < (int)ctx->max_sz); |
|
177 if (index < (int)ctx->sz) { |
|
178 index += ctx->read_idx; |
|
179 if (index >= (int)ctx->max_sz) |
|
180 index -= ctx->max_sz; |
|
181 buf = ctx->buf + index; |
|
182 } |
|
183 return buf; |
|
184 } |
|
185 |
|
186 unsigned int vp9_lookahead_depth(struct lookahead_ctx *ctx) { |
|
187 return ctx->sz; |
|
188 } |