/*
 *  Copyright (c) 2011 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */
#include <assert.h>
#include <stdlib.h>
#include "vpx_config.h"
#include "lookahead.h"
#include "vp8/common/extend.h"

#define MAX_LAG_BUFFERS (CONFIG_REALTIME_ONLY ? 1 : 25)

struct lookahead_ctx
{
    unsigned int max_sz;         /* Absolute size of the queue */
    unsigned int sz;             /* Number of buffers currently in the queue */
    unsigned int read_idx;       /* Read index */
    unsigned int write_idx;      /* Write index */
    struct lookahead_entry *buf; /* Buffer list */
};
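
/* The queue is a fixed-size ring: read_idx and write_idx advance through
 * buf[] and wrap at max_sz. An illustrative trace (a sketch, not part of
 * the build) for max_sz == 3, where at most max_sz - 1 frames are queued:
 *
 *   push -> write_idx: 0 -> 1, sz: 0 -> 1
 *   push -> write_idx: 1 -> 2, sz: 1 -> 2   (queue now full)
 *   pop  -> read_idx:  0 -> 1, sz: 2 -> 1
 *   push -> write_idx: 2 -> 0, sz: 1 -> 2   (write index wraps)
 */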


/* Return the buffer at the given absolute index and increment the index,
 * wrapping back to zero when it passes the end of the queue.
 */
static struct lookahead_entry *
pop(struct lookahead_ctx *ctx,
    unsigned int *idx)
{
    unsigned int index = *idx;
    struct lookahead_entry *buf = ctx->buf + index;

    assert(index < ctx->max_sz);
    if(++index >= ctx->max_sz)
        index -= ctx->max_sz;
    *idx = index;
    return buf;
}


void
vp8_lookahead_destroy(struct lookahead_ctx *ctx)
{
    if(ctx)
    {
        if(ctx->buf)
        {
            unsigned int i;

            for(i = 0; i < ctx->max_sz; i++)
                vp8_yv12_de_alloc_frame_buffer(&ctx->buf[i].img);
            free(ctx->buf);
        }
        free(ctx);
    }
}


struct lookahead_ctx *
vp8_lookahead_init(unsigned int width,
                   unsigned int height,
                   unsigned int depth)
{
    struct lookahead_ctx *ctx = NULL;
    unsigned int i;

    /* Clamp the lookahead queue depth */
    if(depth < 1)
        depth = 1;
    else if(depth > MAX_LAG_BUFFERS)
        depth = MAX_LAG_BUFFERS;

    /* Keep the last frame in the lookahead buffer by increasing depth by 1 */
    depth += 1;

    /* Align the buffer dimensions to multiples of 16 */
    width = (width + 15) & ~15;
    height = (height + 15) & ~15;

    /* Allocate the lookahead structures */
    ctx = calloc(1, sizeof(*ctx));
    if(ctx)
    {
        ctx->max_sz = depth;
        ctx->buf = calloc(depth, sizeof(*ctx->buf));
        if(!ctx->buf)
            goto bail;
        for(i = 0; i < depth; i++)
            if (vp8_yv12_alloc_frame_buffer(&ctx->buf[i].img,
                                            width, height, VP8BORDERINPIXELS))
                goto bail;
    }
    return ctx;
bail:
    vp8_lookahead_destroy(ctx);
    return NULL;
}
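
/* Illustrative usage (a sketch with assumed dimensions, not part of this
 * file): the encoder creates one context per stream and releases it on
 * teardown. Note that depth + 1 frame buffers are allocated, so the most
 * recently popped frame stays valid until the next pop.
 *
 *   struct lookahead_ctx *la = vp8_lookahead_init(1280, 720, 10);
 *   if(!la)
 *       return;
 *   ...
 *   vp8_lookahead_destroy(la);
 */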


int
vp8_lookahead_push(struct lookahead_ctx *ctx,
                   YV12_BUFFER_CONFIG *src,
                   int64_t ts_start,
                   int64_t ts_end,
                   unsigned int flags,
                   unsigned char *active_map)
{
    struct lookahead_entry *buf;
    int row, col, active_end;
    int mb_rows = (src->y_height + 15) >> 4;
    int mb_cols = (src->y_width + 15) >> 4;

    /* Refuse the push if the queue is full; one slot stays reserved so the
     * last popped frame remains valid.
     */
    if(ctx->sz + 2 > ctx->max_sz)
        return 1;
    ctx->sz++;
    buf = pop(ctx, &ctx->write_idx);

    /* Only do this partial copy if the following conditions are all met:
     * 1. The lookahead queue has a size of 1.
     * 2. An active map is provided.
     * 3. This is not a key frame, golden frame, or altref frame.
     */
    if (ctx->max_sz == 1 && active_map && !flags)
    {
        for (row = 0; row < mb_rows; ++row)
        {
            col = 0;

            while (1)
            {
                /* Find the first active macroblock in this row. */
                for (; col < mb_cols; ++col)
                {
                    if (active_map[col])
                        break;
                }

                /* No more active macroblocks in this row. */
                if (col == mb_cols)
                    break;

                /* Find the end of the active region in this row. */
                active_end = col;

                for (; active_end < mb_cols; ++active_end)
                {
                    if (!active_map[active_end])
                        break;
                }

                /* Only copy this active region. */
                vp8_copy_and_extend_frame_with_rect(src, &buf->img,
                                                    row << 4,
                                                    col << 4, 16,
                                                    (active_end - col) << 4);

                /* Start again from the end of this active region. */
                col = active_end;
            }

            active_map += mb_cols;
        }
    }
    else
    {
        vp8_copy_and_extend_frame(src, &buf->img);
    }
    buf->ts_start = ts_start;
    buf->ts_end = ts_end;
    buf->flags = flags;
    return 0;
}
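
/* Illustrative push loop (a sketch; get_raw_frame(), raw, pts, pts_end and
 * map are hypothetical caller-side names, not part of this API). map, when
 * non-NULL, holds one byte per macroblock (mb_rows * mb_cols), nonzero
 * marking active blocks; passing NULL copies the whole frame.
 *
 *   while(get_raw_frame(&raw, &pts, &pts_end))
 *   {
 *       if(vp8_lookahead_push(la, &raw, pts, pts_end, 0, map))
 *           break;
 *   }
 *
 * A nonzero return means the queue is full; drain it with
 * vp8_lookahead_pop() before pushing again.
 */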


struct lookahead_entry *
vp8_lookahead_pop(struct lookahead_ctx *ctx,
                  int drain)
{
    struct lookahead_entry *buf = NULL;

    if(ctx->sz && (drain || ctx->sz == ctx->max_sz - 1))
    {
        buf = pop(ctx, &ctx->read_idx);
        ctx->sz--;
    }
    return buf;
}
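
/* Illustrative drain at end of stream (a sketch; encode_frame() is a
 * hypothetical consumer). With drain == 0 a pop only succeeds once the
 * queue is full, which is what maintains the configured lag.
 *
 *   struct lookahead_entry *e;
 *   while((e = vp8_lookahead_pop(la, 1)) != NULL)
 *       encode_frame(&e->img, e->ts_start, e->ts_end, e->flags);
 */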


struct lookahead_entry *
vp8_lookahead_peek(struct lookahead_ctx *ctx,
                   unsigned int index,
                   int direction)
{
    struct lookahead_entry *buf = NULL;

    if (direction == PEEK_FORWARD)
    {
        assert(index < ctx->max_sz - 1);
        if(index < ctx->sz)
        {
            index += ctx->read_idx;
            if(index >= ctx->max_sz)
                index -= ctx->max_sz;
            buf = ctx->buf + index;
        }
    }
    else if (direction == PEEK_BACKWARD)
    {
        /* Backward peek only supports looking back one frame, which is the
         * last frame popped from the queue.
         */
        assert(index == 1);

        if(ctx->read_idx == 0)
            index = ctx->max_sz - 1;
        else
            index = ctx->read_idx - index;
        buf = ctx->buf + index;
    }

    return buf;
}
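
/* Illustrative peeks (a sketch): inspect frames without consuming them,
 * e.g. when making lookahead-based decisions such as altref placement.
 *
 *   struct lookahead_entry *next = vp8_lookahead_peek(la, 0, PEEK_FORWARD);
 *   struct lookahead_entry *last = vp8_lookahead_peek(la, 1, PEEK_BACKWARD);
 *
 * PEEK_FORWARD with index 0 returns the next frame to be popped;
 * PEEK_BACKWARD with index 1 returns the most recently popped frame.
 */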


unsigned int
vp8_lookahead_depth(struct lookahead_ctx *ctx)
{
    return ctx->sz;
}