media/libvpx/vp8/encoder/encodeframe.c

changeset 0:6474c204b198
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/media/libvpx/vp8/encoder/encodeframe.c	Wed Dec 31 06:09:35 2014 +0100
     1.3 @@ -0,0 +1,1404 @@
     1.4 +/*
     1.5 + *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
     1.6 + *
     1.7 + *  Use of this source code is governed by a BSD-style license
     1.8 + *  that can be found in the LICENSE file in the root of the source
     1.9 + *  tree. An additional intellectual property rights grant can be found
    1.10 + *  in the file PATENTS.  All contributing project authors may
    1.11 + *  be found in the AUTHORS file in the root of the source tree.
    1.12 + */
    1.13 +
    1.14 +
    1.15 +#include "vpx_config.h"
    1.16 +#include "vp8_rtcd.h"
    1.17 +#include "encodemb.h"
    1.18 +#include "encodemv.h"
    1.19 +#include "vp8/common/common.h"
    1.20 +#include "onyx_int.h"
    1.21 +#include "vp8/common/extend.h"
    1.22 +#include "vp8/common/entropymode.h"
    1.23 +#include "vp8/common/quant_common.h"
    1.24 +#include "segmentation.h"
    1.25 +#include "vp8/common/setupintrarecon.h"
    1.26 +#include "encodeintra.h"
    1.27 +#include "vp8/common/reconinter.h"
    1.28 +#include "rdopt.h"
    1.29 +#include "pickinter.h"
    1.30 +#include "vp8/common/findnearmv.h"
    1.31 +#include <stdio.h>
    1.32 +#include <limits.h>
    1.33 +#include "vp8/common/invtrans.h"
    1.34 +#include "vpx_ports/vpx_timer.h"
    1.35 +#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
    1.36 +#include "bitstream.h"
    1.37 +#endif
    1.38 +#include "encodeframe.h"
    1.39 +
     1.40 +extern void vp8_stuff_mb(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t);
    1.41 +extern void vp8_calc_ref_frame_costs(int *ref_frame_cost,
    1.42 +                                     int prob_intra,
    1.43 +                                     int prob_last,
    1.44 +                                     int prob_garf
    1.45 +                                    );
    1.46 +extern void vp8_convert_rfct_to_prob(VP8_COMP *const cpi);
    1.47 +extern void vp8cx_initialize_me_consts(VP8_COMP *cpi, int QIndex);
    1.48 +extern void vp8_auto_select_speed(VP8_COMP *cpi);
    1.49 +extern void vp8cx_init_mbrthread_data(VP8_COMP *cpi,
    1.50 +                                      MACROBLOCK *x,
    1.51 +                                      MB_ROW_COMP *mbr_ei,
    1.52 +                                      int count);
    1.53 +static void adjust_act_zbin( VP8_COMP *cpi, MACROBLOCK *x );
    1.54 +
    1.55 +#ifdef MODE_STATS
    1.56 +unsigned int inter_y_modes[10] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
    1.57 +unsigned int inter_uv_modes[4] = {0, 0, 0, 0};
    1.58 +unsigned int inter_b_modes[15]  = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
    1.59 +unsigned int y_modes[5]   = {0, 0, 0, 0, 0};
    1.60 +unsigned int uv_modes[4]  = {0, 0, 0, 0};
    1.61 +unsigned int b_modes[14]  = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
    1.62 +#endif
    1.63 +
    1.64 +
    1.65 +/* activity_avg must be positive, or flat regions could get a zero weight
    1.66 + *  (infinite lambda), which confounds analysis.
    1.67 + * This also avoids the need for divide by zero checks in
    1.68 + *  vp8_activity_masking().
    1.69 + */
    1.70 +#define VP8_ACTIVITY_AVG_MIN (64)
    1.71 +
    1.72 +/* This is used as a reference when computing the source variance for the
    1.73 + *  purposes of activity masking.
    1.74 + * Eventually this should be replaced by custom no-reference routines,
    1.75 + *  which will be faster.
    1.76 + */
    1.77 +static const unsigned char VP8_VAR_OFFS[16]=
    1.78 +{
    1.79 +    128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128
    1.80 +};
    1.81 +
    1.82 +
    1.83 +/* Original activity measure from Tim T's code. */
    1.84 +static unsigned int tt_activity_measure( VP8_COMP *cpi, MACROBLOCK *x )
    1.85 +{
    1.86 +    unsigned int act;
    1.87 +    unsigned int sse;
    1.88 +    /* TODO: This could also be done over smaller areas (8x8), but that would
    1.89 +     *  require extensive changes elsewhere, as lambda is assumed to be fixed
    1.90 +     *  over an entire MB in most of the code.
    1.91 +     * Another option is to compute four 8x8 variances, and pick a single
    1.92 +     *  lambda using a non-linear combination (e.g., the smallest, or second
    1.93 +     *  smallest, etc.).
    1.94 +     */
    1.95 +    act =  vp8_variance16x16(x->src.y_buffer,
    1.96 +                    x->src.y_stride, VP8_VAR_OFFS, 0, &sse);
    1.97 +    act = act<<4;
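          +    /* act is scaled up by 16 here so that it is on the same
          +     * fixed-point scale as the flat-region thresholds tested below. */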
    1.98 +
    1.99 +    /* If the region is flat, lower the activity some more. */
   1.100 +    if (act < 8<<12)
   1.101 +        act = act < 5<<12 ? act : 5<<12;
   1.102 +
   1.103 +    return act;
   1.104 +}
   1.105 +
   1.106 +/* Stub for alternative experimental activity measures. */
   1.107 +static unsigned int alt_activity_measure( VP8_COMP *cpi,
   1.108 +                                          MACROBLOCK *x, int use_dc_pred )
   1.109 +{
   1.110 +    return vp8_encode_intra(cpi,x, use_dc_pred);
   1.111 +}
   1.112 +
   1.113 +
   1.114 +/* Measure the activity of the current macroblock
    1.115 + * What we measure here is TBD, so it is abstracted into this function.
   1.116 + */
   1.117 +#define ALT_ACT_MEASURE 1
   1.118 +static unsigned int mb_activity_measure( VP8_COMP *cpi, MACROBLOCK *x,
   1.119 +                                  int mb_row, int mb_col)
   1.120 +{
   1.121 +    unsigned int mb_activity;
   1.122 +
   1.123 +    if  ( ALT_ACT_MEASURE )
   1.124 +    {
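          +        /* use_dc_pred is true only for MBs in the top row or left
          +         * column (excluding the corner MB), i.e. where exactly one
          +         * of the two prediction edges is available. */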
   1.125 +        int use_dc_pred = (mb_col || mb_row) && (!mb_col || !mb_row);
   1.126 +
    1.127 +        /* Use the alternative activity measure. */
   1.128 +        mb_activity = alt_activity_measure( cpi, x, use_dc_pred );
   1.129 +    }
   1.130 +    else
   1.131 +    {
   1.132 +        /* Original activity measure from Tim T's code. */
   1.133 +        mb_activity = tt_activity_measure( cpi, x );
   1.134 +    }
   1.135 +
   1.136 +    if ( mb_activity < VP8_ACTIVITY_AVG_MIN )
   1.137 +        mb_activity = VP8_ACTIVITY_AVG_MIN;
   1.138 +
   1.139 +    return mb_activity;
   1.140 +}
   1.141 +
   1.142 +/* Calculate an "average" mb activity value for the frame */
   1.143 +#define ACT_MEDIAN 0
   1.144 +static void calc_av_activity( VP8_COMP *cpi, int64_t activity_sum )
   1.145 +{
   1.146 +#if ACT_MEDIAN
   1.147 +    /* Find median: Simple n^2 algorithm for experimentation */
   1.148 +    {
   1.149 +        unsigned int median;
   1.150 +        unsigned int i,j;
   1.151 +        unsigned int * sortlist;
   1.152 +        unsigned int tmp;
   1.153 +
   1.154 +        /* Create a list to sort to */
   1.155 +        CHECK_MEM_ERROR(sortlist,
   1.156 +                        vpx_calloc(sizeof(unsigned int),
   1.157 +                        cpi->common.MBs));
   1.158 +
   1.159 +        /* Copy map to sort list */
   1.160 +        vpx_memcpy( sortlist, cpi->mb_activity_map,
   1.161 +                    sizeof(unsigned int) * cpi->common.MBs );
   1.162 +
   1.163 +
   1.164 +        /* Ripple each value down to its correct position */
   1.165 +        for ( i = 1; i < cpi->common.MBs; i ++ )
   1.166 +        {
   1.167 +            for ( j = i; j > 0; j -- )
   1.168 +            {
   1.169 +                if ( sortlist[j] < sortlist[j-1] )
   1.170 +                {
   1.171 +                    /* Swap values */
   1.172 +                    tmp = sortlist[j-1];
   1.173 +                    sortlist[j-1] = sortlist[j];
   1.174 +                    sortlist[j] = tmp;
   1.175 +                }
   1.176 +                else
   1.177 +                    break;
   1.178 +            }
   1.179 +        }
   1.180 +
    1.181 +        /* Even number of MBs: median is the mean of the two middle values. */
    1.182 +        median = ( 1 + sortlist[(cpi->common.MBs >> 1) - 1] +
    1.183 +                   sortlist[cpi->common.MBs >> 1] ) >> 1;
   1.184 +
   1.185 +        cpi->activity_avg = median;
   1.186 +
   1.187 +        vpx_free(sortlist);
   1.188 +    }
   1.189 +#else
   1.190 +    /* Simple mean for now */
   1.191 +    cpi->activity_avg = (unsigned int)(activity_sum/cpi->common.MBs);
   1.192 +#endif
   1.193 +
   1.194 +    if (cpi->activity_avg < VP8_ACTIVITY_AVG_MIN)
   1.195 +        cpi->activity_avg = VP8_ACTIVITY_AVG_MIN;
   1.196 +
   1.197 +    /* Experimental code: return fixed value normalized for several clips */
   1.198 +    if  ( ALT_ACT_MEASURE )
   1.199 +        cpi->activity_avg = 100000;
   1.200 +}
   1.201 +
   1.202 +#define USE_ACT_INDEX   0
   1.203 +#define OUTPUT_NORM_ACT_STATS   0
   1.204 +
   1.205 +#if USE_ACT_INDEX
    1.206 +/* Calculate an activity index for each mb */
   1.207 +static void calc_activity_index( VP8_COMP *cpi, MACROBLOCK *x )
   1.208 +{
   1.209 +    VP8_COMMON *const cm = & cpi->common;
   1.210 +    int mb_row, mb_col;
   1.211 +
   1.212 +    int64_t act;
   1.213 +    int64_t a;
   1.214 +    int64_t b;
   1.215 +
   1.216 +#if OUTPUT_NORM_ACT_STATS
   1.217 +    FILE *f = fopen("norm_act.stt", "a");
   1.218 +    fprintf(f, "\n%12d\n", cpi->activity_avg );
   1.219 +#endif
   1.220 +
   1.221 +    /* Reset pointers to start of activity map */
   1.222 +    x->mb_activity_ptr = cpi->mb_activity_map;
   1.223 +
   1.224 +    /* Calculate normalized mb activity number. */
   1.225 +    for (mb_row = 0; mb_row < cm->mb_rows; mb_row++)
   1.226 +    {
   1.227 +        /* for each macroblock col in image */
   1.228 +        for (mb_col = 0; mb_col < cm->mb_cols; mb_col++)
   1.229 +        {
   1.230 +            /* Read activity from the map */
   1.231 +            act = *(x->mb_activity_ptr);
   1.232 +
   1.233 +            /* Calculate a normalized activity number */
   1.234 +            a = act + 4*cpi->activity_avg;
   1.235 +            b = 4*act + cpi->activity_avg;
   1.236 +
   1.237 +            if ( b >= a )
   1.238 +                *(x->activity_ptr) = (int)((b + (a>>1))/a) - 1;
   1.239 +            else
   1.240 +                *(x->activity_ptr) = 1 - (int)((a + (b>>1))/b);
   1.241 +
   1.242 +#if OUTPUT_NORM_ACT_STATS
   1.243 +            fprintf(f, " %6d", *(x->mb_activity_ptr));
   1.244 +#endif
   1.245 +            /* Increment activity map pointers */
   1.246 +            x->mb_activity_ptr++;
   1.247 +        }
   1.248 +
   1.249 +#if OUTPUT_NORM_ACT_STATS
   1.250 +        fprintf(f, "\n");
   1.251 +#endif
   1.252 +
   1.253 +    }
   1.254 +
   1.255 +#if OUTPUT_NORM_ACT_STATS
   1.256 +    fclose(f);
   1.257 +#endif
   1.258 +
   1.259 +}
   1.260 +#endif
   1.261 +
    1.262 +/* Loop through all MBs, noting the activity of each, then compute the
    1.263 + * frame's average activity and a normalized activity value for each MB.
   1.264 + */
   1.265 +static void build_activity_map( VP8_COMP *cpi )
   1.266 +{
   1.267 +    MACROBLOCK *const x = & cpi->mb;
   1.268 +    MACROBLOCKD *xd = &x->e_mbd;
   1.269 +    VP8_COMMON *const cm = & cpi->common;
   1.270 +
   1.271 +#if ALT_ACT_MEASURE
   1.272 +    YV12_BUFFER_CONFIG *new_yv12 = &cm->yv12_fb[cm->new_fb_idx];
   1.273 +    int recon_yoffset;
   1.274 +    int recon_y_stride = new_yv12->y_stride;
   1.275 +#endif
   1.276 +
   1.277 +    int mb_row, mb_col;
   1.278 +    unsigned int mb_activity;
   1.279 +    int64_t activity_sum = 0;
   1.280 +
   1.281 +    /* for each macroblock row in image */
   1.282 +    for (mb_row = 0; mb_row < cm->mb_rows; mb_row++)
   1.283 +    {
   1.284 +#if ALT_ACT_MEASURE
    1.285 +        /* set above-row availability and the recon row offset */
   1.286 +        xd->up_available = (mb_row != 0);
   1.287 +        recon_yoffset = (mb_row * recon_y_stride * 16);
   1.288 +#endif
   1.289 +        /* for each macroblock col in image */
   1.290 +        for (mb_col = 0; mb_col < cm->mb_cols; mb_col++)
   1.291 +        {
   1.292 +#if ALT_ACT_MEASURE
   1.293 +            xd->dst.y_buffer = new_yv12->y_buffer + recon_yoffset;
   1.294 +            xd->left_available = (mb_col != 0);
   1.295 +            recon_yoffset += 16;
   1.296 +#endif
   1.297 +            /* Copy current mb to a buffer */
   1.298 +            vp8_copy_mem16x16(x->src.y_buffer, x->src.y_stride, x->thismb, 16);
   1.299 +
   1.300 +            /* measure activity */
   1.301 +            mb_activity = mb_activity_measure( cpi, x, mb_row, mb_col );
   1.302 +
   1.303 +            /* Keep frame sum */
   1.304 +            activity_sum += mb_activity;
   1.305 +
   1.306 +            /* Store MB level activity details. */
   1.307 +            *x->mb_activity_ptr = mb_activity;
   1.308 +
   1.309 +            /* Increment activity map pointer */
   1.310 +            x->mb_activity_ptr++;
   1.311 +
   1.312 +            /* adjust to the next column of source macroblocks */
   1.313 +            x->src.y_buffer += 16;
   1.314 +        }
   1.315 +
   1.316 +
   1.317 +        /* adjust to the next row of mbs */
   1.318 +        x->src.y_buffer += 16 * x->src.y_stride - 16 * cm->mb_cols;
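          +        /* (Steps 16 source rows down, minus the 16 * mb_cols
          +         * advanced by the column loop.) */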
   1.319 +
   1.320 +#if ALT_ACT_MEASURE
   1.321 +        /* extend the recon for intra prediction */
   1.322 +        vp8_extend_mb_row(new_yv12, xd->dst.y_buffer + 16,
   1.323 +                          xd->dst.u_buffer + 8, xd->dst.v_buffer + 8);
   1.324 +#endif
   1.325 +
   1.326 +    }
   1.327 +
   1.328 +    /* Calculate an "average" MB activity */
   1.329 +    calc_av_activity(cpi, activity_sum);
   1.330 +
   1.331 +#if USE_ACT_INDEX
   1.332 +    /* Calculate an activity index number of each mb */
   1.333 +    calc_activity_index( cpi, x );
   1.334 +#endif
   1.335 +
   1.336 +}
   1.337 +
   1.338 +/* Macroblock activity masking */
   1.339 +void vp8_activity_masking(VP8_COMP *cpi, MACROBLOCK *x)
   1.340 +{
   1.341 +#if USE_ACT_INDEX
   1.342 +    x->rdmult += *(x->mb_activity_ptr) * (x->rdmult >> 2);
   1.343 +    x->errorperbit = x->rdmult * 100 /(110 * x->rddiv);
   1.344 +    x->errorperbit += (x->errorperbit==0);
   1.345 +#else
   1.346 +    int64_t a;
   1.347 +    int64_t b;
   1.348 +    int64_t act = *(x->mb_activity_ptr);
   1.349 +
   1.350 +    /* Apply the masking to the RD multiplier. */
   1.351 +    a = act + (2*cpi->activity_avg);
   1.352 +    b = (2*act) + cpi->activity_avg;
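          +
          +    /* b/a > 1 when this MB is busier than average, < 1 when flatter;
          +     * (a>>1) rounds the scaled rdmult to the nearest integer. */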
   1.353 +
   1.354 +    x->rdmult = (unsigned int)(((int64_t)x->rdmult*b + (a>>1))/a);
   1.355 +    x->errorperbit = x->rdmult * 100 /(110 * x->rddiv);
   1.356 +    x->errorperbit += (x->errorperbit==0);
   1.357 +#endif
   1.358 +
   1.359 +    /* Activity based Zbin adjustment */
   1.360 +    adjust_act_zbin(cpi, x);
   1.361 +}
   1.362 +
   1.363 +static
   1.364 +void encode_mb_row(VP8_COMP *cpi,
   1.365 +                   VP8_COMMON *cm,
   1.366 +                   int mb_row,
   1.367 +                   MACROBLOCK  *x,
   1.368 +                   MACROBLOCKD *xd,
   1.369 +                   TOKENEXTRA **tp,
   1.370 +                   int *segment_counts,
   1.371 +                   int *totalrate)
   1.372 +{
   1.373 +    int recon_yoffset, recon_uvoffset;
   1.374 +    int mb_col;
   1.375 +    int ref_fb_idx = cm->lst_fb_idx;
   1.376 +    int dst_fb_idx = cm->new_fb_idx;
   1.377 +    int recon_y_stride = cm->yv12_fb[ref_fb_idx].y_stride;
   1.378 +    int recon_uv_stride = cm->yv12_fb[ref_fb_idx].uv_stride;
   1.379 +    int map_index = (mb_row * cpi->common.mb_cols);
   1.380 +
   1.381 +#if (CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING)
   1.382 +    const int num_part = (1 << cm->multi_token_partition);
   1.383 +    TOKENEXTRA * tp_start = cpi->tok;
   1.384 +    vp8_writer *w;
   1.385 +#endif
   1.386 +
   1.387 +#if CONFIG_MULTITHREAD
   1.388 +    const int nsync = cpi->mt_sync_range;
   1.389 +    const int rightmost_col = cm->mb_cols + nsync;
   1.390 +    volatile const int *last_row_current_mb_col;
   1.391 +    volatile int *current_mb_col = &cpi->mt_current_mb_col[mb_row];
   1.392 +
   1.393 +    if ((cpi->b_multi_threaded != 0) && (mb_row != 0))
   1.394 +        last_row_current_mb_col = &cpi->mt_current_mb_col[mb_row - 1];
   1.395 +    else
   1.396 +        last_row_current_mb_col = &rightmost_col;
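          +
          +    /* Rows are paced via mt_current_mb_col: a row may not encode a
          +     * column until the row above is at least nsync MBs ahead.  The
          +     * top row (and the single-threaded case) points at rightmost_col
          +     * so it never waits. */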
   1.397 +#endif
   1.398 +
   1.399 +#if (CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING)
   1.400 +    if(num_part > 1)
   1.401 +        w= &cpi->bc[1 + (mb_row % num_part)];
   1.402 +    else
   1.403 +        w = &cpi->bc[1];
   1.404 +#endif
   1.405 +
   1.406 +    /* reset above block coeffs */
   1.407 +    xd->above_context = cm->above_context;
   1.408 +
   1.409 +    xd->up_available = (mb_row != 0);
   1.410 +    recon_yoffset = (mb_row * recon_y_stride * 16);
   1.411 +    recon_uvoffset = (mb_row * recon_uv_stride * 8);
   1.412 +
   1.413 +    cpi->tplist[mb_row].start = *tp;
   1.414 +    /* printf("Main mb_row = %d\n", mb_row); */
   1.415 +
   1.416 +    /* Distance of Mb to the top & bottom edges, specified in 1/8th pel
    1.417 +     * units as they are always compared to values that are in 1/8th pel units
   1.418 +     */
   1.419 +    xd->mb_to_top_edge = -((mb_row * 16) << 3);
   1.420 +    xd->mb_to_bottom_edge = ((cm->mb_rows - 1 - mb_row) * 16) << 3;
   1.421 +
   1.422 +    /* Set up limit values for vertical motion vector components
   1.423 +     * to prevent them extending beyond the UMV borders
   1.424 +     */
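          +    /* (VP8BORDERINPIXELS - 16 leaves room for the whole 16x16 block,
          +     * plus interpolation margin, inside the extended border.) */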
   1.425 +    x->mv_row_min = -((mb_row * 16) + (VP8BORDERINPIXELS - 16));
   1.426 +    x->mv_row_max = ((cm->mb_rows - 1 - mb_row) * 16)
   1.427 +                        + (VP8BORDERINPIXELS - 16);
   1.428 +
   1.429 +    /* Set the mb activity pointer to the start of the row. */
   1.430 +    x->mb_activity_ptr = &cpi->mb_activity_map[map_index];
   1.431 +
   1.432 +    /* for each macroblock col in image */
   1.433 +    for (mb_col = 0; mb_col < cm->mb_cols; mb_col++)
   1.434 +    {
   1.435 +
   1.436 +#if  (CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING)
   1.437 +        *tp = cpi->tok;
   1.438 +#endif
   1.439 +        /* Distance of Mb to the left & right edges, specified in
   1.440 +         * 1/8th pel units as they are always compared to values
   1.441 +         * that are in 1/8th pel units
   1.442 +         */
   1.443 +        xd->mb_to_left_edge = -((mb_col * 16) << 3);
   1.444 +        xd->mb_to_right_edge = ((cm->mb_cols - 1 - mb_col) * 16) << 3;
   1.445 +
   1.446 +        /* Set up limit values for horizontal motion vector components
   1.447 +         * to prevent them extending beyond the UMV borders
   1.448 +         */
   1.449 +        x->mv_col_min = -((mb_col * 16) + (VP8BORDERINPIXELS - 16));
   1.450 +        x->mv_col_max = ((cm->mb_cols - 1 - mb_col) * 16)
   1.451 +                            + (VP8BORDERINPIXELS - 16);
   1.452 +
   1.453 +        xd->dst.y_buffer = cm->yv12_fb[dst_fb_idx].y_buffer + recon_yoffset;
   1.454 +        xd->dst.u_buffer = cm->yv12_fb[dst_fb_idx].u_buffer + recon_uvoffset;
   1.455 +        xd->dst.v_buffer = cm->yv12_fb[dst_fb_idx].v_buffer + recon_uvoffset;
   1.456 +        xd->left_available = (mb_col != 0);
   1.457 +
   1.458 +        x->rddiv = cpi->RDDIV;
   1.459 +        x->rdmult = cpi->RDMULT;
   1.460 +
   1.461 +        /* Copy current mb to a buffer */
   1.462 +        vp8_copy_mem16x16(x->src.y_buffer, x->src.y_stride, x->thismb, 16);
   1.463 +
   1.464 +#if CONFIG_MULTITHREAD
   1.465 +        if (cpi->b_multi_threaded != 0)
   1.466 +        {
   1.467 +            *current_mb_col = mb_col - 1; /* set previous MB done */
   1.468 +
   1.469 +            if ((mb_col & (nsync - 1)) == 0)
   1.470 +            {
   1.471 +                while (mb_col > (*last_row_current_mb_col - nsync))
   1.472 +                {
   1.473 +                    x86_pause_hint();
   1.474 +                    thread_sleep(0);
   1.475 +                }
   1.476 +            }
   1.477 +        }
   1.478 +#endif
   1.479 +
   1.480 +        if(cpi->oxcf.tuning == VP8_TUNE_SSIM)
   1.481 +            vp8_activity_masking(cpi, x);
   1.482 +
    1.483 +        /* If segmentation is enabled, apply the MB level adjustment
    1.484 +         * to the quantizer. */
   1.485 +        if (xd->segmentation_enabled)
   1.486 +        {
    1.487 +            /* Set the segment id in xd->mode_info_context->mbmi.segment_id
    1.488 +             * for the current MB (with range checking)
    1.489 +             */
   1.490 +            if (cpi->segmentation_map[map_index+mb_col] <= 3)
   1.491 +                xd->mode_info_context->mbmi.segment_id = cpi->segmentation_map[map_index+mb_col];
   1.492 +            else
   1.493 +                xd->mode_info_context->mbmi.segment_id = 0;
   1.494 +
   1.495 +            vp8cx_mb_init_quantizer(cpi, x, 1);
   1.496 +        }
   1.497 +        else
   1.498 +            /* Set to Segment 0 by default */
   1.499 +            xd->mode_info_context->mbmi.segment_id = 0;
   1.500 +
   1.501 +        x->active_ptr = cpi->active_map + map_index + mb_col;
   1.502 +
   1.503 +        if (cm->frame_type == KEY_FRAME)
   1.504 +        {
   1.505 +            *totalrate += vp8cx_encode_intra_macroblock(cpi, x, tp);
   1.506 +#ifdef MODE_STATS
    1.507 +            y_modes[xd->mode_info_context->mbmi.mode] ++;
   1.508 +#endif
   1.509 +        }
   1.510 +        else
   1.511 +        {
   1.512 +            *totalrate += vp8cx_encode_inter_macroblock(cpi, x, tp, recon_yoffset, recon_uvoffset, mb_row, mb_col);
   1.513 +
   1.514 +#ifdef MODE_STATS
    1.515 +            inter_y_modes[xd->mode_info_context->mbmi.mode] ++;
    1.516 +
    1.517 +            if (xd->mode_info_context->mbmi.mode == SPLITMV)
    1.518 +            {
    1.519 +                int b;
    1.520 +
    1.521 +                for (b = 0; b < x->partition_info->count; b++)
    1.522 +                {
    1.523 +                    inter_b_modes[x->partition_info->bmi[b].mode] ++;
   1.524 +                }
   1.525 +            }
   1.526 +
   1.527 +#endif
   1.528 +
    1.529 +            /* Special case code for cyclic refresh.
    1.530 +             * If cyclic update is enabled, copy
    1.531 +             * xd->mode_info_context->mbmi.segment_id (which may have been
    1.532 +             * updated based on mode during vp8cx_encode_inter_macroblock())
    1.533 +             * back into the global segmentation map.
    1.534 +             */
   1.535 +            if ((cpi->current_layer == 0) &&
   1.536 +                (cpi->cyclic_refresh_mode_enabled &&
   1.537 +                 xd->segmentation_enabled))
   1.538 +            {
   1.539 +                cpi->segmentation_map[map_index+mb_col] = xd->mode_info_context->mbmi.segment_id;
   1.540 +
    1.541 +                /* If the block has been refreshed, mark it as clean (the
    1.542 +                 * magnitude of the negative value influences how long it
    1.543 +                 * will be before we consider another refresh).
    1.544 +                 * Else, if it was coded (last frame 0,0) and has not
    1.545 +                 * already been refreshed, mark it as a candidate for
    1.546 +                 * cleanup next time (marked 0); else mark it as dirty (1).
   1.547 +                 */
   1.548 +                if (xd->mode_info_context->mbmi.segment_id)
   1.549 +                    cpi->cyclic_refresh_map[map_index+mb_col] = -1;
   1.550 +                else if ((xd->mode_info_context->mbmi.mode == ZEROMV) && (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME))
   1.551 +                {
   1.552 +                    if (cpi->cyclic_refresh_map[map_index+mb_col] == 1)
   1.553 +                        cpi->cyclic_refresh_map[map_index+mb_col] = 0;
   1.554 +                }
   1.555 +                else
   1.556 +                    cpi->cyclic_refresh_map[map_index+mb_col] = 1;
   1.557 +
   1.558 +            }
   1.559 +        }
   1.560 +
   1.561 +        cpi->tplist[mb_row].stop = *tp;
   1.562 +
   1.563 +#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
   1.564 +        /* pack tokens for this MB */
   1.565 +        {
   1.566 +            int tok_count = *tp - tp_start;
   1.567 +            pack_tokens(w, tp_start, tok_count);
   1.568 +        }
   1.569 +#endif
   1.570 +        /* Increment pointer into gf usage flags structure. */
   1.571 +        x->gf_active_ptr++;
   1.572 +
   1.573 +        /* Increment the activity mask pointers. */
   1.574 +        x->mb_activity_ptr++;
   1.575 +
   1.576 +        /* adjust to the next column of macroblocks */
   1.577 +        x->src.y_buffer += 16;
   1.578 +        x->src.u_buffer += 8;
   1.579 +        x->src.v_buffer += 8;
   1.580 +
   1.581 +        recon_yoffset += 16;
   1.582 +        recon_uvoffset += 8;
   1.583 +
   1.584 +        /* Keep track of segment usage */
   1.585 +        segment_counts[xd->mode_info_context->mbmi.segment_id] ++;
   1.586 +
   1.587 +        /* skip to next mb */
   1.588 +        xd->mode_info_context++;
   1.589 +        x->partition_info++;
   1.590 +        xd->above_context++;
   1.591 +    }
   1.592 +
   1.593 +    /* extend the recon for intra prediction */
   1.594 +    vp8_extend_mb_row( &cm->yv12_fb[dst_fb_idx],
   1.595 +                        xd->dst.y_buffer + 16,
   1.596 +                        xd->dst.u_buffer + 8,
   1.597 +                        xd->dst.v_buffer + 8);
   1.598 +
   1.599 +#if CONFIG_MULTITHREAD
   1.600 +    if (cpi->b_multi_threaded != 0)
   1.601 +        *current_mb_col = rightmost_col;
   1.602 +#endif
   1.603 +
   1.604 +    /* this is to account for the border */
   1.605 +    xd->mode_info_context++;
   1.606 +    x->partition_info++;
   1.607 +}
   1.608 +
   1.609 +static void init_encode_frame_mb_context(VP8_COMP *cpi)
   1.610 +{
   1.611 +    MACROBLOCK *const x = & cpi->mb;
   1.612 +    VP8_COMMON *const cm = & cpi->common;
   1.613 +    MACROBLOCKD *const xd = & x->e_mbd;
   1.614 +
   1.615 +    /* GF active flags data structure */
   1.616 +    x->gf_active_ptr = (signed char *)cpi->gf_active_flags;
   1.617 +
   1.618 +    /* Activity map pointer */
   1.619 +    x->mb_activity_ptr = cpi->mb_activity_map;
   1.620 +
   1.621 +    x->act_zbin_adj = 0;
   1.622 +
   1.623 +    x->partition_info = x->pi;
   1.624 +
   1.625 +    xd->mode_info_context = cm->mi;
   1.626 +    xd->mode_info_stride = cm->mode_info_stride;
   1.627 +
   1.628 +    xd->frame_type = cm->frame_type;
   1.629 +
   1.630 +    /* reset intra mode contexts */
   1.631 +    if (cm->frame_type == KEY_FRAME)
   1.632 +        vp8_init_mbmode_probs(cm);
   1.633 +
   1.634 +    /* Copy data over into macro block data structures. */
   1.635 +    x->src = * cpi->Source;
   1.636 +    xd->pre = cm->yv12_fb[cm->lst_fb_idx];
   1.637 +    xd->dst = cm->yv12_fb[cm->new_fb_idx];
   1.638 +
   1.639 +    /* set up frame for intra coded blocks */
   1.640 +    vp8_setup_intra_recon(&cm->yv12_fb[cm->new_fb_idx]);
   1.641 +
   1.642 +    vp8_build_block_offsets(x);
   1.643 +
   1.644 +    xd->mode_info_context->mbmi.mode = DC_PRED;
   1.645 +    xd->mode_info_context->mbmi.uv_mode = DC_PRED;
   1.646 +
   1.647 +    xd->left_context = &cm->left_context;
   1.648 +
   1.649 +    x->mvc = cm->fc.mvc;
   1.650 +
   1.651 +    vpx_memset(cm->above_context, 0,
   1.652 +               sizeof(ENTROPY_CONTEXT_PLANES) * cm->mb_cols);
   1.653 +
   1.654 +    /* Special case treatment when GF and ARF are not sensible options
   1.655 +     * for reference
   1.656 +     */
   1.657 +    if (cpi->ref_frame_flags == VP8_LAST_FRAME)
   1.658 +        vp8_calc_ref_frame_costs(x->ref_frame_cost,
   1.659 +                                 cpi->prob_intra_coded,255,128);
   1.660 +    else if ((cpi->oxcf.number_of_layers > 1) &&
   1.661 +               (cpi->ref_frame_flags == VP8_GOLD_FRAME))
   1.662 +        vp8_calc_ref_frame_costs(x->ref_frame_cost,
   1.663 +                                 cpi->prob_intra_coded,1,255);
   1.664 +    else if ((cpi->oxcf.number_of_layers > 1) &&
   1.665 +                (cpi->ref_frame_flags == VP8_ALTR_FRAME))
   1.666 +        vp8_calc_ref_frame_costs(x->ref_frame_cost,
   1.667 +                                 cpi->prob_intra_coded,1,1);
   1.668 +    else
   1.669 +        vp8_calc_ref_frame_costs(x->ref_frame_cost,
   1.670 +                                 cpi->prob_intra_coded,
   1.671 +                                 cpi->prob_last_coded,
   1.672 +                                 cpi->prob_gf_coded);
   1.673 +
   1.674 +    xd->fullpixel_mask = 0xffffffff;
   1.675 +    if(cm->full_pixel)
   1.676 +        xd->fullpixel_mask = 0xfffffff8;
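          +    /* MV components here are in 1/8th pel units (full pel = 8), so
          +     * clearing the low 3 bits snaps each component to a whole pixel
          +     * when the frame uses full-pixel-only motion. */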
   1.677 +
   1.678 +    vp8_zero(x->coef_counts);
   1.679 +    vp8_zero(x->ymode_count);
    1.680 +    vp8_zero(x->uv_mode_count);
   1.681 +    x->prediction_error = 0;
   1.682 +    x->intra_error = 0;
   1.683 +    vp8_zero(x->count_mb_ref_frame_usage);
   1.684 +}
   1.685 +
   1.686 +static void sum_coef_counts(MACROBLOCK *x, MACROBLOCK *x_thread)
   1.687 +{
   1.688 +    int i = 0;
   1.689 +    do
   1.690 +    {
   1.691 +        int j = 0;
   1.692 +        do
   1.693 +        {
   1.694 +            int k = 0;
   1.695 +            do
   1.696 +            {
   1.697 +                /* at every context */
   1.698 +
   1.699 +                /* calc probs and branch cts for this frame only */
   1.700 +                int t = 0;      /* token/prob index */
   1.701 +
   1.702 +                do
   1.703 +                {
   1.704 +                    x->coef_counts [i][j][k][t] +=
   1.705 +                        x_thread->coef_counts [i][j][k][t];
   1.706 +                }
   1.707 +                while (++t < ENTROPY_NODES);
   1.708 +            }
   1.709 +            while (++k < PREV_COEF_CONTEXTS);
   1.710 +        }
   1.711 +        while (++j < COEF_BANDS);
   1.712 +    }
   1.713 +    while (++i < BLOCK_TYPES);
   1.714 +}
   1.715 +
   1.716 +void vp8_encode_frame(VP8_COMP *cpi)
   1.717 +{
   1.718 +    int mb_row;
   1.719 +    MACROBLOCK *const x = & cpi->mb;
   1.720 +    VP8_COMMON *const cm = & cpi->common;
   1.721 +    MACROBLOCKD *const xd = & x->e_mbd;
   1.722 +    TOKENEXTRA *tp = cpi->tok;
   1.723 +    int segment_counts[MAX_MB_SEGMENTS];
   1.724 +    int totalrate;
   1.725 +#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
   1.726 +    BOOL_CODER * bc = &cpi->bc[1]; /* bc[0] is for control partition */
   1.727 +    const int num_part = (1 << cm->multi_token_partition);
   1.728 +#endif
   1.729 +
   1.730 +    vpx_memset(segment_counts, 0, sizeof(segment_counts));
   1.731 +    totalrate = 0;
   1.732 +
   1.733 +    if (cpi->compressor_speed == 2)
   1.734 +    {
   1.735 +        if (cpi->oxcf.cpu_used < 0)
   1.736 +            cpi->Speed = -(cpi->oxcf.cpu_used);
   1.737 +        else
   1.738 +            vp8_auto_select_speed(cpi);
   1.739 +    }
   1.740 +
    1.741 +    /* Set up prediction function pointers for all frame types so we can use MC in AltRef */
   1.742 +    if(!cm->use_bilinear_mc_filter)
   1.743 +    {
   1.744 +        xd->subpixel_predict        = vp8_sixtap_predict4x4;
   1.745 +        xd->subpixel_predict8x4     = vp8_sixtap_predict8x4;
   1.746 +        xd->subpixel_predict8x8     = vp8_sixtap_predict8x8;
   1.747 +        xd->subpixel_predict16x16   = vp8_sixtap_predict16x16;
   1.748 +    }
   1.749 +    else
   1.750 +    {
   1.751 +        xd->subpixel_predict        = vp8_bilinear_predict4x4;
   1.752 +        xd->subpixel_predict8x4     = vp8_bilinear_predict8x4;
   1.753 +        xd->subpixel_predict8x8     = vp8_bilinear_predict8x8;
   1.754 +        xd->subpixel_predict16x16   = vp8_bilinear_predict16x16;
   1.755 +    }
   1.756 +
   1.757 +    cpi->mb.skip_true_count = 0;
   1.758 +    cpi->tok_count = 0;
   1.759 +
   1.760 +#if 0
   1.761 +    /* Experimental code */
   1.762 +    cpi->frame_distortion = 0;
   1.763 +    cpi->last_mb_distortion = 0;
   1.764 +#endif
   1.765 +
   1.766 +    xd->mode_info_context = cm->mi;
   1.767 +
   1.768 +    vp8_zero(cpi->mb.MVcount);
   1.769 +
   1.770 +    vp8cx_frame_init_quantizer(cpi);
   1.771 +
   1.772 +    vp8_initialize_rd_consts(cpi, x,
   1.773 +                             vp8_dc_quant(cm->base_qindex, cm->y1dc_delta_q));
   1.774 +
   1.775 +    vp8cx_initialize_me_consts(cpi, cm->base_qindex);
   1.776 +
   1.777 +    if(cpi->oxcf.tuning == VP8_TUNE_SSIM)
   1.778 +    {
   1.779 +        /* Initialize encode frame context. */
   1.780 +        init_encode_frame_mb_context(cpi);
   1.781 +
   1.782 +        /* Build a frame level activity map */
   1.783 +        build_activity_map(cpi);
   1.784 +    }
   1.785 +
   1.786 +    /* re-init encode frame context. */
   1.787 +    init_encode_frame_mb_context(cpi);
   1.788 +
   1.789 +#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
   1.790 +    {
   1.791 +        int i;
   1.792 +        for(i = 0; i < num_part; i++)
   1.793 +        {
   1.794 +            vp8_start_encode(&bc[i], cpi->partition_d[i + 1],
   1.795 +                    cpi->partition_d_end[i + 1]);
   1.796 +            bc[i].error = &cm->error;
   1.797 +        }
   1.798 +    }
   1.799 +
   1.800 +#endif
   1.801 +
   1.802 +    {
   1.803 +        struct vpx_usec_timer  emr_timer;
   1.804 +        vpx_usec_timer_start(&emr_timer);
   1.805 +
   1.806 +#if CONFIG_MULTITHREAD
   1.807 +        if (cpi->b_multi_threaded)
   1.808 +        {
   1.809 +            int i;
   1.810 +
   1.811 +            vp8cx_init_mbrthread_data(cpi, x, cpi->mb_row_ei,
   1.812 +                                      cpi->encoding_thread_count);
   1.813 +
   1.814 +            for (i = 0; i < cm->mb_rows; i++)
   1.815 +                cpi->mt_current_mb_col[i] = -1;
   1.816 +
   1.817 +            for (i = 0; i < cpi->encoding_thread_count; i++)
   1.818 +            {
   1.819 +                sem_post(&cpi->h_event_start_encoding[i]);
   1.820 +            }
   1.821 +
   1.822 +            for (mb_row = 0; mb_row < cm->mb_rows; mb_row += (cpi->encoding_thread_count + 1))
   1.823 +            {
   1.824 +                vp8_zero(cm->left_context)
   1.825 +
   1.826 +#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
   1.827 +                tp = cpi->tok;
   1.828 +#else
   1.829 +                tp = cpi->tok + mb_row * (cm->mb_cols * 16 * 24);
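          +                /* (The shared token buffer reserves 24 * 16 token
          +                 * entries per MB, so each row starts at a fixed
          +                 * offset.) */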
   1.830 +#endif
   1.831 +
   1.832 +                encode_mb_row(cpi, cm, mb_row, x, xd, &tp, segment_counts, &totalrate);
   1.833 +
   1.834 +                /* adjust to the next row of mbs */
   1.835 +                x->src.y_buffer += 16 * x->src.y_stride * (cpi->encoding_thread_count + 1) - 16 * cm->mb_cols;
   1.836 +                x->src.u_buffer +=  8 * x->src.uv_stride * (cpi->encoding_thread_count + 1) - 8 * cm->mb_cols;
   1.837 +                x->src.v_buffer +=  8 * x->src.uv_stride * (cpi->encoding_thread_count + 1) - 8 * cm->mb_cols;
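          +                /* The main thread encodes every
          +                 * (encoding_thread_count + 1)th row, so step the
          +                 * source pointers past the rows being handled by the
          +                 * worker threads. */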
   1.838 +
   1.839 +                xd->mode_info_context += xd->mode_info_stride * cpi->encoding_thread_count;
   1.840 +                x->partition_info  += xd->mode_info_stride * cpi->encoding_thread_count;
   1.841 +                x->gf_active_ptr   += cm->mb_cols * cpi->encoding_thread_count;
   1.842 +
   1.843 +                if(mb_row == cm->mb_rows - 1)
   1.844 +                {
   1.845 +                    sem_post(&cpi->h_event_end_encoding); /* signal frame encoding end */
   1.846 +                }
   1.847 +            }
   1.848 +
   1.849 +            sem_wait(&cpi->h_event_end_encoding); /* wait for other threads to finish */
   1.850 +
   1.851 +            for (mb_row = 0; mb_row < cm->mb_rows; mb_row ++)
   1.852 +            {
   1.853 +                cpi->tok_count += (unsigned int)
   1.854 +                  (cpi->tplist[mb_row].stop - cpi->tplist[mb_row].start);
   1.855 +            }
   1.856 +
    1.857 +            if (xd->segmentation_enabled)
    1.858 +            {
    1.859 +                int j;
    1.860 +
    1.861 +                for (i = 0; i < cpi->encoding_thread_count; i++)
    1.862 +                {
    1.863 +                    for (j = 0; j < 4; j++)
    1.864 +                        segment_counts[j] += cpi->mb_row_ei[i].segment_counts[j];
    1.865 +                }
    1.866 +            }
   1.870 +
   1.871 +            for (i = 0; i < cpi->encoding_thread_count; i++)
   1.872 +            {
   1.873 +                int mode_count;
   1.874 +                int c_idx;
   1.875 +                totalrate += cpi->mb_row_ei[i].totalrate;
   1.876 +
   1.877 +                cpi->mb.skip_true_count += cpi->mb_row_ei[i].mb.skip_true_count;
   1.878 +
   1.879 +                for(mode_count = 0; mode_count < VP8_YMODES; mode_count++)
   1.880 +                    cpi->mb.ymode_count[mode_count] +=
   1.881 +                        cpi->mb_row_ei[i].mb.ymode_count[mode_count];
   1.882 +
   1.883 +                for(mode_count = 0; mode_count < VP8_UV_MODES; mode_count++)
   1.884 +                    cpi->mb.uv_mode_count[mode_count] +=
   1.885 +                        cpi->mb_row_ei[i].mb.uv_mode_count[mode_count];
   1.886 +
   1.887 +                for(c_idx = 0; c_idx < MVvals; c_idx++)
   1.888 +                {
   1.889 +                    cpi->mb.MVcount[0][c_idx] +=
   1.890 +                        cpi->mb_row_ei[i].mb.MVcount[0][c_idx];
   1.891 +                    cpi->mb.MVcount[1][c_idx] +=
   1.892 +                        cpi->mb_row_ei[i].mb.MVcount[1][c_idx];
   1.893 +                }
   1.894 +
   1.895 +                cpi->mb.prediction_error +=
   1.896 +                    cpi->mb_row_ei[i].mb.prediction_error;
   1.897 +                cpi->mb.intra_error += cpi->mb_row_ei[i].mb.intra_error;
   1.898 +
   1.899 +                for(c_idx = 0; c_idx < MAX_REF_FRAMES; c_idx++)
   1.900 +                    cpi->mb.count_mb_ref_frame_usage[c_idx] +=
   1.901 +                        cpi->mb_row_ei[i].mb.count_mb_ref_frame_usage[c_idx];
   1.902 +
   1.903 +                for(c_idx = 0; c_idx < MAX_ERROR_BINS; c_idx++)
   1.904 +                    cpi->mb.error_bins[c_idx] +=
   1.905 +                        cpi->mb_row_ei[i].mb.error_bins[c_idx];
   1.906 +
   1.907 +                /* add up counts for each thread */
   1.908 +                sum_coef_counts(x, &cpi->mb_row_ei[i].mb);
   1.909 +            }
   1.910 +
   1.911 +        }
   1.912 +        else
   1.913 +#endif
   1.914 +        {
   1.915 +
   1.916 +            /* for each macroblock row in image */
   1.917 +            for (mb_row = 0; mb_row < cm->mb_rows; mb_row++)
   1.918 +            {
   1.919 +                vp8_zero(cm->left_context)
   1.920 +
   1.921 +#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
   1.922 +                tp = cpi->tok;
   1.923 +#endif
   1.924 +
   1.925 +                encode_mb_row(cpi, cm, mb_row, x, xd, &tp, segment_counts, &totalrate);
   1.926 +
   1.927 +                /* adjust to the next row of mbs */
   1.928 +                x->src.y_buffer += 16 * x->src.y_stride - 16 * cm->mb_cols;
   1.929 +                x->src.u_buffer += 8 * x->src.uv_stride - 8 * cm->mb_cols;
   1.930 +                x->src.v_buffer += 8 * x->src.uv_stride - 8 * cm->mb_cols;
   1.931 +            }
   1.932 +
   1.933 +            cpi->tok_count = (unsigned int)(tp - cpi->tok);
   1.934 +        }
   1.935 +
   1.936 +#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
   1.937 +        {
   1.938 +            int i;
   1.939 +            for(i = 0; i < num_part; i++)
   1.940 +            {
   1.941 +                vp8_stop_encode(&bc[i]);
   1.942 +                cpi->partition_sz[i+1] = bc[i].pos;
   1.943 +            }
   1.944 +        }
   1.945 +#endif
   1.946 +
   1.947 +        vpx_usec_timer_mark(&emr_timer);
   1.948 +        cpi->time_encode_mb_row += vpx_usec_timer_elapsed(&emr_timer);
   1.949 +    }
   1.950 +
   1.951 +
    1.952 +    /* Work out the segment probabilities if segmentation is enabled
    1.953 +     * and needs to be updated. */
   1.954 +    if (xd->segmentation_enabled && xd->update_mb_segmentation_map)
   1.955 +    {
   1.956 +        int tot_count;
   1.957 +        int i;
   1.958 +
   1.959 +        /* Set to defaults */
   1.960 +        vpx_memset(xd->mb_segment_tree_probs, 255 , sizeof(xd->mb_segment_tree_probs));
   1.961 +
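          +        /* The segment map is coded with a two-level binary tree:
          +         * probs[0] splits {0,1} from {2,3}, probs[1] splits 0 from 1,
          +         * and probs[2] splits 2 from 3; each is scaled to [1, 255]. */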
   1.962 +        tot_count = segment_counts[0] + segment_counts[1] + segment_counts[2] + segment_counts[3];
   1.963 +
   1.964 +        if (tot_count)
   1.965 +        {
   1.966 +            xd->mb_segment_tree_probs[0] = ((segment_counts[0] + segment_counts[1]) * 255) / tot_count;
   1.967 +
   1.968 +            tot_count = segment_counts[0] + segment_counts[1];
   1.969 +
   1.970 +            if (tot_count > 0)
   1.971 +            {
   1.972 +                xd->mb_segment_tree_probs[1] = (segment_counts[0] * 255) / tot_count;
   1.973 +            }
   1.974 +
   1.975 +            tot_count = segment_counts[2] + segment_counts[3];
   1.976 +
   1.977 +            if (tot_count > 0)
   1.978 +                xd->mb_segment_tree_probs[2] = (segment_counts[2] * 255) / tot_count;
   1.979 +
   1.980 +            /* Zero probabilities not allowed */
   1.981 +            for (i = 0; i < MB_FEATURE_TREE_PROBS; i ++)
   1.982 +            {
   1.983 +                if (xd->mb_segment_tree_probs[i] == 0)
   1.984 +                    xd->mb_segment_tree_probs[i] = 1;
   1.985 +            }
   1.986 +        }
   1.987 +    }
   1.988 +
   1.989 +    /* projected_frame_size in units of BYTES */
   1.990 +    cpi->projected_frame_size = totalrate >> 8;
   1.991 +
    1.992 +    /* Make a note of the percentage of MBs coded intra. */
   1.993 +    if (cm->frame_type == KEY_FRAME)
   1.994 +    {
   1.995 +        cpi->this_frame_percent_intra = 100;
   1.996 +    }
   1.997 +    else
   1.998 +    {
   1.999 +        int tot_modes;
  1.1000 +
  1.1001 +        tot_modes = cpi->mb.count_mb_ref_frame_usage[INTRA_FRAME]
  1.1002 +                    + cpi->mb.count_mb_ref_frame_usage[LAST_FRAME]
  1.1003 +                    + cpi->mb.count_mb_ref_frame_usage[GOLDEN_FRAME]
  1.1004 +                    + cpi->mb.count_mb_ref_frame_usage[ALTREF_FRAME];
  1.1005 +
  1.1006 +        if (tot_modes)
  1.1007 +            cpi->this_frame_percent_intra =
  1.1008 +                cpi->mb.count_mb_ref_frame_usage[INTRA_FRAME] * 100 / tot_modes;
  1.1009 +
  1.1010 +    }
  1.1011 +
  1.1012 +#if ! CONFIG_REALTIME_ONLY
  1.1013 +    /* Adjust the projected reference frame usage probability numbers to
  1.1014 +     * reflect what we have just seen. This may be useful when we make
  1.1015 +     * multiple iterations of the recode loop rather than continuing to use
  1.1016 +     * values from the previous frame.
  1.1017 +     */
  1.1018 +    if ((cm->frame_type != KEY_FRAME) && ((cpi->oxcf.number_of_layers > 1) ||
  1.1019 +        (!cm->refresh_alt_ref_frame && !cm->refresh_golden_frame)))
  1.1020 +    {
  1.1021 +      vp8_convert_rfct_to_prob(cpi);
  1.1022 +    }
  1.1023 +#endif
  1.1024 +}
  1.1025 +void vp8_setup_block_ptrs(MACROBLOCK *x)
  1.1026 +{
  1.1027 +    int r, c;
  1.1028 +    int i;
  1.1029 +
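          +    /* src_diff layout: 16x16 luma residual (stride 16), then the two
          +     * 8x8 chroma residuals at offsets 256 and 320, and the 2nd order
          +     * (Y2) block at offset 384. */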
  1.1030 +    for (r = 0; r < 4; r++)
  1.1031 +    {
  1.1032 +        for (c = 0; c < 4; c++)
  1.1033 +        {
  1.1034 +            x->block[r*4+c].src_diff = x->src_diff + r * 4 * 16 + c * 4;
  1.1035 +        }
  1.1036 +    }
  1.1037 +
  1.1038 +    for (r = 0; r < 2; r++)
  1.1039 +    {
  1.1040 +        for (c = 0; c < 2; c++)
  1.1041 +        {
  1.1042 +            x->block[16 + r*2+c].src_diff = x->src_diff + 256 + r * 4 * 8 + c * 4;
  1.1043 +        }
  1.1044 +    }
  1.1045 +
  1.1046 +
  1.1047 +    for (r = 0; r < 2; r++)
  1.1048 +    {
  1.1049 +        for (c = 0; c < 2; c++)
  1.1050 +        {
  1.1051 +            x->block[20 + r*2+c].src_diff = x->src_diff + 320 + r * 4 * 8 + c * 4;
  1.1052 +        }
  1.1053 +    }
  1.1054 +
  1.1055 +    x->block[24].src_diff = x->src_diff + 384;
  1.1056 +
  1.1057 +
  1.1058 +    for (i = 0; i < 25; i++)
  1.1059 +    {
  1.1060 +        x->block[i].coeff = x->coeff + i * 16;
  1.1061 +    }
  1.1062 +}
  1.1063 +
  1.1064 +void vp8_build_block_offsets(MACROBLOCK *x)
  1.1065 +{
  1.1066 +    int block = 0;
  1.1067 +    int br, bc;
  1.1068 +
  1.1069 +    vp8_build_block_doffsets(&x->e_mbd);
  1.1070 +
  1.1071 +    /* y blocks */
  1.1072 +    x->thismb_ptr = &x->thismb[0];
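          +    /* Y blocks take their source from the contiguous 16x16 copy in
          +     * x->thismb rather than directly from the frame buffer. */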
  1.1073 +    for (br = 0; br < 4; br++)
  1.1074 +    {
  1.1075 +        for (bc = 0; bc < 4; bc++)
  1.1076 +        {
  1.1077 +            BLOCK *this_block = &x->block[block];
  1.1078 +            this_block->base_src = &x->thismb_ptr;
  1.1079 +            this_block->src_stride = 16;
  1.1080 +            this_block->src = 4 * br * 16 + 4 * bc;
  1.1081 +            ++block;
  1.1082 +        }
  1.1083 +    }
  1.1084 +
  1.1085 +    /* u blocks */
  1.1086 +    for (br = 0; br < 2; br++)
  1.1087 +    {
  1.1088 +        for (bc = 0; bc < 2; bc++)
  1.1089 +        {
  1.1090 +            BLOCK *this_block = &x->block[block];
  1.1091 +            this_block->base_src = &x->src.u_buffer;
  1.1092 +            this_block->src_stride = x->src.uv_stride;
  1.1093 +            this_block->src = 4 * br * this_block->src_stride + 4 * bc;
  1.1094 +            ++block;
  1.1095 +        }
  1.1096 +    }
  1.1097 +
  1.1098 +    /* v blocks */
  1.1099 +    for (br = 0; br < 2; br++)
  1.1100 +    {
  1.1101 +        for (bc = 0; bc < 2; bc++)
  1.1102 +        {
  1.1103 +            BLOCK *this_block = &x->block[block];
  1.1104 +            this_block->base_src = &x->src.v_buffer;
  1.1105 +            this_block->src_stride = x->src.uv_stride;
  1.1106 +            this_block->src = 4 * br * this_block->src_stride + 4 * bc;
  1.1107 +            ++block;
  1.1108 +        }
  1.1109 +    }
  1.1110 +}
  1.1111 +
  1.1112 +static void sum_intra_stats(VP8_COMP *cpi, MACROBLOCK *x)
  1.1113 +{
  1.1114 +    const MACROBLOCKD *xd = & x->e_mbd;
  1.1115 +    const MB_PREDICTION_MODE m = xd->mode_info_context->mbmi.mode;
  1.1116 +    const MB_PREDICTION_MODE uvm = xd->mode_info_context->mbmi.uv_mode;
  1.1117 +
  1.1118 +#ifdef MODE_STATS
  1.1119 +    const int is_key = cpi->common.frame_type == KEY_FRAME;
  1.1120 +
  1.1121 +    ++ (is_key ? uv_modes : inter_uv_modes)[uvm];
  1.1122 +
  1.1123 +    if (m == B_PRED)
  1.1124 +    {
  1.1125 +        unsigned int *const bct = is_key ? b_modes : inter_b_modes;
  1.1126 +
  1.1127 +        int b = 0;
  1.1128 +
  1.1129 +        do
  1.1130 +        {
  1.1131 +            ++ bct[xd->block[b].bmi.mode];
  1.1132 +        }
  1.1133 +        while (++b < 16);
  1.1134 +    }
  1.1135 +
  1.1136 +#endif
  1.1137 +
  1.1138 +    ++x->ymode_count[m];
  1.1139 +    ++x->uv_mode_count[uvm];
  1.1140 +
  1.1141 +}
  1.1142 +
  1.1143 +/* Experimental stub function to create a per MB zbin adjustment based on
  1.1144 + * some previously calculated measure of MB activity.
  1.1145 + */
  1.1146 +static void adjust_act_zbin( VP8_COMP *cpi, MACROBLOCK *x )
  1.1147 +{
  1.1148 +#if USE_ACT_INDEX
  1.1149 +    x->act_zbin_adj = *(x->mb_activity_ptr);
  1.1150 +#else
  1.1151 +    int64_t a;
  1.1152 +    int64_t b;
  1.1153 +    int64_t act = *(x->mb_activity_ptr);
  1.1154 +
   1.1155 +    /* Compute the ratios used for the zbin adjustment. */
  1.1156 +    a = act + 4*cpi->activity_avg;
  1.1157 +    b = 4*act + cpi->activity_avg;
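          +
          +    /* The adjustment is the rounded ratio b/a (or its negated
          +     * inverse): busier-than-average MBs get a positive zbin offset,
          +     * flatter ones a negative offset. */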
  1.1158 +
  1.1159 +    if ( act > cpi->activity_avg )
  1.1160 +        x->act_zbin_adj = (int)(((int64_t)b + (a>>1))/a) - 1;
  1.1161 +    else
  1.1162 +        x->act_zbin_adj = 1 - (int)(((int64_t)a + (b>>1))/b);
  1.1163 +#endif
  1.1164 +}
  1.1165 +
  1.1166 +int vp8cx_encode_intra_macroblock(VP8_COMP *cpi, MACROBLOCK *x,
  1.1167 +                                  TOKENEXTRA **t)
  1.1168 +{
  1.1169 +    MACROBLOCKD *xd = &x->e_mbd;
  1.1170 +    int rate;
  1.1171 +
  1.1172 +    if (cpi->sf.RD && cpi->compressor_speed != 2)
  1.1173 +        vp8_rd_pick_intra_mode(x, &rate);
  1.1174 +    else
  1.1175 +        vp8_pick_intra_mode(x, &rate);
  1.1176 +
  1.1177 +    if(cpi->oxcf.tuning == VP8_TUNE_SSIM)
  1.1178 +    {
  1.1179 +        adjust_act_zbin( cpi, x );
  1.1180 +        vp8_update_zbin_extra(cpi, x);
  1.1181 +    }
  1.1182 +
  1.1183 +    if (x->e_mbd.mode_info_context->mbmi.mode == B_PRED)
  1.1184 +        vp8_encode_intra4x4mby(x);
  1.1185 +    else
  1.1186 +        vp8_encode_intra16x16mby(x);
  1.1187 +
  1.1188 +    vp8_encode_intra16x16mbuv(x);
  1.1189 +
  1.1190 +    sum_intra_stats(cpi, x);
  1.1191 +
  1.1192 +    vp8_tokenize_mb(cpi, x, t);
  1.1193 +
  1.1194 +    if (xd->mode_info_context->mbmi.mode != B_PRED)
  1.1195 +        vp8_inverse_transform_mby(xd);
  1.1196 +
  1.1197 +    vp8_dequant_idct_add_uv_block
  1.1198 +                    (xd->qcoeff+16*16, xd->dequant_uv,
  1.1199 +                     xd->dst.u_buffer, xd->dst.v_buffer,
  1.1200 +                     xd->dst.uv_stride, xd->eobs+16);
  1.1201 +    return rate;
  1.1202 +}
  1.1203 +#ifdef SPEEDSTATS
  1.1204 +extern int cnt_pm;
  1.1205 +#endif
  1.1206 +
  1.1207 +extern void vp8_fix_contexts(MACROBLOCKD *x);
  1.1208 +
  1.1209 +int vp8cx_encode_inter_macroblock
  1.1210 +(
  1.1211 +    VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t,
  1.1212 +    int recon_yoffset, int recon_uvoffset,
  1.1213 +    int mb_row, int mb_col
  1.1214 +)
  1.1215 +{
  1.1216 +    MACROBLOCKD *const xd = &x->e_mbd;
  1.1217 +    int intra_error = 0;
  1.1218 +    int rate;
  1.1219 +    int distortion;
  1.1220 +
  1.1221 +    x->skip = 0;
  1.1222 +
  1.1223 +    if (xd->segmentation_enabled)
  1.1224 +        x->encode_breakout = cpi->segment_encode_breakout[xd->mode_info_context->mbmi.segment_id];
  1.1225 +    else
  1.1226 +        x->encode_breakout = cpi->oxcf.encode_breakout;
  1.1227 +
  1.1228 +#if CONFIG_TEMPORAL_DENOISING
  1.1229 +    /* Reset the best sse mode/mv for each macroblock. */
  1.1230 +    x->best_reference_frame = INTRA_FRAME;
  1.1231 +    x->best_zeromv_reference_frame = INTRA_FRAME;
  1.1232 +    x->best_sse_inter_mode = 0;
  1.1233 +    x->best_sse_mv.as_int = 0;
  1.1234 +    x->need_to_clamp_best_mvs = 0;
  1.1235 +#endif
  1.1236 +
  1.1237 +    if (cpi->sf.RD)
  1.1238 +    {
  1.1239 +        int zbin_mode_boost_enabled = x->zbin_mode_boost_enabled;
  1.1240 +
  1.1241 +        /* Are we using the fast quantizer for the mode selection? */
  1.1242 +        if(cpi->sf.use_fastquant_for_pick)
  1.1243 +        {
  1.1244 +            x->quantize_b      = vp8_fast_quantize_b;
  1.1245 +            x->quantize_b_pair = vp8_fast_quantize_b_pair;
  1.1246 +
  1.1247 +            /* the fast quantizer does not use zbin_extra, so
  1.1248 +             * do not recalculate */
  1.1249 +            x->zbin_mode_boost_enabled = 0;
  1.1250 +        }
  1.1251 +        vp8_rd_pick_inter_mode(cpi, x, recon_yoffset, recon_uvoffset, &rate,
  1.1252 +                               &distortion, &intra_error);
  1.1253 +
  1.1254 +        /* switch back to the regular quantizer for the encode */
  1.1255 +        if (cpi->sf.improved_quant)
  1.1256 +        {
  1.1257 +            x->quantize_b      = vp8_regular_quantize_b;
  1.1258 +            x->quantize_b_pair = vp8_regular_quantize_b_pair;
  1.1259 +        }
  1.1260 +
   1.1261 +        /* restore x->zbin_mode_boost_enabled */
  1.1262 +        x->zbin_mode_boost_enabled = zbin_mode_boost_enabled;
  1.1263 +
  1.1264 +    }
  1.1265 +    else
  1.1266 +    {
  1.1267 +        vp8_pick_inter_mode(cpi, x, recon_yoffset, recon_uvoffset, &rate,
  1.1268 +                            &distortion, &intra_error, mb_row, mb_col);
  1.1269 +    }
  1.1270 +
  1.1271 +    x->prediction_error += distortion;
  1.1272 +    x->intra_error += intra_error;
  1.1273 +
  1.1274 +    if(cpi->oxcf.tuning == VP8_TUNE_SSIM)
  1.1275 +    {
  1.1276 +        /* Adjust the zbin based on this MB rate. */
  1.1277 +        adjust_act_zbin( cpi, x );
  1.1278 +    }
  1.1279 +
  1.1280 +#if 0
  1.1281 +    /* Experimental RD code */
  1.1282 +    cpi->frame_distortion += distortion;
  1.1283 +    cpi->last_mb_distortion = distortion;
  1.1284 +#endif
  1.1285 +
   1.1286 +    /* MB level adjustment to quantizer setup */
  1.1287 +    if (xd->segmentation_enabled)
  1.1288 +    {
  1.1289 +        /* If cyclic update enabled */
  1.1290 +        if (cpi->current_layer == 0 && cpi->cyclic_refresh_mode_enabled)
  1.1291 +        {
  1.1292 +            /* Clear segment_id back to 0 if not coded (last frame 0,0) */
  1.1293 +            if ((xd->mode_info_context->mbmi.segment_id == 1) &&
  1.1294 +                ((xd->mode_info_context->mbmi.ref_frame != LAST_FRAME) || (xd->mode_info_context->mbmi.mode != ZEROMV)))
  1.1295 +            {
  1.1296 +                xd->mode_info_context->mbmi.segment_id = 0;
  1.1297 +
  1.1298 +                /* segment_id changed, so update */
  1.1299 +                vp8cx_mb_init_quantizer(cpi, x, 1);
  1.1300 +            }
  1.1301 +        }
  1.1302 +    }
  1.1303 +
  1.1304 +    {
  1.1305 +        /* Experimental code.
  1.1306 +         * Special case for gf and arf zeromv modes, for 1 temporal layer.
   1.1307 +         * Increase zbin size to suppress noise.
  1.1308 +         */
  1.1309 +        x->zbin_mode_boost = 0;
  1.1310 +        if (x->zbin_mode_boost_enabled)
  1.1311 +        {
  1.1312 +            if ( xd->mode_info_context->mbmi.ref_frame != INTRA_FRAME )
  1.1313 +            {
  1.1314 +                if (xd->mode_info_context->mbmi.mode == ZEROMV)
  1.1315 +                {
  1.1316 +                    if (xd->mode_info_context->mbmi.ref_frame != LAST_FRAME &&
  1.1317 +                        cpi->oxcf.number_of_layers == 1)
  1.1318 +                        x->zbin_mode_boost = GF_ZEROMV_ZBIN_BOOST;
  1.1319 +                    else
  1.1320 +                        x->zbin_mode_boost = LF_ZEROMV_ZBIN_BOOST;
  1.1321 +                }
  1.1322 +                else if (xd->mode_info_context->mbmi.mode == SPLITMV)
  1.1323 +                    x->zbin_mode_boost = 0;
  1.1324 +                else
  1.1325 +                    x->zbin_mode_boost = MV_ZBIN_BOOST;
  1.1326 +            }
  1.1327 +        }
  1.1328 +
   1.1329 +        /* The fast quantizer does not use zbin_extra, so only update
   1.1330 +         * it when using the regular quantizer. */
  1.1331 +        if (cpi->sf.improved_quant)
  1.1332 +            vp8_update_zbin_extra(cpi, x);
  1.1333 +    }
  1.1334 +
  1.1335 +    x->count_mb_ref_frame_usage[xd->mode_info_context->mbmi.ref_frame] ++;
  1.1336 +
  1.1337 +    if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME)
  1.1338 +    {
  1.1339 +        vp8_encode_intra16x16mbuv(x);
  1.1340 +
  1.1341 +        if (xd->mode_info_context->mbmi.mode == B_PRED)
  1.1342 +        {
  1.1343 +            vp8_encode_intra4x4mby(x);
  1.1344 +        }
  1.1345 +        else
  1.1346 +        {
  1.1347 +            vp8_encode_intra16x16mby(x);
  1.1348 +        }
  1.1349 +
  1.1350 +        sum_intra_stats(cpi, x);
  1.1351 +    }
  1.1352 +    else
  1.1353 +    {
  1.1354 +        int ref_fb_idx;
  1.1355 +
  1.1356 +        if (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME)
  1.1357 +            ref_fb_idx = cpi->common.lst_fb_idx;
  1.1358 +        else if (xd->mode_info_context->mbmi.ref_frame == GOLDEN_FRAME)
  1.1359 +            ref_fb_idx = cpi->common.gld_fb_idx;
  1.1360 +        else
  1.1361 +            ref_fb_idx = cpi->common.alt_fb_idx;
  1.1362 +
  1.1363 +        xd->pre.y_buffer = cpi->common.yv12_fb[ref_fb_idx].y_buffer + recon_yoffset;
  1.1364 +        xd->pre.u_buffer = cpi->common.yv12_fb[ref_fb_idx].u_buffer + recon_uvoffset;
  1.1365 +        xd->pre.v_buffer = cpi->common.yv12_fb[ref_fb_idx].v_buffer + recon_uvoffset;
  1.1366 +
  1.1367 +        if (!x->skip)
  1.1368 +        {
  1.1369 +            vp8_encode_inter16x16(x);
  1.1370 +        }
  1.1371 +        else
  1.1372 +            vp8_build_inter16x16_predictors_mb(xd, xd->dst.y_buffer,
  1.1373 +                                           xd->dst.u_buffer, xd->dst.v_buffer,
  1.1374 +                                           xd->dst.y_stride, xd->dst.uv_stride);
  1.1375 +
  1.1376 +    }
  1.1377 +
  1.1378 +    if (!x->skip)
  1.1379 +    {
  1.1380 +        vp8_tokenize_mb(cpi, x, t);
  1.1381 +
  1.1382 +        if (xd->mode_info_context->mbmi.mode != B_PRED)
  1.1383 +            vp8_inverse_transform_mby(xd);
  1.1384 +
  1.1385 +        vp8_dequant_idct_add_uv_block
  1.1386 +                        (xd->qcoeff+16*16, xd->dequant_uv,
  1.1387 +                         xd->dst.u_buffer, xd->dst.v_buffer,
  1.1388 +                         xd->dst.uv_stride, xd->eobs+16);
  1.1389 +    }
  1.1390 +    else
  1.1391 +    {
  1.1392 +        /* always set mb_skip_coeff as it is needed by the loopfilter */
  1.1393 +        xd->mode_info_context->mbmi.mb_skip_coeff = 1;
  1.1394 +
  1.1395 +        if (cpi->common.mb_no_coeff_skip)
  1.1396 +        {
  1.1397 +            x->skip_true_count ++;
  1.1398 +            vp8_fix_contexts(xd);
  1.1399 +        }
  1.1400 +        else
  1.1401 +        {
  1.1402 +            vp8_stuff_mb(cpi, x, t);
  1.1403 +        }
  1.1404 +    }
  1.1405 +
  1.1406 +    return rate;
  1.1407 +}
