/********************************************************************
 *                                                                  *
 * THIS FILE IS PART OF THE OggTheora SOFTWARE CODEC SOURCE CODE.   *
 * USE, DISTRIBUTION AND REPRODUCTION OF THIS LIBRARY SOURCE IS     *
 * GOVERNED BY A BSD-STYLE SOURCE LICENSE INCLUDED WITH THIS SOURCE *
 * IN 'COPYING'. PLEASE READ THESE TERMS BEFORE DISTRIBUTING.       *
 *                                                                  *
 * THE Theora SOURCE CODE IS COPYRIGHT (C) 2002-2009                *
 * by the Xiph.Org Foundation and contributors http://www.xiph.org/ *
 *                                                                  *
 ********************************************************************

  function:
  last mod: $Id: decode.c 17576 2010-10-29 01:07:51Z tterribe $

 ********************************************************************/

#include <stdlib.h>
#include <string.h>
#include <ogg/ogg.h>
#include "decint.h"
#if defined(OC_DUMP_IMAGES)
# include <stdio.h>
# include "png.h"
#endif
#if defined(HAVE_CAIRO)
# include <cairo.h>
#endif


/*No post-processing.*/
#define OC_PP_LEVEL_DISABLED  (0)
/*Keep track of DC qi for each block only.*/
#define OC_PP_LEVEL_TRACKDCQI (1)
/*Deblock the luma plane.*/
#define OC_PP_LEVEL_DEBLOCKY  (2)
/*Dering the luma plane.*/
#define OC_PP_LEVEL_DERINGY   (3)
/*Stronger luma plane deringing.*/
#define OC_PP_LEVEL_SDERINGY  (4)
/*Deblock the chroma planes.*/
#define OC_PP_LEVEL_DEBLOCKC  (5)
/*Dering the chroma planes.*/
#define OC_PP_LEVEL_DERINGC   (6)
/*Stronger chroma plane deringing.*/
#define OC_PP_LEVEL_SDERINGC  (7)
/*Maximum valid post-processing level.*/
#define OC_PP_LEVEL_MAX       (7)
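
/*Reader aid (not part of the library): applications choose among these levels
   through the public theoradec.h API rather than using these macros directly.
  A minimal sketch, assuming a configured th_dec_ctx *dec and ignoring error
   returns:
    int pp_level_max;
    int pp_level;
    th_decode_ctl(dec,TH_DECCTL_GET_PPLEVEL_MAX,&pp_level_max,
     sizeof(pp_level_max));
    pp_level=pp_level_max;
    th_decode_ctl(dec,TH_DECCTL_SET_PPLEVEL,&pp_level,sizeof(pp_level));
  Higher levels subsume the work of all lower ones.*/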


/*The mode alphabets for the various mode coding schemes.
  Scheme 0 uses a custom alphabet, which is not stored in this table.*/
static const unsigned char OC_MODE_ALPHABETS[7][OC_NMODES]={
  /*Last MV dominates.*/
  {
    OC_MODE_INTER_MV_LAST,OC_MODE_INTER_MV_LAST2,OC_MODE_INTER_MV,
    OC_MODE_INTER_NOMV,OC_MODE_INTRA,OC_MODE_GOLDEN_NOMV,OC_MODE_GOLDEN_MV,
    OC_MODE_INTER_MV_FOUR
  },
  {
    OC_MODE_INTER_MV_LAST,OC_MODE_INTER_MV_LAST2,OC_MODE_INTER_NOMV,
    OC_MODE_INTER_MV,OC_MODE_INTRA,OC_MODE_GOLDEN_NOMV,OC_MODE_GOLDEN_MV,
    OC_MODE_INTER_MV_FOUR
  },
  {
    OC_MODE_INTER_MV_LAST,OC_MODE_INTER_MV,OC_MODE_INTER_MV_LAST2,
    OC_MODE_INTER_NOMV,OC_MODE_INTRA,OC_MODE_GOLDEN_NOMV,OC_MODE_GOLDEN_MV,
    OC_MODE_INTER_MV_FOUR
  },
  {
    OC_MODE_INTER_MV_LAST,OC_MODE_INTER_MV,OC_MODE_INTER_NOMV,
    OC_MODE_INTER_MV_LAST2,OC_MODE_INTRA,OC_MODE_GOLDEN_NOMV,
    OC_MODE_GOLDEN_MV,OC_MODE_INTER_MV_FOUR
  },
  /*No MV dominates.*/
  {
    OC_MODE_INTER_NOMV,OC_MODE_INTER_MV_LAST,OC_MODE_INTER_MV_LAST2,
    OC_MODE_INTER_MV,OC_MODE_INTRA,OC_MODE_GOLDEN_NOMV,OC_MODE_GOLDEN_MV,
    OC_MODE_INTER_MV_FOUR
  },
  {
    OC_MODE_INTER_NOMV,OC_MODE_GOLDEN_NOMV,OC_MODE_INTER_MV_LAST,
    OC_MODE_INTER_MV_LAST2,OC_MODE_INTER_MV,OC_MODE_INTRA,OC_MODE_GOLDEN_MV,
    OC_MODE_INTER_MV_FOUR
  },
  /*Default ordering.*/
  {
    OC_MODE_INTER_NOMV,OC_MODE_INTRA,OC_MODE_INTER_MV,OC_MODE_INTER_MV_LAST,
    OC_MODE_INTER_MV_LAST2,OC_MODE_GOLDEN_NOMV,OC_MODE_GOLDEN_MV,
    OC_MODE_INTER_MV_FOUR
  }
};


/*The original DCT tokens are extended and reordered during the construction
   of the Huffman tables.
  The extension means more bits can be read with fewer calls to the bitpacker
   during the Huffman decoding process (at the cost of larger Huffman tables),
   and fewer tokens require additional extra bits (reducing the average
   storage per decoded token).
  The revised ordering reveals essential information in the token value
   itself; specifically, whether or not there are additional extra bits to
   read and the parameter to which those extra bits are applied.
  The token is used to fetch a code word from the OC_DCT_CODE_WORD table
   below.
  The extra bits are added into the code word at the bit position inferred
   from the token value, giving the final code word from which all required
   parameters are derived.
  The number of EOBs and the leading zero run length can be extracted
   directly.
michael@0: The coefficient magnitude is optionally negated before extraction, according michael@0: to a 'flip' bit.*/ michael@0: michael@0: /*The number of additional extra bits that are decoded with each of the michael@0: internal DCT tokens.*/ michael@0: static const unsigned char OC_INTERNAL_DCT_TOKEN_EXTRA_BITS[15]={ michael@0: 12,4,3,3,4,4,5,5,8,8,8,8,3,3,6 michael@0: }; michael@0: michael@0: /*Whether or not an internal token needs any additional extra bits.*/ michael@0: #define OC_DCT_TOKEN_NEEDS_MORE(token) \ michael@0: (token<(int)(sizeof(OC_INTERNAL_DCT_TOKEN_EXTRA_BITS)/ \ michael@0: sizeof(*OC_INTERNAL_DCT_TOKEN_EXTRA_BITS))) michael@0: michael@0: /*This token (OC_DCT_REPEAT_RUN3_TOKEN) requires more than 8 extra bits.*/ michael@0: #define OC_DCT_TOKEN_FAT_EOB (0) michael@0: michael@0: /*The number of EOBs to use for an end-of-frame token. michael@0: Note: We want to set eobs to PTRDIFF_MAX here, but that requires C99, which michael@0: is not yet available everywhere; this should be equivalent.*/ michael@0: #define OC_DCT_EOB_FINISH (~(size_t)0>>1) michael@0: michael@0: /*The location of the (6) run length bits in the code word. michael@0: These are placed at index 0 and given 8 bits (even though 6 would suffice) michael@0: because it may be faster to extract the lower byte on some platforms.*/ michael@0: #define OC_DCT_CW_RLEN_SHIFT (0) michael@0: /*The location of the (12) EOB bits in the code word.*/ michael@0: #define OC_DCT_CW_EOB_SHIFT (8) michael@0: /*The location of the (1) flip bit in the code word. michael@0: This must be right under the magnitude bits.*/ michael@0: #define OC_DCT_CW_FLIP_BIT (20) michael@0: /*The location of the (11) token magnitude bits in the code word. michael@0: These must be last, and rely on a sign-extending right shift.*/ michael@0: #define OC_DCT_CW_MAG_SHIFT (21) michael@0: michael@0: /*Pack the given fields into a code word.*/ michael@0: #define OC_DCT_CW_PACK(_eobs,_rlen,_mag,_flip) \ michael@0: ((_eobs)<=0x10){ michael@0: int offs; michael@0: offs=ret&0x1F; michael@0: ret=6+offs+(int)oc_pack_read(_opb,ret-offs>>4); michael@0: } michael@0: return ret; michael@0: } michael@0: michael@0: static int oc_block_run_unpack(oc_pack_buf *_opb){ michael@0: /*Coding scheme: michael@0: Codeword Run Length michael@0: 0x 1-2 michael@0: 10x 3-4 michael@0: 110x 5-6 michael@0: 1110xx 7-10 michael@0: 11110xx 11-14 michael@0: 11111xxxx 15-30*/ michael@0: static const ogg_int16_t OC_BLOCK_RUN_TREE[61]={ michael@0: 5, michael@0: -(2<<8|1),-(2<<8|1),-(2<<8|1),-(2<<8|1), michael@0: -(2<<8|1),-(2<<8|1),-(2<<8|1),-(2<<8|1), michael@0: -(2<<8|2),-(2<<8|2),-(2<<8|2),-(2<<8|2), michael@0: -(2<<8|2),-(2<<8|2),-(2<<8|2),-(2<<8|2), michael@0: -(3<<8|3),-(3<<8|3),-(3<<8|3),-(3<<8|3), michael@0: -(3<<8|4),-(3<<8|4),-(3<<8|4),-(3<<8|4), michael@0: -(4<<8|5),-(4<<8|5),-(4<<8|6),-(4<<8|6), michael@0: 33, 36, 39, 44, michael@0: 1,-(1<<8|7),-(1<<8|8), michael@0: 1,-(1<<8|9),-(1<<8|10), michael@0: 2,-(2<<8|11),-(2<<8|12),-(2<<8|13),-(2<<8|14), michael@0: 4, michael@0: -(4<<8|15),-(4<<8|16),-(4<<8|17),-(4<<8|18), michael@0: -(4<<8|19),-(4<<8|20),-(4<<8|21),-(4<<8|22), michael@0: -(4<<8|23),-(4<<8|24),-(4<<8|25),-(4<<8|26), michael@0: -(4<<8|27),-(4<<8|28),-(4<<8|29),-(4<<8|30) michael@0: }; michael@0: return oc_huff_token_decode(_opb,OC_BLOCK_RUN_TREE); michael@0: } michael@0: michael@0: michael@0: michael@0: void oc_dec_accel_init_c(oc_dec_ctx *_dec){ michael@0: # if defined(OC_DEC_USE_VTABLE) michael@0: _dec->opt_vtable.dc_unpredict_mcu_plane= michael@0: 
oc_dec_dc_unpredict_mcu_plane_c; michael@0: # endif michael@0: } michael@0: michael@0: static int oc_dec_init(oc_dec_ctx *_dec,const th_info *_info, michael@0: const th_setup_info *_setup){ michael@0: int qti; michael@0: int pli; michael@0: int qi; michael@0: int ret; michael@0: ret=oc_state_init(&_dec->state,_info,3); michael@0: if(ret<0)return ret; michael@0: ret=oc_huff_trees_copy(_dec->huff_tables, michael@0: (const ogg_int16_t *const *)_setup->huff_tables); michael@0: if(ret<0){ michael@0: oc_state_clear(&_dec->state); michael@0: return ret; michael@0: } michael@0: /*For each fragment, allocate one byte for every DCT coefficient token, plus michael@0: one byte for extra-bits for each token, plus one more byte for the long michael@0: EOB run, just in case it's the very last token and has a run length of michael@0: one.*/ michael@0: _dec->dct_tokens=(unsigned char *)_ogg_malloc((64+64+1)* michael@0: _dec->state.nfrags*sizeof(_dec->dct_tokens[0])); michael@0: if(_dec->dct_tokens==NULL){ michael@0: oc_huff_trees_clear(_dec->huff_tables); michael@0: oc_state_clear(&_dec->state); michael@0: return TH_EFAULT; michael@0: } michael@0: for(qi=0;qi<64;qi++)for(pli=0;pli<3;pli++)for(qti=0;qti<2;qti++){ michael@0: _dec->state.dequant_tables[qi][pli][qti]= michael@0: _dec->state.dequant_table_data[qi][pli][qti]; michael@0: } michael@0: oc_dequant_tables_init(_dec->state.dequant_tables,_dec->pp_dc_scale, michael@0: &_setup->qinfo); michael@0: for(qi=0;qi<64;qi++){ michael@0: int qsum; michael@0: qsum=0; michael@0: for(qti=0;qti<2;qti++)for(pli=0;pli<3;pli++){ michael@0: qsum+=_dec->state.dequant_tables[qi][pli][qti][12]+ michael@0: _dec->state.dequant_tables[qi][pli][qti][17]+ michael@0: _dec->state.dequant_tables[qi][pli][qti][18]+ michael@0: _dec->state.dequant_tables[qi][pli][qti][24]<<(pli==0); michael@0: } michael@0: _dec->pp_sharp_mod[qi]=-(qsum>>11); michael@0: } michael@0: memcpy(_dec->state.loop_filter_limits,_setup->qinfo.loop_filter_limits, michael@0: sizeof(_dec->state.loop_filter_limits)); michael@0: oc_dec_accel_init(_dec); michael@0: _dec->pp_level=OC_PP_LEVEL_DISABLED; michael@0: _dec->dc_qis=NULL; michael@0: _dec->variances=NULL; michael@0: _dec->pp_frame_data=NULL; michael@0: _dec->stripe_cb.ctx=NULL; michael@0: _dec->stripe_cb.stripe_decoded=NULL; michael@0: #if defined(HAVE_CAIRO) michael@0: _dec->telemetry=0; michael@0: _dec->telemetry_bits=0; michael@0: _dec->telemetry_qi=0; michael@0: _dec->telemetry_mbmode=0; michael@0: _dec->telemetry_mv=0; michael@0: _dec->telemetry_frame_data=NULL; michael@0: #endif michael@0: return 0; michael@0: } michael@0: michael@0: static void oc_dec_clear(oc_dec_ctx *_dec){ michael@0: #if defined(HAVE_CAIRO) michael@0: _ogg_free(_dec->telemetry_frame_data); michael@0: #endif michael@0: _ogg_free(_dec->pp_frame_data); michael@0: _ogg_free(_dec->variances); michael@0: _ogg_free(_dec->dc_qis); michael@0: _ogg_free(_dec->dct_tokens); michael@0: oc_huff_trees_clear(_dec->huff_tables); michael@0: oc_state_clear(&_dec->state); michael@0: } michael@0: michael@0: michael@0: static int oc_dec_frame_header_unpack(oc_dec_ctx *_dec){ michael@0: long val; michael@0: /*Check to make sure this is a data packet.*/ michael@0: val=oc_pack_read1(&_dec->opb); michael@0: if(val!=0)return TH_EBADPACKET; michael@0: /*Read in the frame type (I or P).*/ michael@0: val=oc_pack_read1(&_dec->opb); michael@0: _dec->state.frame_type=(int)val; michael@0: /*Read in the qi list.*/ michael@0: val=oc_pack_read(&_dec->opb,6); michael@0: _dec->state.qis[0]=(unsigned char)val; michael@0: 
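  /*Reader aid (not decoder code): the qi list read above and below is one to
     three 6-bit quantizer indices, each index after the first preceded by a
     1-bit "more follows" flag.
    A standalone sketch of the same logic, with a hypothetical read_bits()
     helper standing in for oc_pack_read()/oc_pack_read1():
      nqis=1;
      qis[0]=read_bits(6);
      while(nqis<3&&read_bits(1))qis[nqis++]=read_bits(6);*/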
val=oc_pack_read1(&_dec->opb); michael@0: if(!val)_dec->state.nqis=1; michael@0: else{ michael@0: val=oc_pack_read(&_dec->opb,6); michael@0: _dec->state.qis[1]=(unsigned char)val; michael@0: val=oc_pack_read1(&_dec->opb); michael@0: if(!val)_dec->state.nqis=2; michael@0: else{ michael@0: val=oc_pack_read(&_dec->opb,6); michael@0: _dec->state.qis[2]=(unsigned char)val; michael@0: _dec->state.nqis=3; michael@0: } michael@0: } michael@0: if(_dec->state.frame_type==OC_INTRA_FRAME){ michael@0: /*Keyframes have 3 unused configuration bits, holdovers from VP3 days. michael@0: Most of the other unused bits in the VP3 headers were eliminated. michael@0: I don't know why these remain.*/ michael@0: /*I wanted to eliminate wasted bits, but not all config wiggle room michael@0: --Monty.*/ michael@0: val=oc_pack_read(&_dec->opb,3); michael@0: if(val!=0)return TH_EIMPL; michael@0: } michael@0: return 0; michael@0: } michael@0: michael@0: /*Mark all fragments as coded and in OC_MODE_INTRA. michael@0: This also builds up the coded fragment list (in coded order), and clears the michael@0: uncoded fragment list. michael@0: It does not update the coded macro block list nor the super block flags, as michael@0: those are not used when decoding INTRA frames.*/ michael@0: static void oc_dec_mark_all_intra(oc_dec_ctx *_dec){ michael@0: const oc_sb_map *sb_maps; michael@0: const oc_sb_flags *sb_flags; michael@0: oc_fragment *frags; michael@0: ptrdiff_t *coded_fragis; michael@0: ptrdiff_t ncoded_fragis; michael@0: ptrdiff_t prev_ncoded_fragis; michael@0: unsigned nsbs; michael@0: unsigned sbi; michael@0: int pli; michael@0: coded_fragis=_dec->state.coded_fragis; michael@0: prev_ncoded_fragis=ncoded_fragis=0; michael@0: sb_maps=(const oc_sb_map *)_dec->state.sb_maps; michael@0: sb_flags=_dec->state.sb_flags; michael@0: frags=_dec->state.frags; michael@0: sbi=nsbs=0; michael@0: for(pli=0;pli<3;pli++){ michael@0: nsbs+=_dec->state.fplanes[pli].nsbs; michael@0: for(;sbi=0){ michael@0: frags[fragi].coded=1; michael@0: frags[fragi].refi=OC_FRAME_SELF; michael@0: frags[fragi].mb_mode=OC_MODE_INTRA; michael@0: coded_fragis[ncoded_fragis++]=fragi; michael@0: } michael@0: } michael@0: } michael@0: } michael@0: _dec->state.ncoded_fragis[pli]=ncoded_fragis-prev_ncoded_fragis; michael@0: prev_ncoded_fragis=ncoded_fragis; michael@0: } michael@0: _dec->state.ntotal_coded_fragis=ncoded_fragis; michael@0: } michael@0: michael@0: /*Decodes the bit flags indicating whether each super block is partially coded michael@0: or not. michael@0: Return: The number of partially coded super blocks.*/ michael@0: static unsigned oc_dec_partial_sb_flags_unpack(oc_dec_ctx *_dec){ michael@0: oc_sb_flags *sb_flags; michael@0: unsigned nsbs; michael@0: unsigned sbi; michael@0: unsigned npartial; michael@0: unsigned run_count; michael@0: long val; michael@0: int flag; michael@0: val=oc_pack_read1(&_dec->opb); michael@0: flag=(int)val; michael@0: sb_flags=_dec->state.sb_flags; michael@0: nsbs=_dec->state.nsbs; michael@0: sbi=npartial=0; michael@0: while(sbiopb); michael@0: full_run=run_count>=4129; michael@0: do{ michael@0: sb_flags[sbi].coded_partially=flag; michael@0: sb_flags[sbi].coded_fully=0; michael@0: npartial+=flag; michael@0: sbi++; michael@0: } michael@0: while(--run_count>0&&sbiopb); michael@0: flag=(int)val; michael@0: } michael@0: else flag=!flag; michael@0: } michael@0: /*TODO: run_count should be 0 here. 
michael@0: If it's not, we should issue a warning of some kind.*/ michael@0: return npartial; michael@0: } michael@0: michael@0: /*Decodes the bit flags for whether or not each non-partially-coded super michael@0: block is fully coded or not. michael@0: This function should only be called if there is at least one michael@0: non-partially-coded super block. michael@0: Return: The number of partially coded super blocks.*/ michael@0: static void oc_dec_coded_sb_flags_unpack(oc_dec_ctx *_dec){ michael@0: oc_sb_flags *sb_flags; michael@0: unsigned nsbs; michael@0: unsigned sbi; michael@0: unsigned run_count; michael@0: long val; michael@0: int flag; michael@0: sb_flags=_dec->state.sb_flags; michael@0: nsbs=_dec->state.nsbs; michael@0: /*Skip partially coded super blocks.*/ michael@0: for(sbi=0;sb_flags[sbi].coded_partially;sbi++); michael@0: val=oc_pack_read1(&_dec->opb); michael@0: flag=(int)val; michael@0: do{ michael@0: int full_run; michael@0: run_count=oc_sb_run_unpack(&_dec->opb); michael@0: full_run=run_count>=4129; michael@0: for(;sbiopb); michael@0: flag=(int)val; michael@0: } michael@0: else flag=!flag; michael@0: } michael@0: while(sbistate.nsbs)oc_dec_coded_sb_flags_unpack(_dec); michael@0: if(npartial>0){ michael@0: val=oc_pack_read1(&_dec->opb); michael@0: flag=!(int)val; michael@0: } michael@0: else flag=0; michael@0: sb_maps=(const oc_sb_map *)_dec->state.sb_maps; michael@0: sb_flags=_dec->state.sb_flags; michael@0: mb_modes=_dec->state.mb_modes; michael@0: frags=_dec->state.frags; michael@0: sbi=nsbs=run_count=0; michael@0: coded_fragis=_dec->state.coded_fragis; michael@0: uncoded_fragis=coded_fragis+_dec->state.nfrags; michael@0: prev_ncoded_fragis=ncoded_fragis=nuncoded_fragis=0; michael@0: for(pli=0;pli<3;pli++){ michael@0: nsbs+=_dec->state.fplanes[pli].nsbs; michael@0: for(;sbi=0){ michael@0: int coded; michael@0: if(sb_flags[sbi].coded_fully)coded=1; michael@0: else if(!sb_flags[sbi].coded_partially)coded=0; michael@0: else{ michael@0: if(run_count<=0){ michael@0: run_count=oc_block_run_unpack(&_dec->opb); michael@0: flag=!flag; michael@0: } michael@0: run_count--; michael@0: coded=flag; michael@0: } michael@0: if(coded)coded_fragis[ncoded_fragis++]=fragi; michael@0: else *(uncoded_fragis-++nuncoded_fragis)=fragi; michael@0: quad_coded|=coded; michael@0: frags[fragi].coded=coded; michael@0: frags[fragi].refi=OC_FRAME_NONE; michael@0: } michael@0: } michael@0: /*Remember if there's a coded luma block in this macro block.*/ michael@0: if(!pli)mb_modes[sbi<<2|quadi]=quad_coded; michael@0: } michael@0: } michael@0: _dec->state.ncoded_fragis[pli]=ncoded_fragis-prev_ncoded_fragis; michael@0: prev_ncoded_fragis=ncoded_fragis; michael@0: } michael@0: _dec->state.ntotal_coded_fragis=ncoded_fragis; michael@0: /*TODO: run_count should be 0 here. 
michael@0: If it's not, we should issue a warning of some kind.*/ michael@0: } michael@0: michael@0: michael@0: /*Coding scheme: michael@0: Codeword Mode Index michael@0: 0 0 michael@0: 10 1 michael@0: 110 2 michael@0: 1110 3 michael@0: 11110 4 michael@0: 111110 5 michael@0: 1111110 6 michael@0: 1111111 7*/ michael@0: static const ogg_int16_t OC_VLC_MODE_TREE[26]={ michael@0: 4, michael@0: -(1<<8|0),-(1<<8|0),-(1<<8|0),-(1<<8|0), michael@0: -(1<<8|0),-(1<<8|0),-(1<<8|0),-(1<<8|0), michael@0: -(2<<8|1),-(2<<8|1),-(2<<8|1),-(2<<8|1), michael@0: -(3<<8|2),-(3<<8|2),-(4<<8|3),17, michael@0: 3, michael@0: -(1<<8|4),-(1<<8|4),-(1<<8|4),-(1<<8|4), michael@0: -(2<<8|5),-(2<<8|5),-(3<<8|6),-(3<<8|7) michael@0: }; michael@0: michael@0: static const ogg_int16_t OC_CLC_MODE_TREE[9]={ michael@0: 3, michael@0: -(3<<8|0),-(3<<8|1),-(3<<8|2),-(3<<8|3), michael@0: -(3<<8|4),-(3<<8|5),-(3<<8|6),-(3<<8|7) michael@0: }; michael@0: michael@0: /*Unpacks the list of macro block modes for INTER frames.*/ michael@0: static void oc_dec_mb_modes_unpack(oc_dec_ctx *_dec){ michael@0: signed char *mb_modes; michael@0: const unsigned char *alphabet; michael@0: unsigned char scheme0_alphabet[8]; michael@0: const ogg_int16_t *mode_tree; michael@0: size_t nmbs; michael@0: size_t mbi; michael@0: long val; michael@0: int mode_scheme; michael@0: val=oc_pack_read(&_dec->opb,3); michael@0: mode_scheme=(int)val; michael@0: if(mode_scheme==0){ michael@0: int mi; michael@0: /*Just in case, initialize the modes to something. michael@0: If the bitstream doesn't contain each index exactly once, it's likely michael@0: corrupt and the rest of the packet is garbage anyway, but this way we michael@0: won't crash, and we'll decode SOMETHING.*/ michael@0: /*LOOP VECTORIZES*/ michael@0: for(mi=0;miopb,3); michael@0: scheme0_alphabet[val]=OC_MODE_ALPHABETS[6][mi]; michael@0: } michael@0: alphabet=scheme0_alphabet; michael@0: } michael@0: else alphabet=OC_MODE_ALPHABETS[mode_scheme-1]; michael@0: mode_tree=mode_scheme==7?OC_CLC_MODE_TREE:OC_VLC_MODE_TREE; michael@0: mb_modes=_dec->state.mb_modes; michael@0: nmbs=_dec->state.nmbs; michael@0: for(mbi=0;mbi0){ michael@0: /*We have a coded luma block; decode a mode.*/ michael@0: mb_modes[mbi]=alphabet[oc_huff_token_decode(&_dec->opb,mode_tree)]; michael@0: } michael@0: /*For other valid macro blocks, INTER_NOMV is forced, but we rely on the michael@0: fact that OC_MODE_INTER_NOMV is already 0.*/ michael@0: } michael@0: } michael@0: michael@0: michael@0: michael@0: static const ogg_int16_t OC_VLC_MV_COMP_TREE[101]={ michael@0: 5, michael@0: -(3<<8|32+0),-(3<<8|32+0),-(3<<8|32+0),-(3<<8|32+0), michael@0: -(3<<8|32+1),-(3<<8|32+1),-(3<<8|32+1),-(3<<8|32+1), michael@0: -(3<<8|32-1),-(3<<8|32-1),-(3<<8|32-1),-(3<<8|32-1), michael@0: -(4<<8|32+2),-(4<<8|32+2),-(4<<8|32-2),-(4<<8|32-2), michael@0: -(4<<8|32+3),-(4<<8|32+3),-(4<<8|32-3),-(4<<8|32-3), michael@0: 33, 36, 39, 42, michael@0: 45, 50, 55, 60, michael@0: 65, 74, 83, 92, michael@0: 1,-(1<<8|32+4),-(1<<8|32-4), michael@0: 1,-(1<<8|32+5),-(1<<8|32-5), michael@0: 1,-(1<<8|32+6),-(1<<8|32-6), michael@0: 1,-(1<<8|32+7),-(1<<8|32-7), michael@0: 2,-(2<<8|32+8),-(2<<8|32-8),-(2<<8|32+9),-(2<<8|32-9), michael@0: 2,-(2<<8|32+10),-(2<<8|32-10),-(2<<8|32+11),-(2<<8|32-11), michael@0: 2,-(2<<8|32+12),-(2<<8|32-12),-(2<<8|32+13),-(2<<8|32-13), michael@0: 2,-(2<<8|32+14),-(2<<8|32-14),-(2<<8|32+15),-(2<<8|32-15), michael@0: 3, michael@0: -(3<<8|32+16),-(3<<8|32-16),-(3<<8|32+17),-(3<<8|32-17), michael@0: -(3<<8|32+18),-(3<<8|32-18),-(3<<8|32+19),-(3<<8|32-19), 
michael@0: 3, michael@0: -(3<<8|32+20),-(3<<8|32-20),-(3<<8|32+21),-(3<<8|32-21), michael@0: -(3<<8|32+22),-(3<<8|32-22),-(3<<8|32+23),-(3<<8|32-23), michael@0: 3, michael@0: -(3<<8|32+24),-(3<<8|32-24),-(3<<8|32+25),-(3<<8|32-25), michael@0: -(3<<8|32+26),-(3<<8|32-26),-(3<<8|32+27),-(3<<8|32-27), michael@0: 3, michael@0: -(3<<8|32+28),-(3<<8|32-28),-(3<<8|32+29),-(3<<8|32-29), michael@0: -(3<<8|32+30),-(3<<8|32-30),-(3<<8|32+31),-(3<<8|32-31) michael@0: }; michael@0: michael@0: static const ogg_int16_t OC_CLC_MV_COMP_TREE[65]={ michael@0: 6, michael@0: -(6<<8|32 +0),-(6<<8|32 -0),-(6<<8|32 +1),-(6<<8|32 -1), michael@0: -(6<<8|32 +2),-(6<<8|32 -2),-(6<<8|32 +3),-(6<<8|32 -3), michael@0: -(6<<8|32 +4),-(6<<8|32 -4),-(6<<8|32 +5),-(6<<8|32 -5), michael@0: -(6<<8|32 +6),-(6<<8|32 -6),-(6<<8|32 +7),-(6<<8|32 -7), michael@0: -(6<<8|32 +8),-(6<<8|32 -8),-(6<<8|32 +9),-(6<<8|32 -9), michael@0: -(6<<8|32+10),-(6<<8|32-10),-(6<<8|32+11),-(6<<8|32-11), michael@0: -(6<<8|32+12),-(6<<8|32-12),-(6<<8|32+13),-(6<<8|32-13), michael@0: -(6<<8|32+14),-(6<<8|32-14),-(6<<8|32+15),-(6<<8|32-15), michael@0: -(6<<8|32+16),-(6<<8|32-16),-(6<<8|32+17),-(6<<8|32-17), michael@0: -(6<<8|32+18),-(6<<8|32-18),-(6<<8|32+19),-(6<<8|32-19), michael@0: -(6<<8|32+20),-(6<<8|32-20),-(6<<8|32+21),-(6<<8|32-21), michael@0: -(6<<8|32+22),-(6<<8|32-22),-(6<<8|32+23),-(6<<8|32-23), michael@0: -(6<<8|32+24),-(6<<8|32-24),-(6<<8|32+25),-(6<<8|32-25), michael@0: -(6<<8|32+26),-(6<<8|32-26),-(6<<8|32+27),-(6<<8|32-27), michael@0: -(6<<8|32+28),-(6<<8|32-28),-(6<<8|32+29),-(6<<8|32-29), michael@0: -(6<<8|32+30),-(6<<8|32-30),-(6<<8|32+31),-(6<<8|32-31) michael@0: }; michael@0: michael@0: michael@0: static oc_mv oc_mv_unpack(oc_pack_buf *_opb,const ogg_int16_t *_tree){ michael@0: int dx; michael@0: int dy; michael@0: dx=oc_huff_token_decode(_opb,_tree)-32; michael@0: dy=oc_huff_token_decode(_opb,_tree)-32; michael@0: return OC_MV(dx,dy); michael@0: } michael@0: michael@0: /*Unpacks the list of motion vectors for INTER frames, and propagtes the macro michael@0: block modes and motion vectors to the individual fragments.*/ michael@0: static void oc_dec_mv_unpack_and_frag_modes_fill(oc_dec_ctx *_dec){ michael@0: const oc_mb_map *mb_maps; michael@0: const signed char *mb_modes; michael@0: oc_set_chroma_mvs_func set_chroma_mvs; michael@0: const ogg_int16_t *mv_comp_tree; michael@0: oc_fragment *frags; michael@0: oc_mv *frag_mvs; michael@0: const unsigned char *map_idxs; michael@0: int map_nidxs; michael@0: oc_mv last_mv; michael@0: oc_mv prior_mv; michael@0: oc_mv cbmvs[4]; michael@0: size_t nmbs; michael@0: size_t mbi; michael@0: long val; michael@0: set_chroma_mvs=OC_SET_CHROMA_MVS_TABLE[_dec->state.info.pixel_fmt]; michael@0: val=oc_pack_read1(&_dec->opb); michael@0: mv_comp_tree=val?OC_CLC_MV_COMP_TREE:OC_VLC_MV_COMP_TREE; michael@0: map_idxs=OC_MB_MAP_IDXS[_dec->state.info.pixel_fmt]; michael@0: map_nidxs=OC_MB_MAP_NIDXS[_dec->state.info.pixel_fmt]; michael@0: prior_mv=last_mv=0; michael@0: frags=_dec->state.frags; michael@0: frag_mvs=_dec->state.frag_mvs; michael@0: mb_maps=(const oc_mb_map *)_dec->state.mb_maps; michael@0: mb_modes=_dec->state.mb_modes; michael@0: nmbs=_dec->state.nmbs; michael@0: for(mbi=0;mbiopb,mv_comp_tree); michael@0: frag_mvs[fragi]=lbmvs[bi]; michael@0: } michael@0: else lbmvs[bi]=0; michael@0: } michael@0: (*set_chroma_mvs)(cbmvs,lbmvs); michael@0: for(mapii=4;mapii>2][bi]; michael@0: if(frags[fragi].coded){ michael@0: frags[fragi].refi=OC_FRAME_PREV; michael@0: frags[fragi].mb_mode=OC_MODE_INTER_MV_FOUR; 
michael@0: frag_mvs[fragi]=cbmvs[bi]; michael@0: } michael@0: } michael@0: } michael@0: else{ michael@0: switch(mb_mode){ michael@0: case OC_MODE_INTER_MV:{ michael@0: prior_mv=last_mv; michael@0: last_mv=mbmv=oc_mv_unpack(&_dec->opb,mv_comp_tree); michael@0: }break; michael@0: case OC_MODE_INTER_MV_LAST:mbmv=last_mv;break; michael@0: case OC_MODE_INTER_MV_LAST2:{ michael@0: mbmv=prior_mv; michael@0: prior_mv=last_mv; michael@0: last_mv=mbmv; michael@0: }break; michael@0: case OC_MODE_GOLDEN_MV:{ michael@0: mbmv=oc_mv_unpack(&_dec->opb,mv_comp_tree); michael@0: }break; michael@0: default:mbmv=0;break; michael@0: } michael@0: /*Fill in the MVs for the fragments.*/ michael@0: refi=OC_FRAME_FOR_MODE(mb_mode); michael@0: mapii=0; michael@0: do{ michael@0: mapi=map_idxs[mapii]; michael@0: fragi=mb_maps[mbi][mapi>>2][mapi&3]; michael@0: if(frags[fragi].coded){ michael@0: frags[fragi].refi=refi; michael@0: frags[fragi].mb_mode=mb_mode; michael@0: frag_mvs[fragi]=mbmv; michael@0: } michael@0: } michael@0: while(++mapiistate.ntotal_coded_fragis; michael@0: if(ncoded_fragis<=0)return; michael@0: frags=_dec->state.frags; michael@0: coded_fragis=_dec->state.coded_fragis; michael@0: if(_dec->state.nqis==1){ michael@0: /*If this frame has only a single qi value, then just use it for all coded michael@0: fragments.*/ michael@0: for(fragii=0;fragiiopb); michael@0: flag=(int)val; michael@0: nqi1=0; michael@0: fragii=0; michael@0: while(fragiiopb); michael@0: full_run=run_count>=4129; michael@0: do{ michael@0: frags[coded_fragis[fragii++]].qii=flag; michael@0: nqi1+=flag; michael@0: } michael@0: while(--run_count>0&&fragiiopb); michael@0: flag=(int)val; michael@0: } michael@0: else flag=!flag; michael@0: } michael@0: /*TODO: run_count should be 0 here. michael@0: If it's not, we should issue a warning of some kind.*/ michael@0: /*If we have 3 different qi's for this frame, and there was at least one michael@0: fragment with a non-zero qi, make the second pass.*/ michael@0: if(_dec->state.nqis==3&&nqi1>0){ michael@0: /*Skip qii==0 fragments.*/ michael@0: for(fragii=0;frags[coded_fragis[fragii]].qii==0;fragii++); michael@0: val=oc_pack_read1(&_dec->opb); michael@0: flag=(int)val; michael@0: do{ michael@0: int full_run; michael@0: run_count=oc_sb_run_unpack(&_dec->opb); michael@0: full_run=run_count>=4129; michael@0: for(;fragiiopb); michael@0: flag=(int)val; michael@0: } michael@0: else flag=!flag; michael@0: } michael@0: while(fragiidct_tokens; michael@0: frags=_dec->state.frags; michael@0: coded_fragis=_dec->state.coded_fragis; michael@0: ncoded_fragis=fragii=eobs=ti=0; michael@0: for(pli=0;pli<3;pli++){ michael@0: ptrdiff_t run_counts[64]; michael@0: ptrdiff_t eob_count; michael@0: ptrdiff_t eobi; michael@0: int rli; michael@0: ncoded_fragis+=_dec->state.ncoded_fragis[pli]; michael@0: memset(run_counts,0,sizeof(run_counts)); michael@0: _dec->eob_runs[pli][0]=eobs; michael@0: _dec->ti0[pli][0]=ti; michael@0: /*Continue any previous EOB run, if there was one.*/ michael@0: eobi=eobs; michael@0: if(ncoded_fragis-fragii0)frags[coded_fragis[fragii++]].dc=0; michael@0: while(fragiiopb, michael@0: _dec->huff_tables[_huff_idxs[pli+1>>1]]); michael@0: dct_tokens[ti++]=(unsigned char)token; michael@0: if(OC_DCT_TOKEN_NEEDS_MORE(token)){ michael@0: eb=(int)oc_pack_read(&_dec->opb, michael@0: OC_INTERNAL_DCT_TOKEN_EXTRA_BITS[token]); michael@0: dct_tokens[ti++]=(unsigned char)eb; michael@0: if(token==OC_DCT_TOKEN_FAT_EOB)dct_tokens[ti++]=(unsigned char)(eb>>8); michael@0: eb<<=OC_DCT_TOKEN_EB_POS(token); michael@0: } 
michael@0: else eb=0; michael@0: cw=OC_DCT_CODE_WORD[token]+eb; michael@0: eobs=cw>>OC_DCT_CW_EOB_SHIFT&0xFFF; michael@0: if(cw==OC_DCT_CW_FINISH)eobs=OC_DCT_EOB_FINISH; michael@0: if(eobs){ michael@0: eobi=OC_MINI(eobs,ncoded_fragis-fragii); michael@0: eob_count+=eobi; michael@0: eobs-=eobi; michael@0: while(eobi-->0)frags[coded_fragis[fragii++]].dc=0; michael@0: } michael@0: else{ michael@0: int coeff; michael@0: skip=(unsigned char)(cw>>OC_DCT_CW_RLEN_SHIFT); michael@0: cw^=-(cw&1<>OC_DCT_CW_MAG_SHIFT; michael@0: if(skip)coeff=0; michael@0: run_counts[skip]++; michael@0: frags[coded_fragis[fragii++]].dc=coeff; michael@0: } michael@0: } michael@0: /*Add the total EOB count to the longest run length.*/ michael@0: run_counts[63]+=eob_count; michael@0: /*And convert the run_counts array to a moment table.*/ michael@0: for(rli=63;rli-->0;)run_counts[rli]+=run_counts[rli+1]; michael@0: /*Finally, subtract off the number of coefficients that have been michael@0: accounted for by runs started in this coefficient.*/ michael@0: for(rli=64;rli-->0;)_ntoks_left[pli][rli]-=run_counts[rli]; michael@0: } michael@0: _dec->dct_tokens_count=ti; michael@0: return eobs; michael@0: } michael@0: michael@0: /*Unpacks the AC coefficient tokens. michael@0: This can completely discard coefficient values while unpacking, and so is michael@0: somewhat simpler than unpacking the DC coefficient tokens. michael@0: _huff_idx: The index of the Huffman table to use for each color plane. michael@0: _ntoks_left: The number of tokens left to be decoded in each color plane for michael@0: each coefficient. michael@0: This is updated as EOB tokens and zero run tokens are decoded. michael@0: _eobs: The length of any outstanding EOB run from previous michael@0: coefficients. michael@0: Return: The length of any outstanding EOB run.*/ michael@0: static int oc_dec_ac_coeff_unpack(oc_dec_ctx *_dec,int _zzi,int _huff_idxs[2], michael@0: ptrdiff_t _ntoks_left[3][64],ptrdiff_t _eobs){ michael@0: unsigned char *dct_tokens; michael@0: ptrdiff_t ti; michael@0: int pli; michael@0: dct_tokens=_dec->dct_tokens; michael@0: ti=_dec->dct_tokens_count; michael@0: for(pli=0;pli<3;pli++){ michael@0: ptrdiff_t run_counts[64]; michael@0: ptrdiff_t eob_count; michael@0: size_t ntoks_left; michael@0: size_t ntoks; michael@0: int rli; michael@0: _dec->eob_runs[pli][_zzi]=_eobs; michael@0: _dec->ti0[pli][_zzi]=ti; michael@0: ntoks_left=_ntoks_left[pli][_zzi]; michael@0: memset(run_counts,0,sizeof(run_counts)); michael@0: eob_count=0; michael@0: ntoks=0; michael@0: while(ntoks+_eobsopb, michael@0: _dec->huff_tables[_huff_idxs[pli+1>>1]]); michael@0: dct_tokens[ti++]=(unsigned char)token; michael@0: if(OC_DCT_TOKEN_NEEDS_MORE(token)){ michael@0: eb=(int)oc_pack_read(&_dec->opb, michael@0: OC_INTERNAL_DCT_TOKEN_EXTRA_BITS[token]); michael@0: dct_tokens[ti++]=(unsigned char)eb; michael@0: if(token==OC_DCT_TOKEN_FAT_EOB)dct_tokens[ti++]=(unsigned char)(eb>>8); michael@0: eb<<=OC_DCT_TOKEN_EB_POS(token); michael@0: } michael@0: else eb=0; michael@0: cw=OC_DCT_CODE_WORD[token]+eb; michael@0: skip=(unsigned char)(cw>>OC_DCT_CW_RLEN_SHIFT); michael@0: _eobs=cw>>OC_DCT_CW_EOB_SHIFT&0xFFF; michael@0: if(cw==OC_DCT_CW_FINISH)_eobs=OC_DCT_EOB_FINISH; michael@0: if(_eobs==0){ michael@0: run_counts[skip]++; michael@0: ntoks++; michael@0: } michael@0: } michael@0: /*Add the portion of the last EOB run actually used by this coefficient.*/ michael@0: eob_count+=ntoks_left-ntoks; michael@0: /*And remove it from the remaining EOB count.*/ michael@0: 
_eobs-=ntoks_left-ntoks; michael@0: /*Add the total EOB count to the longest run length.*/ michael@0: run_counts[63]+=eob_count; michael@0: /*And convert the run_counts array to a moment table.*/ michael@0: for(rli=63;rli-->0;)run_counts[rli]+=run_counts[rli+1]; michael@0: /*Finally, subtract off the number of coefficients that have been michael@0: accounted for by runs started in this coefficient.*/ michael@0: for(rli=64-_zzi;rli-->0;)_ntoks_left[pli][_zzi+rli]-=run_counts[rli]; michael@0: } michael@0: _dec->dct_tokens_count=ti; michael@0: return _eobs; michael@0: } michael@0: michael@0: /*Tokens describing the DCT coefficients that belong to each fragment are michael@0: stored in the bitstream grouped by coefficient, not by fragment. michael@0: michael@0: This means that we either decode all the tokens in order, building up a michael@0: separate coefficient list for each fragment as we go, and then go back and michael@0: do the iDCT on each fragment, or we have to create separate lists of tokens michael@0: for each coefficient, so that we can pull the next token required off the michael@0: head of the appropriate list when decoding a specific fragment. michael@0: michael@0: The former was VP3's choice, and it meant 2*w*h extra storage for all the michael@0: decoded coefficient values. michael@0: michael@0: We take the second option, which lets us store just one to three bytes per michael@0: token (generally far fewer than the number of coefficients, due to EOB michael@0: tokens and zero runs), and which requires us to only maintain a counter for michael@0: each of the 64 coefficients, instead of a counter for every fragment to michael@0: determine where the next token goes. michael@0: michael@0: We actually use 3 counters per coefficient, one for each color plane, so we michael@0: can decode all color planes simultaneously. 
michael@0: This lets color conversion, etc., be done as soon as a full MCU (one or michael@0: two super block rows) is decoded, while the image data is still in cache.*/ michael@0: michael@0: static void oc_dec_residual_tokens_unpack(oc_dec_ctx *_dec){ michael@0: static const unsigned char OC_HUFF_LIST_MAX[5]={1,6,15,28,64}; michael@0: ptrdiff_t ntoks_left[3][64]; michael@0: int huff_idxs[2]; michael@0: ptrdiff_t eobs; michael@0: long val; michael@0: int pli; michael@0: int zzi; michael@0: int hgi; michael@0: for(pli=0;pli<3;pli++)for(zzi=0;zzi<64;zzi++){ michael@0: ntoks_left[pli][zzi]=_dec->state.ncoded_fragis[pli]; michael@0: } michael@0: val=oc_pack_read(&_dec->opb,4); michael@0: huff_idxs[0]=(int)val; michael@0: val=oc_pack_read(&_dec->opb,4); michael@0: huff_idxs[1]=(int)val; michael@0: _dec->eob_runs[0][0]=0; michael@0: eobs=oc_dec_dc_coeff_unpack(_dec,huff_idxs,ntoks_left); michael@0: #if defined(HAVE_CAIRO) michael@0: _dec->telemetry_dc_bytes=oc_pack_bytes_left(&_dec->opb); michael@0: #endif michael@0: val=oc_pack_read(&_dec->opb,4); michael@0: huff_idxs[0]=(int)val; michael@0: val=oc_pack_read(&_dec->opb,4); michael@0: huff_idxs[1]=(int)val; michael@0: zzi=1; michael@0: for(hgi=1;hgi<5;hgi++){ michael@0: huff_idxs[0]+=16; michael@0: huff_idxs[1]+=16; michael@0: for(;zzipp_level<=OC_PP_LEVEL_DISABLED){ michael@0: if(_dec->dc_qis!=NULL){ michael@0: _ogg_free(_dec->dc_qis); michael@0: _dec->dc_qis=NULL; michael@0: _ogg_free(_dec->variances); michael@0: _dec->variances=NULL; michael@0: _ogg_free(_dec->pp_frame_data); michael@0: _dec->pp_frame_data=NULL; michael@0: } michael@0: return 1; michael@0: } michael@0: if(_dec->dc_qis==NULL){ michael@0: /*If we haven't been tracking DC quantization indices, there's no point in michael@0: starting now.*/ michael@0: if(_dec->state.frame_type!=OC_INTRA_FRAME)return 1; michael@0: _dec->dc_qis=(unsigned char *)_ogg_malloc( michael@0: _dec->state.nfrags*sizeof(_dec->dc_qis[0])); michael@0: if(_dec->dc_qis==NULL)return 1; michael@0: memset(_dec->dc_qis,_dec->state.qis[0],_dec->state.nfrags); michael@0: } michael@0: else{ michael@0: unsigned char *dc_qis; michael@0: const ptrdiff_t *coded_fragis; michael@0: ptrdiff_t ncoded_fragis; michael@0: ptrdiff_t fragii; michael@0: unsigned char qi0; michael@0: /*Update the DC quantization index of each coded block.*/ michael@0: dc_qis=_dec->dc_qis; michael@0: coded_fragis=_dec->state.coded_fragis; michael@0: ncoded_fragis=_dec->state.ncoded_fragis[0]+ michael@0: _dec->state.ncoded_fragis[1]+_dec->state.ncoded_fragis[2]; michael@0: qi0=(unsigned char)_dec->state.qis[0]; michael@0: for(fragii=0;fragiipp_level<=OC_PP_LEVEL_TRACKDCQI){ michael@0: if(_dec->variances!=NULL){ michael@0: _ogg_free(_dec->variances); michael@0: _dec->variances=NULL; michael@0: _ogg_free(_dec->pp_frame_data); michael@0: _dec->pp_frame_data=NULL; michael@0: } michael@0: return 1; michael@0: } michael@0: if(_dec->variances==NULL){ michael@0: size_t frame_sz; michael@0: size_t c_sz; michael@0: int c_w; michael@0: int c_h; michael@0: frame_sz=_dec->state.info.frame_width*(size_t)_dec->state.info.frame_height; michael@0: c_w=_dec->state.info.frame_width>>!(_dec->state.info.pixel_fmt&1); michael@0: c_h=_dec->state.info.frame_height>>!(_dec->state.info.pixel_fmt&2); michael@0: c_sz=c_w*(size_t)c_h; michael@0: /*Allocate space for the chroma planes, even if we're not going to use michael@0: them; this simplifies allocation state management, though it may waste michael@0: memory on the few systems that don't overcommit pages.*/ michael@0: 
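    /*Worked example (reader aid): for a 640x480 4:2:0 frame (pixel_fmt 0),
       c_w=320 and c_h=240, so the post-processing buffer allocated below
       totals 640*480+2*320*240=460800 bytes; for 4:4:4 (pixel_fmt 3) the
       chroma planes are full size and the total is 3*640*480=921600 bytes.*/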
frame_sz+=c_sz<<1; michael@0: _dec->pp_frame_data=(unsigned char *)_ogg_malloc( michael@0: frame_sz*sizeof(_dec->pp_frame_data[0])); michael@0: _dec->variances=(int *)_ogg_malloc( michael@0: _dec->state.nfrags*sizeof(_dec->variances[0])); michael@0: if(_dec->variances==NULL||_dec->pp_frame_data==NULL){ michael@0: _ogg_free(_dec->pp_frame_data); michael@0: _dec->pp_frame_data=NULL; michael@0: _ogg_free(_dec->variances); michael@0: _dec->variances=NULL; michael@0: return 1; michael@0: } michael@0: /*Force an update of the PP buffer pointers.*/ michael@0: _dec->pp_frame_state=0; michael@0: } michael@0: /*Update the PP buffer pointers if necessary.*/ michael@0: if(_dec->pp_frame_state!=1+(_dec->pp_level>=OC_PP_LEVEL_DEBLOCKC)){ michael@0: if(_dec->pp_levelpp_frame_buf[0].width=_dec->state.info.frame_width; michael@0: _dec->pp_frame_buf[0].height=_dec->state.info.frame_height; michael@0: _dec->pp_frame_buf[0].stride=-_dec->pp_frame_buf[0].width; michael@0: _dec->pp_frame_buf[0].data=_dec->pp_frame_data+ michael@0: (1-_dec->pp_frame_buf[0].height)*(ptrdiff_t)_dec->pp_frame_buf[0].stride; michael@0: } michael@0: else{ michael@0: size_t y_sz; michael@0: size_t c_sz; michael@0: int c_w; michael@0: int c_h; michael@0: /*Otherwise, set up pointers to all three PP planes.*/ michael@0: y_sz=_dec->state.info.frame_width*(size_t)_dec->state.info.frame_height; michael@0: c_w=_dec->state.info.frame_width>>!(_dec->state.info.pixel_fmt&1); michael@0: c_h=_dec->state.info.frame_height>>!(_dec->state.info.pixel_fmt&2); michael@0: c_sz=c_w*(size_t)c_h; michael@0: _dec->pp_frame_buf[0].width=_dec->state.info.frame_width; michael@0: _dec->pp_frame_buf[0].height=_dec->state.info.frame_height; michael@0: _dec->pp_frame_buf[0].stride=_dec->pp_frame_buf[0].width; michael@0: _dec->pp_frame_buf[0].data=_dec->pp_frame_data; michael@0: _dec->pp_frame_buf[1].width=c_w; michael@0: _dec->pp_frame_buf[1].height=c_h; michael@0: _dec->pp_frame_buf[1].stride=_dec->pp_frame_buf[1].width; michael@0: _dec->pp_frame_buf[1].data=_dec->pp_frame_buf[0].data+y_sz; michael@0: _dec->pp_frame_buf[2].width=c_w; michael@0: _dec->pp_frame_buf[2].height=c_h; michael@0: _dec->pp_frame_buf[2].stride=_dec->pp_frame_buf[2].width; michael@0: _dec->pp_frame_buf[2].data=_dec->pp_frame_buf[1].data+c_sz; michael@0: oc_ycbcr_buffer_flip(_dec->pp_frame_buf,_dec->pp_frame_buf); michael@0: } michael@0: _dec->pp_frame_state=1+(_dec->pp_level>=OC_PP_LEVEL_DEBLOCKC); michael@0: } michael@0: /*If we're not processing chroma, copy the reference frame's chroma planes.*/ michael@0: if(_dec->pp_levelpp_frame_buf+1, michael@0: _dec->state.ref_frame_bufs[_dec->state.ref_frame_idx[OC_FRAME_SELF]]+1, michael@0: sizeof(_dec->pp_frame_buf[1])*2); michael@0: } michael@0: return 0; michael@0: } michael@0: michael@0: michael@0: /*Initialize the main decoding pipeline.*/ michael@0: static void oc_dec_pipeline_init(oc_dec_ctx *_dec, michael@0: oc_dec_pipeline_state *_pipe){ michael@0: const ptrdiff_t *coded_fragis; michael@0: const ptrdiff_t *uncoded_fragis; michael@0: int flimit; michael@0: int pli; michael@0: int qii; michael@0: int qti; michael@0: int zzi; michael@0: /*If chroma is sub-sampled in the vertical direction, we have to decode two michael@0: super block rows of Y' for each super block row of Cb and Cr.*/ michael@0: _pipe->mcu_nvfrags=4<state.info.pixel_fmt&2); michael@0: /*Initialize the token and extra bits indices for each plane and michael@0: coefficient.*/ michael@0: memcpy(_pipe->ti,_dec->ti0,sizeof(_pipe->ti)); michael@0: /*Also copy over the initial the 
EOB run counts.*/ michael@0: memcpy(_pipe->eob_runs,_dec->eob_runs,sizeof(_pipe->eob_runs)); michael@0: /*Set up per-plane pointers to the coded and uncoded fragments lists.*/ michael@0: coded_fragis=_dec->state.coded_fragis; michael@0: uncoded_fragis=coded_fragis+_dec->state.nfrags; michael@0: for(pli=0;pli<3;pli++){ michael@0: ptrdiff_t ncoded_fragis; michael@0: _pipe->coded_fragis[pli]=coded_fragis; michael@0: _pipe->uncoded_fragis[pli]=uncoded_fragis; michael@0: ncoded_fragis=_dec->state.ncoded_fragis[pli]; michael@0: coded_fragis+=ncoded_fragis; michael@0: uncoded_fragis+=ncoded_fragis-_dec->state.fplanes[pli].nfrags; michael@0: } michael@0: /*Set up condensed quantizer tables.*/ michael@0: for(pli=0;pli<3;pli++){ michael@0: for(qii=0;qii<_dec->state.nqis;qii++){ michael@0: for(qti=0;qti<2;qti++){ michael@0: _pipe->dequant[pli][qii][qti]= michael@0: _dec->state.dequant_tables[_dec->state.qis[qii]][pli][qti]; michael@0: } michael@0: } michael@0: } michael@0: /*Set the previous DC predictor to 0 for all color planes and frame types.*/ michael@0: memset(_pipe->pred_last,0,sizeof(_pipe->pred_last)); michael@0: /*Initialize the bounding value array for the loop filter.*/ michael@0: flimit=_dec->state.loop_filter_limits[_dec->state.qis[0]]; michael@0: _pipe->loop_filter=flimit!=0; michael@0: if(flimit!=0)oc_loop_filter_init(&_dec->state,_pipe->bounding_values,flimit); michael@0: /*Initialize any buffers needed for post-processing. michael@0: We also save the current post-processing level, to guard against the user michael@0: changing it from a callback.*/ michael@0: if(!oc_dec_postprocess_init(_dec))_pipe->pp_level=_dec->pp_level; michael@0: /*If we don't have enough information to post-process, disable it, regardless michael@0: of the user-requested level.*/ michael@0: else{ michael@0: _pipe->pp_level=OC_PP_LEVEL_DISABLED; michael@0: memcpy(_dec->pp_frame_buf, michael@0: _dec->state.ref_frame_bufs[_dec->state.ref_frame_idx[OC_FRAME_SELF]], michael@0: sizeof(_dec->pp_frame_buf[0])*3); michael@0: } michael@0: /*Clear down the DCT coefficient buffer for the first block.*/ michael@0: for(zzi=0;zzi<64;zzi++)_pipe->dct_coeffs[zzi]=0; michael@0: } michael@0: michael@0: /*Undo the DC prediction in a single plane of an MCU (one or two super block michael@0: rows). 
michael@0: As a side effect, the number of coded and uncoded fragments in this plane of michael@0: the MCU is also computed.*/ michael@0: void oc_dec_dc_unpredict_mcu_plane_c(oc_dec_ctx *_dec, michael@0: oc_dec_pipeline_state *_pipe,int _pli){ michael@0: const oc_fragment_plane *fplane; michael@0: oc_fragment *frags; michael@0: int *pred_last; michael@0: ptrdiff_t ncoded_fragis; michael@0: ptrdiff_t fragi; michael@0: int fragx; michael@0: int fragy; michael@0: int fragy0; michael@0: int fragy_end; michael@0: int nhfrags; michael@0: /*Compute the first and last fragment row of the current MCU for this michael@0: plane.*/ michael@0: fplane=_dec->state.fplanes+_pli; michael@0: fragy0=_pipe->fragy0[_pli]; michael@0: fragy_end=_pipe->fragy_end[_pli]; michael@0: nhfrags=fplane->nhfrags; michael@0: pred_last=_pipe->pred_last[_pli]; michael@0: frags=_dec->state.frags; michael@0: ncoded_fragis=0; michael@0: fragi=fplane->froffset+fragy0*(ptrdiff_t)nhfrags; michael@0: for(fragy=fragy0;fragy=nhfrags)ur_ref=-1; michael@0: else ur_ref=u_frags[fragi+1].refi; michael@0: if(frags[fragi].coded){ michael@0: int pred; michael@0: int refi; michael@0: refi=frags[fragi].refi; michael@0: /*We break out a separate case based on which of our neighbors use michael@0: the same reference frames. michael@0: This is somewhat faster than trying to make a generic case which michael@0: handles all of them, since it reduces lots of poorly predicted michael@0: jumps to one switch statement, and also lets a number of the michael@0: multiplications be optimized out by strength reduction.*/ michael@0: switch((l_ref==refi)|(ul_ref==refi)<<1| michael@0: (u_ref==refi)<<2|(ur_ref==refi)<<3){ michael@0: default:pred=pred_last[refi];break; michael@0: case 1: michael@0: case 3:pred=frags[fragi-1].dc;break; michael@0: case 2:pred=u_frags[fragi-1].dc;break; michael@0: case 4: michael@0: case 6: michael@0: case 12:pred=u_frags[fragi].dc;break; michael@0: case 5:pred=(frags[fragi-1].dc+u_frags[fragi].dc)/2;break; michael@0: case 8:pred=u_frags[fragi+1].dc;break; michael@0: case 9: michael@0: case 11: michael@0: case 13:{ michael@0: /*The TI compiler mis-compiles this line.*/ michael@0: pred=(75*frags[fragi-1].dc+53*u_frags[fragi+1].dc)/128; michael@0: }break; michael@0: case 10:pred=(u_frags[fragi-1].dc+u_frags[fragi+1].dc)/2;break; michael@0: case 14:{ michael@0: pred=(3*(u_frags[fragi-1].dc+u_frags[fragi+1].dc) michael@0: +10*u_frags[fragi].dc)/16; michael@0: }break; michael@0: case 7: michael@0: case 15:{ michael@0: int p0; michael@0: int p1; michael@0: int p2; michael@0: p0=frags[fragi-1].dc; michael@0: p1=u_frags[fragi-1].dc; michael@0: p2=u_frags[fragi].dc; michael@0: pred=(29*(p0+p2)-26*p1)/32; michael@0: if(abs(pred-p2)>128)pred=p2; michael@0: else if(abs(pred-p0)>128)pred=p0; michael@0: else if(abs(pred-p1)>128)pred=p1; michael@0: }break; michael@0: } michael@0: pred_last[refi]=frags[fragi].dc+=pred; michael@0: ncoded_fragis++; michael@0: l_ref=refi; michael@0: } michael@0: else l_ref=-1; michael@0: ul_ref=u_ref; michael@0: u_ref=ur_ref; michael@0: } michael@0: } michael@0: } michael@0: _pipe->ncoded_fragis[_pli]=ncoded_fragis; michael@0: /*Also save the number of uncoded fragments so we know how many to copy.*/ michael@0: _pipe->nuncoded_fragis[_pli]= michael@0: (fragy_end-fragy0)*(ptrdiff_t)nhfrags-ncoded_fragis; michael@0: } michael@0: michael@0: /*Reconstructs all coded fragments in a single MCU (one or two super block michael@0: rows). 
michael@0: This requires that each coded fragment have a proper macro block mode and michael@0: motion vector (if not in INTRA mode), and have its DC value decoded, with michael@0: the DC prediction process reversed, and the number of coded and uncoded michael@0: fragments in this plane of the MCU be counted. michael@0: The token lists for each color plane and coefficient should also be filled michael@0: in, along with initial token offsets, extra bits offsets, and EOB run michael@0: counts.*/ michael@0: static void oc_dec_frags_recon_mcu_plane(oc_dec_ctx *_dec, michael@0: oc_dec_pipeline_state *_pipe,int _pli){ michael@0: unsigned char *dct_tokens; michael@0: const unsigned char *dct_fzig_zag; michael@0: ogg_uint16_t dc_quant[2]; michael@0: const oc_fragment *frags; michael@0: const ptrdiff_t *coded_fragis; michael@0: ptrdiff_t ncoded_fragis; michael@0: ptrdiff_t fragii; michael@0: ptrdiff_t *ti; michael@0: ptrdiff_t *eob_runs; michael@0: int qti; michael@0: dct_tokens=_dec->dct_tokens; michael@0: dct_fzig_zag=_dec->state.opt_data.dct_fzig_zag; michael@0: frags=_dec->state.frags; michael@0: coded_fragis=_pipe->coded_fragis[_pli]; michael@0: ncoded_fragis=_pipe->ncoded_fragis[_pli]; michael@0: ti=_pipe->ti[_pli]; michael@0: eob_runs=_pipe->eob_runs[_pli]; michael@0: for(qti=0;qti<2;qti++)dc_quant[qti]=_pipe->dequant[_pli][0][qti][0]; michael@0: for(fragii=0;fragiidequant[_pli][frags[fragi].qii][qti]; michael@0: /*Decode the AC coefficients.*/ michael@0: for(zzi=0;zzi<64;){ michael@0: int token; michael@0: last_zzi=zzi; michael@0: if(eob_runs[zzi]){ michael@0: eob_runs[zzi]--; michael@0: break; michael@0: } michael@0: else{ michael@0: ptrdiff_t eob; michael@0: int cw; michael@0: int rlen; michael@0: int coeff; michael@0: int lti; michael@0: lti=ti[zzi]; michael@0: token=dct_tokens[lti++]; michael@0: cw=OC_DCT_CODE_WORD[token]; michael@0: /*These parts could be done branchless, but the branches are fairly michael@0: predictable and the C code translates into more than a few michael@0: instructions, so it's worth it to avoid them.*/ michael@0: if(OC_DCT_TOKEN_NEEDS_MORE(token)){ michael@0: cw+=dct_tokens[lti++]<>OC_DCT_CW_EOB_SHIFT&0xFFF; michael@0: if(token==OC_DCT_TOKEN_FAT_EOB){ michael@0: eob+=dct_tokens[lti++]<<8; michael@0: if(eob==0)eob=OC_DCT_EOB_FINISH; michael@0: } michael@0: rlen=(unsigned char)(cw>>OC_DCT_CW_RLEN_SHIFT); michael@0: cw^=-(cw&1<>OC_DCT_CW_MAG_SHIFT; michael@0: eob_runs[zzi]=eob; michael@0: ti[zzi]=lti; michael@0: zzi+=rlen; michael@0: _pipe->dct_coeffs[dct_fzig_zag[zzi]]= michael@0: (ogg_int16_t)(coeff*(int)ac_quant[zzi]); michael@0: zzi+=!eob; michael@0: } michael@0: } michael@0: /*TODO: zzi should be exactly 64 here. michael@0: If it's not, we should report some kind of warning.*/ michael@0: zzi=OC_MINI(zzi,64); michael@0: _pipe->dct_coeffs[0]=(ogg_int16_t)frags[fragi].dc; michael@0: /*last_zzi is always initialized. michael@0: If your compiler thinks otherwise, it is dumb.*/ michael@0: oc_state_frag_recon(&_dec->state,fragi,_pli, michael@0: _pipe->dct_coeffs,last_zzi,dc_quant[qti]); michael@0: } michael@0: _pipe->coded_fragis[_pli]+=ncoded_fragis; michael@0: /*Right now the reconstructed MCU has only the coded blocks in it.*/ michael@0: /*TODO: We make the decision here to always copy the uncoded blocks into it michael@0: from the reference frame. michael@0: We could also copy the coded blocks back over the reference frame, if we michael@0: wait for an additional MCU to be decoded, which might be faster if only a michael@0: small number of blocks are coded. 
michael@0: However, this introduces more latency, creating a larger cache footprint. michael@0: It's unknown which decision is better, but this one results in simpler michael@0: code, and the hard case (high bitrate, high resolution) is handled michael@0: correctly.*/ michael@0: /*Copy the uncoded blocks from the previous reference frame.*/ michael@0: if(_pipe->nuncoded_fragis[_pli]>0){ michael@0: _pipe->uncoded_fragis[_pli]-=_pipe->nuncoded_fragis[_pli]; michael@0: oc_frag_copy_list(&_dec->state, michael@0: _dec->state.ref_frame_data[OC_FRAME_SELF], michael@0: _dec->state.ref_frame_data[OC_FRAME_PREV], michael@0: _dec->state.ref_ystride[_pli],_pipe->uncoded_fragis[_pli], michael@0: _pipe->nuncoded_fragis[_pli],_dec->state.frag_buf_offs); michael@0: } michael@0: } michael@0: michael@0: /*Filter a horizontal block edge.*/ michael@0: static void oc_filter_hedge(unsigned char *_dst,int _dst_ystride, michael@0: const unsigned char *_src,int _src_ystride,int _qstep,int _flimit, michael@0: int *_variance0,int *_variance1){ michael@0: unsigned char *rdst; michael@0: const unsigned char *rsrc; michael@0: unsigned char *cdst; michael@0: const unsigned char *csrc; michael@0: int r[10]; michael@0: int sum0; michael@0: int sum1; michael@0: int bx; michael@0: int by; michael@0: rdst=_dst; michael@0: rsrc=_src; michael@0: for(bx=0;bx<8;bx++){ michael@0: cdst=rdst; michael@0: csrc=rsrc; michael@0: for(by=0;by<10;by++){ michael@0: r[by]=*csrc; michael@0: csrc+=_src_ystride; michael@0: } michael@0: sum0=sum1=0; michael@0: for(by=0;by<4;by++){ michael@0: sum0+=abs(r[by+1]-r[by]); michael@0: sum1+=abs(r[by+5]-r[by+6]); michael@0: } michael@0: *_variance0+=OC_MINI(255,sum0); michael@0: *_variance1+=OC_MINI(255,sum1); michael@0: if(sum0<_flimit&&sum1<_flimit&&r[5]-r[4]<_qstep&&r[4]-r[5]<_qstep){ michael@0: *cdst=(unsigned char)(r[0]*3+r[1]*2+r[2]+r[3]+r[4]+4>>3); michael@0: cdst+=_dst_ystride; michael@0: *cdst=(unsigned char)(r[0]*2+r[1]+r[2]*2+r[3]+r[4]+r[5]+4>>3); michael@0: cdst+=_dst_ystride; michael@0: for(by=0;by<4;by++){ michael@0: *cdst=(unsigned char)(r[by]+r[by+1]+r[by+2]+r[by+3]*2+ michael@0: r[by+4]+r[by+5]+r[by+6]+4>>3); michael@0: cdst+=_dst_ystride; michael@0: } michael@0: *cdst=(unsigned char)(r[4]+r[5]+r[6]+r[7]*2+r[8]+r[9]*2+4>>3); michael@0: cdst+=_dst_ystride; michael@0: *cdst=(unsigned char)(r[5]+r[6]+r[7]+r[8]*2+r[9]*3+4>>3); michael@0: } michael@0: else{ michael@0: for(by=1;by<=8;by++){ michael@0: *cdst=(unsigned char)r[by]; michael@0: cdst+=_dst_ystride; michael@0: } michael@0: } michael@0: rdst++; michael@0: rsrc++; michael@0: } michael@0: } michael@0: michael@0: /*Filter a vertical block edge.*/ michael@0: static void oc_filter_vedge(unsigned char *_dst,int _dst_ystride, michael@0: int _qstep,int _flimit,int *_variances){ michael@0: unsigned char *rdst; michael@0: const unsigned char *rsrc; michael@0: unsigned char *cdst; michael@0: int r[10]; michael@0: int sum0; michael@0: int sum1; michael@0: int bx; michael@0: int by; michael@0: cdst=_dst; michael@0: for(by=0;by<8;by++){ michael@0: rsrc=cdst-1; michael@0: rdst=cdst; michael@0: for(bx=0;bx<10;bx++)r[bx]=*rsrc++; michael@0: sum0=sum1=0; michael@0: for(bx=0;bx<4;bx++){ michael@0: sum0+=abs(r[bx+1]-r[bx]); michael@0: sum1+=abs(r[bx+5]-r[bx+6]); michael@0: } michael@0: _variances[0]+=OC_MINI(255,sum0); michael@0: _variances[1]+=OC_MINI(255,sum1); michael@0: if(sum0<_flimit&&sum1<_flimit&&r[5]-r[4]<_qstep&&r[4]-r[5]<_qstep){ michael@0: *rdst++=(unsigned char)(r[0]*3+r[1]*2+r[2]+r[3]+r[4]+4>>3); michael@0: *rdst++=(unsigned 
char)(r[0]*2+r[1]+r[2]*2+r[3]+r[4]+r[5]+4>>3); michael@0: for(bx=0;bx<4;bx++){ michael@0: *rdst++=(unsigned char)(r[bx]+r[bx+1]+r[bx+2]+r[bx+3]*2+ michael@0: r[bx+4]+r[bx+5]+r[bx+6]+4>>3); michael@0: } michael@0: *rdst++=(unsigned char)(r[4]+r[5]+r[6]+r[7]*2+r[8]+r[9]*2+4>>3); michael@0: *rdst=(unsigned char)(r[5]+r[6]+r[7]+r[8]*2+r[9]*3+4>>3); michael@0: } michael@0: cdst+=_dst_ystride; michael@0: } michael@0: } michael@0: michael@0: static void oc_dec_deblock_frag_rows(oc_dec_ctx *_dec, michael@0: th_img_plane *_dst,th_img_plane *_src,int _pli,int _fragy0, michael@0: int _fragy_end){ michael@0: oc_fragment_plane *fplane; michael@0: int *variance; michael@0: unsigned char *dc_qi; michael@0: unsigned char *dst; michael@0: const unsigned char *src; michael@0: ptrdiff_t froffset; michael@0: int dst_ystride; michael@0: int src_ystride; michael@0: int nhfrags; michael@0: int width; michael@0: int notstart; michael@0: int notdone; michael@0: int flimit; michael@0: int qstep; michael@0: int y_end; michael@0: int y; michael@0: int x; michael@0: _dst+=_pli; michael@0: _src+=_pli; michael@0: fplane=_dec->state.fplanes+_pli; michael@0: nhfrags=fplane->nhfrags; michael@0: froffset=fplane->froffset+_fragy0*(ptrdiff_t)nhfrags; michael@0: variance=_dec->variances+froffset; michael@0: dc_qi=_dec->dc_qis+froffset; michael@0: notstart=_fragy0>0; michael@0: notdone=_fragy_endnvfrags; michael@0: /*We want to clear an extra row of variances, except at the end.*/ michael@0: memset(variance+(nhfrags&-notstart),0, michael@0: (_fragy_end+notdone-_fragy0-notstart)*(nhfrags*sizeof(variance[0]))); michael@0: /*Except for the first time, we want to point to the middle of the row.*/ michael@0: y=(_fragy0<<3)+(notstart<<2); michael@0: dst_ystride=_dst->stride; michael@0: src_ystride=_src->stride; michael@0: dst=_dst->data+y*(ptrdiff_t)dst_ystride; michael@0: src=_src->data+y*(ptrdiff_t)src_ystride; michael@0: width=_dst->width; michael@0: for(;y<4;y++){ michael@0: memcpy(dst,src,width*sizeof(dst[0])); michael@0: dst+=dst_ystride; michael@0: src+=src_ystride; michael@0: } michael@0: /*We also want to skip the last row in the frame for this loop.*/ michael@0: y_end=_fragy_end-!notdone<<3; michael@0: for(;ypp_dc_scale[*dc_qi]; michael@0: flimit=(qstep*3)>>2; michael@0: oc_filter_hedge(dst,dst_ystride,src-src_ystride,src_ystride, michael@0: qstep,flimit,variance,variance+nhfrags); michael@0: variance++; michael@0: dc_qi++; michael@0: for(x=8;xpp_dc_scale[*dc_qi]; michael@0: flimit=(qstep*3)>>2; michael@0: oc_filter_hedge(dst+x,dst_ystride,src+x-src_ystride,src_ystride, michael@0: qstep,flimit,variance,variance+nhfrags); michael@0: oc_filter_vedge(dst+x-(dst_ystride<<2)-4,dst_ystride, michael@0: qstep,flimit,variance-1); michael@0: variance++; michael@0: dc_qi++; michael@0: } michael@0: dst+=dst_ystride<<3; michael@0: src+=src_ystride<<3; michael@0: } michael@0: /*And finally, handle the last row in the frame, if it's in the range.*/ michael@0: if(!notdone){ michael@0: int height; michael@0: height=_dst->height; michael@0: for(;ypp_dc_scale[*dc_qi++]; michael@0: flimit=(qstep*3)>>2; michael@0: oc_filter_vedge(dst+x-(dst_ystride<<3)-4,dst_ystride, michael@0: qstep,flimit,variance++); michael@0: } michael@0: } michael@0: } michael@0: michael@0: static void oc_dering_block(unsigned char *_idata,int _ystride,int _b, michael@0: int _dc_scale,int _sharp_mod,int _strong){ michael@0: static const unsigned char OC_MOD_MAX[2]={24,32}; michael@0: static const unsigned char OC_MOD_SHIFT[2]={1,0}; michael@0: const unsigned char *psrc; 
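  /*Reader aid: roughly, the loops below derive a weight for each of a pixel's
     four neighbors from the local gradient, clamped to
     mod_hi=OC_MINI(3*_dc_scale,OC_MOD_MAX[_strong]), and then replace the
     pixel with
       dst[x]=OC_CLAMP255((a*src[x]+b)>>7), a=128-sum(w_i), b=64+sum(w_i*n_i),
     where the n_i are the left/right/up/down neighbors; weights are larger
     where the gradient is small, so flat areas are smoothed more strongly
     than sharp edges.*/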
michael@0: const unsigned char *src; michael@0: const unsigned char *nsrc; michael@0: unsigned char *dst; michael@0: int vmod[72]; michael@0: int hmod[72]; michael@0: int mod_hi; michael@0: int by; michael@0: int bx; michael@0: mod_hi=OC_MINI(3*_dc_scale,OC_MOD_MAX[_strong]); michael@0: dst=_idata; michael@0: src=dst; michael@0: psrc=src-(_ystride&-!(_b&4)); michael@0: for(by=0;by<9;by++){ michael@0: for(bx=0;bx<8;bx++){ michael@0: int mod; michael@0: mod=32+_dc_scale-(abs(src[bx]-psrc[bx])<>7); michael@0: for(bx=1;bx<7;bx++){ michael@0: a=128; michael@0: b=64; michael@0: w=hmod[(bx<<3)+by]; michael@0: a-=w; michael@0: b+=w*src[bx-1]; michael@0: w=vmod[(by<<3)+bx]; michael@0: a-=w; michael@0: b+=w*psrc[bx]; michael@0: w=vmod[(by+1<<3)+bx]; michael@0: a-=w; michael@0: b+=w*nsrc[bx]; michael@0: w=hmod[(bx+1<<3)+by]; michael@0: a-=w; michael@0: b+=w*src[bx+1]; michael@0: dst[bx]=OC_CLAMP255(a*src[bx]+b>>7); michael@0: } michael@0: a=128; michael@0: b=64; michael@0: w=hmod[(7<<3)+by]; michael@0: a-=w; michael@0: b+=w*src[6]; michael@0: w=vmod[(by<<3)+7]; michael@0: a-=w; michael@0: b+=w*psrc[7]; michael@0: w=vmod[(by+1<<3)+7]; michael@0: a-=w; michael@0: b+=w*nsrc[7]; michael@0: w=hmod[(8<<3)+by]; michael@0: a-=w; michael@0: b+=w*src[7+!(_b&2)]; michael@0: dst[7]=OC_CLAMP255(a*src[7]+b>>7); michael@0: dst+=_ystride; michael@0: psrc=src; michael@0: src=nsrc; michael@0: nsrc+=_ystride&-(!(_b&8)|by<6); michael@0: } michael@0: } michael@0: michael@0: #define OC_DERING_THRESH1 (384) michael@0: #define OC_DERING_THRESH2 (4*OC_DERING_THRESH1) michael@0: #define OC_DERING_THRESH3 (5*OC_DERING_THRESH1) michael@0: #define OC_DERING_THRESH4 (10*OC_DERING_THRESH1) michael@0: michael@0: static void oc_dec_dering_frag_rows(oc_dec_ctx *_dec,th_img_plane *_img, michael@0: int _pli,int _fragy0,int _fragy_end){ michael@0: th_img_plane *iplane; michael@0: oc_fragment_plane *fplane; michael@0: oc_fragment *frag; michael@0: int *variance; michael@0: unsigned char *idata; michael@0: ptrdiff_t froffset; michael@0: int ystride; michael@0: int nhfrags; michael@0: int sthresh; michael@0: int strong; michael@0: int y_end; michael@0: int width; michael@0: int height; michael@0: int y; michael@0: int x; michael@0: iplane=_img+_pli; michael@0: fplane=_dec->state.fplanes+_pli; michael@0: nhfrags=fplane->nhfrags; michael@0: froffset=fplane->froffset+_fragy0*(ptrdiff_t)nhfrags; michael@0: variance=_dec->variances+froffset; michael@0: frag=_dec->state.frags+froffset; michael@0: strong=_dec->pp_level>=(_pli?OC_PP_LEVEL_SDERINGC:OC_PP_LEVEL_SDERINGY); michael@0: sthresh=_pli?OC_DERING_THRESH4:OC_DERING_THRESH3; michael@0: y=_fragy0<<3; michael@0: ystride=iplane->stride; michael@0: idata=iplane->data+y*(ptrdiff_t)ystride; michael@0: y_end=_fragy_end<<3; michael@0: width=iplane->width; michael@0: height=iplane->height; michael@0: for(;ystate.qis[frag->qii]; michael@0: var=*variance; michael@0: b=(x<=0)|(x+8>=width)<<1|(y<=0)<<2|(y+8>=height)<<3; michael@0: if(strong&&var>sthresh){ michael@0: oc_dering_block(idata+x,ystride,b, michael@0: _dec->pp_dc_scale[qi],_dec->pp_sharp_mod[qi],1); michael@0: if(_pli||!(b&1)&&*(variance-1)>OC_DERING_THRESH4|| michael@0: !(b&2)&&variance[1]>OC_DERING_THRESH4|| michael@0: !(b&4)&&*(variance-nhfrags)>OC_DERING_THRESH4|| michael@0: !(b&8)&&variance[nhfrags]>OC_DERING_THRESH4){ michael@0: oc_dering_block(idata+x,ystride,b, michael@0: _dec->pp_dc_scale[qi],_dec->pp_sharp_mod[qi],1); michael@0: oc_dering_block(idata+x,ystride,b, michael@0: _dec->pp_dc_scale[qi],_dec->pp_sharp_mod[qi],1); michael@0: } 
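michael@0:       /*To summarize the cascade: a block with extreme variance gets one strong
michael@0:         de-ringing pass above, and chroma blocks, or luma blocks with at least
michael@0:         one neighbor whose variance also exceeds OC_DERING_THRESH4, get two
michael@0:         more passes; less ringy blocks fall through to the single-pass cases
michael@0:         below.*/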
michael@0: } michael@0: else if(var>OC_DERING_THRESH2){ michael@0: oc_dering_block(idata+x,ystride,b, michael@0: _dec->pp_dc_scale[qi],_dec->pp_sharp_mod[qi],1); michael@0: } michael@0: else if(var>OC_DERING_THRESH1){ michael@0: oc_dering_block(idata+x,ystride,b, michael@0: _dec->pp_dc_scale[qi],_dec->pp_sharp_mod[qi],0); michael@0: } michael@0: frag++; michael@0: variance++; michael@0: } michael@0: idata+=ystride<<3; michael@0: } michael@0: } michael@0: michael@0: michael@0: michael@0: th_dec_ctx *th_decode_alloc(const th_info *_info,const th_setup_info *_setup){ michael@0: oc_dec_ctx *dec; michael@0: if(_info==NULL||_setup==NULL)return NULL; michael@0: dec=oc_aligned_malloc(sizeof(*dec),16); michael@0: if(dec==NULL||oc_dec_init(dec,_info,_setup)<0){ michael@0: oc_aligned_free(dec); michael@0: return NULL; michael@0: } michael@0: dec->state.curframe_num=0; michael@0: return dec; michael@0: } michael@0: michael@0: void th_decode_free(th_dec_ctx *_dec){ michael@0: if(_dec!=NULL){ michael@0: oc_dec_clear(_dec); michael@0: oc_aligned_free(_dec); michael@0: } michael@0: } michael@0: michael@0: int th_decode_ctl(th_dec_ctx *_dec,int _req,void *_buf, michael@0: size_t _buf_sz){ michael@0: switch(_req){ michael@0: case TH_DECCTL_GET_PPLEVEL_MAX:{ michael@0: if(_dec==NULL||_buf==NULL)return TH_EFAULT; michael@0: if(_buf_sz!=sizeof(int))return TH_EINVAL; michael@0: (*(int *)_buf)=OC_PP_LEVEL_MAX; michael@0: return 0; michael@0: }break; michael@0: case TH_DECCTL_SET_PPLEVEL:{ michael@0: int pp_level; michael@0: if(_dec==NULL||_buf==NULL)return TH_EFAULT; michael@0: if(_buf_sz!=sizeof(int))return TH_EINVAL; michael@0: pp_level=*(int *)_buf; michael@0: if(pp_level<0||pp_level>OC_PP_LEVEL_MAX)return TH_EINVAL; michael@0: _dec->pp_level=pp_level; michael@0: return 0; michael@0: }break; michael@0: case TH_DECCTL_SET_GRANPOS:{ michael@0: ogg_int64_t granpos; michael@0: if(_dec==NULL||_buf==NULL)return TH_EFAULT; michael@0: if(_buf_sz!=sizeof(ogg_int64_t))return TH_EINVAL; michael@0: granpos=*(ogg_int64_t *)_buf; michael@0: if(granpos<0)return TH_EINVAL; michael@0: _dec->state.granpos=granpos; michael@0: _dec->state.keyframe_num=(granpos>>_dec->state.info.keyframe_granule_shift) michael@0: -_dec->state.granpos_bias; michael@0: _dec->state.curframe_num=_dec->state.keyframe_num michael@0: +(granpos&(1<<_dec->state.info.keyframe_granule_shift)-1); michael@0: return 0; michael@0: }break; michael@0: case TH_DECCTL_SET_STRIPE_CB:{ michael@0: th_stripe_callback *cb; michael@0: if(_dec==NULL||_buf==NULL)return TH_EFAULT; michael@0: if(_buf_sz!=sizeof(th_stripe_callback))return TH_EINVAL; michael@0: cb=(th_stripe_callback *)_buf; michael@0: _dec->stripe_cb.ctx=cb->ctx; michael@0: _dec->stripe_cb.stripe_decoded=cb->stripe_decoded; michael@0: return 0; michael@0: }break; michael@0: #ifdef HAVE_CAIRO michael@0: case TH_DECCTL_SET_TELEMETRY_MBMODE:{ michael@0: if(_dec==NULL||_buf==NULL)return TH_EFAULT; michael@0: if(_buf_sz!=sizeof(int))return TH_EINVAL; michael@0: _dec->telemetry=1; michael@0: _dec->telemetry_mbmode=*(int *)_buf; michael@0: return 0; michael@0: }break; michael@0: case TH_DECCTL_SET_TELEMETRY_MV:{ michael@0: if(_dec==NULL||_buf==NULL)return TH_EFAULT; michael@0: if(_buf_sz!=sizeof(int))return TH_EINVAL; michael@0: _dec->telemetry=1; michael@0: _dec->telemetry_mv=*(int *)_buf; michael@0: return 0; michael@0: }break; michael@0: case TH_DECCTL_SET_TELEMETRY_QI:{ michael@0: if(_dec==NULL||_buf==NULL)return TH_EFAULT; michael@0: if(_buf_sz!=sizeof(int))return TH_EINVAL; michael@0: _dec->telemetry=1; 
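michael@0:     /*The value is a bit mask: 0x1 prints the frame-level qi value(s) as text
michael@0:        in the lower-right corner of the output, and 0x2 marks each coded block
michael@0:        that uses a secondary or tertiary qi (see the overlay code in
michael@0:        th_decode_ycbcr_out() below).*/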
michael@0: _dec->telemetry_qi=*(int *)_buf; michael@0: return 0; michael@0: }break; michael@0: case TH_DECCTL_SET_TELEMETRY_BITS:{ michael@0: if(_dec==NULL||_buf==NULL)return TH_EFAULT; michael@0: if(_buf_sz!=sizeof(int))return TH_EINVAL; michael@0: _dec->telemetry=1; michael@0: _dec->telemetry_bits=*(int *)_buf; michael@0: return 0; michael@0: }break; michael@0: #endif michael@0: default:return TH_EIMPL; michael@0: } michael@0: } michael@0: michael@0: /*We're decoding an INTER frame, but have no initialized reference michael@0: buffers (i.e., decoding did not start on a key frame). michael@0: We initialize them to a solid gray here.*/ michael@0: static void oc_dec_init_dummy_frame(th_dec_ctx *_dec){ michael@0: th_info *info; michael@0: size_t yplane_sz; michael@0: size_t cplane_sz; michael@0: ptrdiff_t yoffset; michael@0: int yhstride; michael@0: int yheight; michael@0: int chstride; michael@0: int cheight; michael@0: _dec->state.ref_frame_idx[OC_FRAME_GOLD]=0; michael@0: _dec->state.ref_frame_idx[OC_FRAME_PREV]=0; michael@0: _dec->state.ref_frame_idx[OC_FRAME_SELF]=0; michael@0: _dec->state.ref_frame_data[OC_FRAME_GOLD]= michael@0: _dec->state.ref_frame_data[OC_FRAME_PREV]= michael@0: _dec->state.ref_frame_data[OC_FRAME_SELF]= michael@0: _dec->state.ref_frame_bufs[0][0].data; michael@0: memcpy(_dec->pp_frame_buf,_dec->state.ref_frame_bufs[0], michael@0: sizeof(_dec->pp_frame_buf[0])*3); michael@0: info=&_dec->state.info; michael@0: yhstride=abs(_dec->state.ref_ystride[0]); michael@0: yheight=info->frame_height+2*OC_UMV_PADDING; michael@0: chstride=abs(_dec->state.ref_ystride[1]); michael@0: cheight=yheight>>!(info->pixel_fmt&2); michael@0: yplane_sz=yhstride*(size_t)yheight+16; michael@0: cplane_sz=chstride*(size_t)cheight; michael@0: yoffset=yhstride*(ptrdiff_t)(yheight-OC_UMV_PADDING-1)+OC_UMV_PADDING; michael@0: memset(_dec->state.ref_frame_data[0]-yoffset,0x80,yplane_sz+2*cplane_sz); michael@0: } michael@0: michael@0: int th_decode_packetin(th_dec_ctx *_dec,const ogg_packet *_op, michael@0: ogg_int64_t *_granpos){ michael@0: int ret; michael@0: if(_dec==NULL||_op==NULL)return TH_EFAULT; michael@0: /*A completely empty packet indicates a dropped frame and is treated exactly michael@0: like an inter frame with no coded blocks.*/ michael@0: if(_op->bytes==0){ michael@0: _dec->state.frame_type=OC_INTER_FRAME; michael@0: _dec->state.ntotal_coded_fragis=0; michael@0: } michael@0: else{ michael@0: oc_pack_readinit(&_dec->opb,_op->packet,_op->bytes); michael@0: ret=oc_dec_frame_header_unpack(_dec); michael@0: if(ret<0)return ret; michael@0: if(_dec->state.frame_type==OC_INTRA_FRAME)oc_dec_mark_all_intra(_dec); michael@0: else oc_dec_coded_flags_unpack(_dec); michael@0: } michael@0: /*If there have been no reference frames, and we need one, initialize one.*/ michael@0: if(_dec->state.frame_type!=OC_INTRA_FRAME&& michael@0: (_dec->state.ref_frame_idx[OC_FRAME_GOLD]<0|| michael@0: _dec->state.ref_frame_idx[OC_FRAME_PREV]<0)){ michael@0: oc_dec_init_dummy_frame(_dec); michael@0: } michael@0: /*If this was an inter frame with no coded blocks...*/ michael@0: if(_dec->state.ntotal_coded_fragis<=0){ michael@0: /*Just update the granule position and return.*/ michael@0: _dec->state.granpos=(_dec->state.keyframe_num+_dec->state.granpos_bias<< michael@0: _dec->state.info.keyframe_granule_shift) michael@0: +(_dec->state.curframe_num-_dec->state.keyframe_num); michael@0: _dec->state.curframe_num++; michael@0: if(_granpos!=NULL)*_granpos=_dec->state.granpos; michael@0: return TH_DUPFRAME; michael@0: } 
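michael@0:   /*As a worked example of the granule position arithmetic above: with a
michael@0:      keyframe granule shift of 6 and keyframe_num+granpos_bias equal to 10,
michael@0:      the second frame after that keyframe gets granpos (10<<6)+2==642; the
michael@0:      frame count since the keyframe occupies the low bits and the keyframe
michael@0:      number the high bits.*/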
michael@0: else{ michael@0: th_ycbcr_buffer stripe_buf; michael@0: int stripe_fragy; michael@0: int refi; michael@0: int pli; michael@0: int notstart; michael@0: int notdone; michael@0: /*Select a free buffer to use for the reconstructed version of this frame.*/ michael@0: for(refi=0;refi==_dec->state.ref_frame_idx[OC_FRAME_GOLD]|| michael@0: refi==_dec->state.ref_frame_idx[OC_FRAME_PREV];refi++); michael@0: _dec->state.ref_frame_idx[OC_FRAME_SELF]=refi; michael@0: _dec->state.ref_frame_data[OC_FRAME_SELF]= michael@0: _dec->state.ref_frame_bufs[refi][0].data; michael@0: #if defined(HAVE_CAIRO) michael@0: _dec->telemetry_frame_bytes=_op->bytes; michael@0: #endif michael@0: if(_dec->state.frame_type==OC_INTRA_FRAME){ michael@0: _dec->state.keyframe_num=_dec->state.curframe_num; michael@0: #if defined(HAVE_CAIRO) michael@0: _dec->telemetry_coding_bytes= michael@0: _dec->telemetry_mode_bytes= michael@0: _dec->telemetry_mv_bytes=oc_pack_bytes_left(&_dec->opb); michael@0: #endif michael@0: } michael@0: else{ michael@0: #if defined(HAVE_CAIRO) michael@0: _dec->telemetry_coding_bytes=oc_pack_bytes_left(&_dec->opb); michael@0: #endif michael@0: oc_dec_mb_modes_unpack(_dec); michael@0: #if defined(HAVE_CAIRO) michael@0: _dec->telemetry_mode_bytes=oc_pack_bytes_left(&_dec->opb); michael@0: #endif michael@0: oc_dec_mv_unpack_and_frag_modes_fill(_dec); michael@0: #if defined(HAVE_CAIRO) michael@0: _dec->telemetry_mv_bytes=oc_pack_bytes_left(&_dec->opb); michael@0: #endif michael@0: } michael@0: oc_dec_block_qis_unpack(_dec); michael@0: #if defined(HAVE_CAIRO) michael@0: _dec->telemetry_qi_bytes=oc_pack_bytes_left(&_dec->opb); michael@0: #endif michael@0: oc_dec_residual_tokens_unpack(_dec); michael@0: /*Update granule position. michael@0: This must be done before the striped decode callbacks so that the michael@0: application knows what to do with the frame data.*/ michael@0: _dec->state.granpos=(_dec->state.keyframe_num+_dec->state.granpos_bias<< michael@0: _dec->state.info.keyframe_granule_shift) michael@0: +(_dec->state.curframe_num-_dec->state.keyframe_num); michael@0: _dec->state.curframe_num++; michael@0: if(_granpos!=NULL)*_granpos=_dec->state.granpos; michael@0: /*All of the rest of the operations -- DC prediction reversal, michael@0: reconstructing coded fragments, copying uncoded fragments, loop michael@0: filtering, extending borders, and out-of-loop post-processing -- should michael@0: be pipelined. michael@0: I.e., DC prediction reversal, reconstruction, and uncoded fragment michael@0: copying are done for one or two super block rows, then loop filtering is michael@0: run as far as it can, then bordering copying, then post-processing. michael@0: For 4:2:0 video a Minimum Codable Unit or MCU contains two luma super michael@0: block rows, and one chroma. michael@0: Otherwise, an MCU consists of one super block row from each plane. michael@0: Inside each MCU, we perform all of the steps on one color plane before michael@0: moving on to the next. michael@0: After reconstruction, the additional filtering stages introduce a delay michael@0: since they need some pixels from the next fragment row. michael@0: Thus the actual number of decoded rows available is slightly smaller for michael@0: the first MCU, and slightly larger for the last. michael@0: michael@0: This entire process allows us to operate on the data while it is still in michael@0: cache, resulting in big performance improvements. 
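michael@0:   For example, with 4:2:0 input each pass of the loop below advances
michael@0:   stripe_fragy by 8 luma fragment rows (64 luma pixel rows), while each
michael@0:   chroma plane advances 4 fragment rows, so an MCU covers the same picture
michael@0:   area in every plane.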
michael@0: An application callback allows further application processing (blitting michael@0: to video memory, color conversion, etc.) to also use the data while it's michael@0: in cache.*/ michael@0: oc_dec_pipeline_init(_dec,&_dec->pipe); michael@0: oc_ycbcr_buffer_flip(stripe_buf,_dec->pp_frame_buf); michael@0: notstart=0; michael@0: notdone=1; michael@0: for(stripe_fragy=0;notdone;stripe_fragy+=_dec->pipe.mcu_nvfrags){ michael@0: int avail_fragy0; michael@0: int avail_fragy_end; michael@0: avail_fragy0=avail_fragy_end=_dec->state.fplanes[0].nvfrags; michael@0: notdone=stripe_fragy+_dec->pipe.mcu_nvfragsstate.fplanes+pli; michael@0: /*Compute the first and last fragment row of the current MCU for this michael@0: plane.*/ michael@0: frag_shift=pli!=0&&!(_dec->state.info.pixel_fmt&2); michael@0: _dec->pipe.fragy0[pli]=stripe_fragy>>frag_shift; michael@0: _dec->pipe.fragy_end[pli]=OC_MINI(fplane->nvfrags, michael@0: _dec->pipe.fragy0[pli]+(_dec->pipe.mcu_nvfrags>>frag_shift)); michael@0: oc_dec_dc_unpredict_mcu_plane(_dec,&_dec->pipe,pli); michael@0: oc_dec_frags_recon_mcu_plane(_dec,&_dec->pipe,pli); michael@0: sdelay=edelay=0; michael@0: if(_dec->pipe.loop_filter){ michael@0: sdelay+=notstart; michael@0: edelay+=notdone; michael@0: oc_state_loop_filter_frag_rows(&_dec->state, michael@0: _dec->pipe.bounding_values,OC_FRAME_SELF,pli, michael@0: _dec->pipe.fragy0[pli]-sdelay,_dec->pipe.fragy_end[pli]-edelay); michael@0: } michael@0: /*To fill the borders, we have an additional two pixel delay, since a michael@0: fragment in the next row could filter its top edge, using two pixels michael@0: from a fragment in this row. michael@0: But there's no reason to delay a full fragment between the two.*/ michael@0: oc_state_borders_fill_rows(&_dec->state,refi,pli, michael@0: (_dec->pipe.fragy0[pli]-sdelay<<3)-(sdelay<<1), michael@0: (_dec->pipe.fragy_end[pli]-edelay<<3)-(edelay<<1)); michael@0: /*Out-of-loop post-processing.*/ michael@0: pp_offset=3*(pli!=0); michael@0: if(_dec->pipe.pp_level>=OC_PP_LEVEL_DEBLOCKY+pp_offset){ michael@0: /*Perform de-blocking in one plane.*/ michael@0: sdelay+=notstart; michael@0: edelay+=notdone; michael@0: oc_dec_deblock_frag_rows(_dec,_dec->pp_frame_buf, michael@0: _dec->state.ref_frame_bufs[refi],pli, michael@0: _dec->pipe.fragy0[pli]-sdelay,_dec->pipe.fragy_end[pli]-edelay); michael@0: if(_dec->pipe.pp_level>=OC_PP_LEVEL_DERINGY+pp_offset){ michael@0: /*Perform de-ringing in one plane.*/ michael@0: sdelay+=notstart; michael@0: edelay+=notdone; michael@0: oc_dec_dering_frag_rows(_dec,_dec->pp_frame_buf,pli, michael@0: _dec->pipe.fragy0[pli]-sdelay,_dec->pipe.fragy_end[pli]-edelay); michael@0: } michael@0: } michael@0: /*If no post-processing is done, we still need to delay a row for the michael@0: loop filter, thanks to the strange filtering order VP3 chose.*/ michael@0: else if(_dec->pipe.loop_filter){ michael@0: sdelay+=notstart; michael@0: edelay+=notdone; michael@0: } michael@0: /*Compute the intersection of the available rows in all planes. michael@0: If chroma is sub-sampled, the effect of each of its delays is michael@0: doubled, but luma might have more post-processing filters enabled michael@0: than chroma, so we don't know up front which one is the limiting michael@0: factor.*/ michael@0: avail_fragy0=OC_MINI(avail_fragy0, michael@0: _dec->pipe.fragy0[pli]-sdelay<pipe.fragy_end[pli]-edelay<stripe_cb.stripe_decoded!=NULL){ michael@0: /*The callback might want to use the FPU, so let's make sure they can. 
michael@0: We violate all kinds of ABI restrictions by not doing this until michael@0: now, but none of them actually matter since we don't use floating michael@0: point ourselves.*/ michael@0: oc_restore_fpu(&_dec->state); michael@0: /*Make the callback, ensuring we flip the sense of the "start" and michael@0: "end" of the available region upside down.*/ michael@0: (*_dec->stripe_cb.stripe_decoded)(_dec->stripe_cb.ctx,stripe_buf, michael@0: _dec->state.fplanes[0].nvfrags-avail_fragy_end, michael@0: _dec->state.fplanes[0].nvfrags-avail_fragy0); michael@0: } michael@0: notstart=1; michael@0: } michael@0: /*Finish filling in the reference frame borders.*/ michael@0: for(pli=0;pli<3;pli++)oc_state_borders_fill_caps(&_dec->state,refi,pli); michael@0: /*Update the reference frame indices.*/ michael@0: if(_dec->state.frame_type==OC_INTRA_FRAME){ michael@0: /*The new frame becomes both the previous and gold reference frames.*/ michael@0: _dec->state.ref_frame_idx[OC_FRAME_GOLD]= michael@0: _dec->state.ref_frame_idx[OC_FRAME_PREV]= michael@0: _dec->state.ref_frame_idx[OC_FRAME_SELF]; michael@0: _dec->state.ref_frame_data[OC_FRAME_GOLD]= michael@0: _dec->state.ref_frame_data[OC_FRAME_PREV]= michael@0: _dec->state.ref_frame_data[OC_FRAME_SELF]; michael@0: } michael@0: else{ michael@0: /*Otherwise, just replace the previous reference frame.*/ michael@0: _dec->state.ref_frame_idx[OC_FRAME_PREV]= michael@0: _dec->state.ref_frame_idx[OC_FRAME_SELF]; michael@0: _dec->state.ref_frame_data[OC_FRAME_PREV]= michael@0: _dec->state.ref_frame_data[OC_FRAME_SELF]; michael@0: } michael@0: /*Restore the FPU before dump_frame, since that _does_ use the FPU (for PNG michael@0: gamma values, if nothing else).*/ michael@0: oc_restore_fpu(&_dec->state); michael@0: #if defined(OC_DUMP_IMAGES) michael@0: /*We only dump images if there were some coded blocks.*/ michael@0: oc_state_dump_frame(&_dec->state,OC_FRAME_SELF,"dec"); michael@0: #endif michael@0: return 0; michael@0: } michael@0: } michael@0: michael@0: int th_decode_ycbcr_out(th_dec_ctx *_dec,th_ycbcr_buffer _ycbcr){ michael@0: if(_dec==NULL||_ycbcr==NULL)return TH_EFAULT; michael@0: oc_ycbcr_buffer_flip(_ycbcr,_dec->pp_frame_buf); michael@0: #if defined(HAVE_CAIRO) michael@0: /*If telemetry ioctls are active, we need to draw to the output buffer. michael@0: Stuff the plane into cairo.*/ michael@0: if(_dec->telemetry){ michael@0: cairo_surface_t *cs; michael@0: unsigned char *data; michael@0: unsigned char *y_row; michael@0: unsigned char *u_row; michael@0: unsigned char *v_row; michael@0: unsigned char *rgb_row; michael@0: int cstride; michael@0: int w; michael@0: int h; michael@0: int x; michael@0: int y; michael@0: int hdec; michael@0: int vdec; michael@0: w=_ycbcr[0].width; michael@0: h=_ycbcr[0].height; michael@0: hdec=!(_dec->state.info.pixel_fmt&1); michael@0: vdec=!(_dec->state.info.pixel_fmt&2); michael@0: /*Lazy data buffer init. michael@0: We could try to re-use the post-processing buffer, which would save michael@0: memory, but complicate the allocation logic there. 
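michael@0:     The cost is one extra uncompressed frame: w*h luma samples plus two
michael@0:     (w>>hdec)*(h>>vdec) chroma planes, matching the allocation below.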
michael@0: I don't think anyone cares about memory usage when using telemetry; it is michael@0: not meant for embedded devices.*/ michael@0: if(_dec->telemetry_frame_data==NULL){ michael@0: _dec->telemetry_frame_data=_ogg_malloc( michael@0: (w*h+2*(w>>hdec)*(h>>vdec))*sizeof(*_dec->telemetry_frame_data)); michael@0: if(_dec->telemetry_frame_data==NULL)return 0; michael@0: } michael@0: cs=cairo_image_surface_create(CAIRO_FORMAT_RGB24,w,h); michael@0: /*Sadly, no YUV support in Cairo (yet); convert into the RGB buffer.*/ michael@0: data=cairo_image_surface_get_data(cs); michael@0: if(data==NULL){ michael@0: cairo_surface_destroy(cs); michael@0: return 0; michael@0: } michael@0: cstride=cairo_image_surface_get_stride(cs); michael@0: y_row=_ycbcr[0].data; michael@0: u_row=_ycbcr[1].data; michael@0: v_row=_ycbcr[2].data; michael@0: rgb_row=data; michael@0: for(y=0;y>hdec]-363703744)/1635200; michael@0: g=(3827562*y_row[x]-1287801*u_row[x>>hdec] michael@0: -2672387*v_row[x>>hdec]+447306710)/3287200; michael@0: b=(952000*y_row[x]+1649289*u_row[x>>hdec]-225932192)/817600; michael@0: rgb_row[4*x+0]=OC_CLAMP255(b); michael@0: rgb_row[4*x+1]=OC_CLAMP255(g); michael@0: rgb_row[4*x+2]=OC_CLAMP255(r); michael@0: } michael@0: y_row+=_ycbcr[0].stride; michael@0: u_row+=_ycbcr[1].stride&-((y&1)|!vdec); michael@0: v_row+=_ycbcr[2].stride&-((y&1)|!vdec); michael@0: rgb_row+=cstride; michael@0: } michael@0: /*Draw coded identifier for each macroblock (stored in Hilbert order).*/ michael@0: { michael@0: cairo_t *c; michael@0: const oc_fragment *frags; michael@0: oc_mv *frag_mvs; michael@0: const signed char *mb_modes; michael@0: oc_mb_map *mb_maps; michael@0: size_t nmbs; michael@0: size_t mbi; michael@0: int row2; michael@0: int col2; michael@0: int qim[3]={0,0,0}; michael@0: if(_dec->state.nqis==2){ michael@0: int bqi; michael@0: bqi=_dec->state.qis[0]; michael@0: if(_dec->state.qis[1]>bqi)qim[1]=1; michael@0: if(_dec->state.qis[1]state.nqis==3){ michael@0: int bqi; michael@0: int cqi; michael@0: int dqi; michael@0: bqi=_dec->state.qis[0]; michael@0: cqi=_dec->state.qis[1]; michael@0: dqi=_dec->state.qis[2]; michael@0: if(cqi>bqi&&dqi>bqi){ michael@0: if(dqi>cqi){ michael@0: qim[1]=1; michael@0: qim[2]=2; michael@0: } michael@0: else{ michael@0: qim[1]=2; michael@0: qim[2]=1; michael@0: } michael@0: } michael@0: else if(cqistate.frags; michael@0: frag_mvs=_dec->state.frag_mvs; michael@0: mb_modes=_dec->state.mb_modes; michael@0: mb_maps=_dec->state.mb_maps; michael@0: nmbs=_dec->state.nmbs; michael@0: row2=0; michael@0: col2=0; michael@0: for(mbi=0;mbi>1)&1))*16-16; michael@0: x=(col2>>1)*16; michael@0: cairo_set_line_width(c,1.); michael@0: /*Keyframe (all intra) red box.*/ michael@0: if(_dec->state.frame_type==OC_INTRA_FRAME){ michael@0: if(_dec->telemetry_mbmode&0x02){ michael@0: cairo_set_source_rgba(c,1.,0,0,.5); michael@0: cairo_rectangle(c,x+2.5,y+2.5,11,11); michael@0: cairo_stroke_preserve(c); michael@0: cairo_set_source_rgba(c,1.,0,0,.25); michael@0: cairo_fill(c); michael@0: } michael@0: } michael@0: else{ michael@0: ptrdiff_t fragi; michael@0: int frag_mvx; michael@0: int frag_mvy; michael@0: for(bi=0;bi<4;bi++){ michael@0: fragi=mb_maps[mbi][0][bi]; michael@0: if(fragi>=0&&frags[fragi].coded){ michael@0: frag_mvx=OC_MV_X(frag_mvs[fragi]); michael@0: frag_mvy=OC_MV_Y(frag_mvs[fragi]); michael@0: break; michael@0: } michael@0: } michael@0: if(bi<4){ michael@0: switch(mb_modes[mbi]){ michael@0: case OC_MODE_INTRA:{ michael@0: if(_dec->telemetry_mbmode&0x02){ michael@0: 
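michael@0:                   /*Each coding mode has its own bit in telemetry_mbmode: 0x01
michael@0:                     INTER_NOMV (blue box), 0x02 INTRA (red box, as here), 0x04
michael@0:                     INTER_MV, 0x08 INTER_MV_LAST and 0x10 INTER_MV_LAST2
michael@0:                     (green), 0x20 GOLDEN_NOMV and 0x40 GOLDEN_MV (yellow), and
michael@0:                     0x80 INTER_MV_FOUR; the same bits in telemetry_mv enable
michael@0:                     the corresponding motion vector arrows.*/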
cairo_set_source_rgba(c,1.,0,0,.5); michael@0: cairo_rectangle(c,x+2.5,y+2.5,11,11); michael@0: cairo_stroke_preserve(c); michael@0: cairo_set_source_rgba(c,1.,0,0,.25); michael@0: cairo_fill(c); michael@0: } michael@0: }break; michael@0: case OC_MODE_INTER_NOMV:{ michael@0: if(_dec->telemetry_mbmode&0x01){ michael@0: cairo_set_source_rgba(c,0,0,1.,.5); michael@0: cairo_rectangle(c,x+2.5,y+2.5,11,11); michael@0: cairo_stroke_preserve(c); michael@0: cairo_set_source_rgba(c,0,0,1.,.25); michael@0: cairo_fill(c); michael@0: } michael@0: }break; michael@0: case OC_MODE_INTER_MV:{ michael@0: if(_dec->telemetry_mbmode&0x04){ michael@0: cairo_rectangle(c,x+2.5,y+2.5,11,11); michael@0: cairo_set_source_rgba(c,0,1.,0,.5); michael@0: cairo_stroke(c); michael@0: } michael@0: if(_dec->telemetry_mv&0x04){ michael@0: cairo_move_to(c,x+8+frag_mvx,y+8-frag_mvy); michael@0: cairo_set_source_rgba(c,1.,1.,1.,.9); michael@0: cairo_set_line_width(c,3.); michael@0: cairo_line_to(c,x+8+frag_mvx*.66,y+8-frag_mvy*.66); michael@0: cairo_stroke_preserve(c); michael@0: cairo_set_line_width(c,2.); michael@0: cairo_line_to(c,x+8+frag_mvx*.33,y+8-frag_mvy*.33); michael@0: cairo_stroke_preserve(c); michael@0: cairo_set_line_width(c,1.); michael@0: cairo_line_to(c,x+8,y+8); michael@0: cairo_stroke(c); michael@0: } michael@0: }break; michael@0: case OC_MODE_INTER_MV_LAST:{ michael@0: if(_dec->telemetry_mbmode&0x08){ michael@0: cairo_rectangle(c,x+2.5,y+2.5,11,11); michael@0: cairo_set_source_rgba(c,0,1.,0,.5); michael@0: cairo_move_to(c,x+13.5,y+2.5); michael@0: cairo_line_to(c,x+2.5,y+8); michael@0: cairo_line_to(c,x+13.5,y+13.5); michael@0: cairo_stroke(c); michael@0: } michael@0: if(_dec->telemetry_mv&0x08){ michael@0: cairo_move_to(c,x+8+frag_mvx,y+8-frag_mvy); michael@0: cairo_set_source_rgba(c,1.,1.,1.,.9); michael@0: cairo_set_line_width(c,3.); michael@0: cairo_line_to(c,x+8+frag_mvx*.66,y+8-frag_mvy*.66); michael@0: cairo_stroke_preserve(c); michael@0: cairo_set_line_width(c,2.); michael@0: cairo_line_to(c,x+8+frag_mvx*.33,y+8-frag_mvy*.33); michael@0: cairo_stroke_preserve(c); michael@0: cairo_set_line_width(c,1.); michael@0: cairo_line_to(c,x+8,y+8); michael@0: cairo_stroke(c); michael@0: } michael@0: }break; michael@0: case OC_MODE_INTER_MV_LAST2:{ michael@0: if(_dec->telemetry_mbmode&0x10){ michael@0: cairo_rectangle(c,x+2.5,y+2.5,11,11); michael@0: cairo_set_source_rgba(c,0,1.,0,.5); michael@0: cairo_move_to(c,x+8,y+2.5); michael@0: cairo_line_to(c,x+2.5,y+8); michael@0: cairo_line_to(c,x+8,y+13.5); michael@0: cairo_move_to(c,x+13.5,y+2.5); michael@0: cairo_line_to(c,x+8,y+8); michael@0: cairo_line_to(c,x+13.5,y+13.5); michael@0: cairo_stroke(c); michael@0: } michael@0: if(_dec->telemetry_mv&0x10){ michael@0: cairo_move_to(c,x+8+frag_mvx,y+8-frag_mvy); michael@0: cairo_set_source_rgba(c,1.,1.,1.,.9); michael@0: cairo_set_line_width(c,3.); michael@0: cairo_line_to(c,x+8+frag_mvx*.66,y+8-frag_mvy*.66); michael@0: cairo_stroke_preserve(c); michael@0: cairo_set_line_width(c,2.); michael@0: cairo_line_to(c,x+8+frag_mvx*.33,y+8-frag_mvy*.33); michael@0: cairo_stroke_preserve(c); michael@0: cairo_set_line_width(c,1.); michael@0: cairo_line_to(c,x+8,y+8); michael@0: cairo_stroke(c); michael@0: } michael@0: }break; michael@0: case OC_MODE_GOLDEN_NOMV:{ michael@0: if(_dec->telemetry_mbmode&0x20){ michael@0: cairo_set_source_rgba(c,1.,1.,0,.5); michael@0: cairo_rectangle(c,x+2.5,y+2.5,11,11); michael@0: cairo_stroke_preserve(c); michael@0: cairo_set_source_rgba(c,1.,1.,0,.25); michael@0: cairo_fill(c); michael@0: } 
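michael@0:               /*The motion vector overlays in the cases above and below draw
michael@0:                 a crude tapered arrow: three strokes of width 3, 2 and 1
michael@0:                 running from the displaced position back toward the block
michael@0:                 center, so the arrow is thickest at the vector's endpoint.*/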
michael@0: }break; michael@0: case OC_MODE_GOLDEN_MV:{ michael@0: if(_dec->telemetry_mbmode&0x40){ michael@0: cairo_rectangle(c,x+2.5,y+2.5,11,11); michael@0: cairo_set_source_rgba(c,1.,1.,0,.5); michael@0: cairo_stroke(c); michael@0: } michael@0: if(_dec->telemetry_mv&0x40){ michael@0: cairo_move_to(c,x+8+frag_mvx,y+8-frag_mvy); michael@0: cairo_set_source_rgba(c,1.,1.,1.,.9); michael@0: cairo_set_line_width(c,3.); michael@0: cairo_line_to(c,x+8+frag_mvx*.66,y+8-frag_mvy*.66); michael@0: cairo_stroke_preserve(c); michael@0: cairo_set_line_width(c,2.); michael@0: cairo_line_to(c,x+8+frag_mvx*.33,y+8-frag_mvy*.33); michael@0: cairo_stroke_preserve(c); michael@0: cairo_set_line_width(c,1.); michael@0: cairo_line_to(c,x+8,y+8); michael@0: cairo_stroke(c); michael@0: } michael@0: }break; michael@0: case OC_MODE_INTER_MV_FOUR:{ michael@0: if(_dec->telemetry_mbmode&0x80){ michael@0: cairo_rectangle(c,x+2.5,y+2.5,4,4); michael@0: cairo_rectangle(c,x+9.5,y+2.5,4,4); michael@0: cairo_rectangle(c,x+2.5,y+9.5,4,4); michael@0: cairo_rectangle(c,x+9.5,y+9.5,4,4); michael@0: cairo_set_source_rgba(c,0,1.,0,.5); michael@0: cairo_stroke(c); michael@0: } michael@0: /*4mv is odd, coded in raster order.*/ michael@0: fragi=mb_maps[mbi][0][0]; michael@0: if(frags[fragi].coded&&_dec->telemetry_mv&0x80){ michael@0: frag_mvx=OC_MV_X(frag_mvs[fragi]); michael@0: frag_mvx=OC_MV_Y(frag_mvs[fragi]); michael@0: cairo_move_to(c,x+4+frag_mvx,y+12-frag_mvy); michael@0: cairo_set_source_rgba(c,1.,1.,1.,.9); michael@0: cairo_set_line_width(c,3.); michael@0: cairo_line_to(c,x+4+frag_mvx*.66,y+12-frag_mvy*.66); michael@0: cairo_stroke_preserve(c); michael@0: cairo_set_line_width(c,2.); michael@0: cairo_line_to(c,x+4+frag_mvx*.33,y+12-frag_mvy*.33); michael@0: cairo_stroke_preserve(c); michael@0: cairo_set_line_width(c,1.); michael@0: cairo_line_to(c,x+4,y+12); michael@0: cairo_stroke(c); michael@0: } michael@0: fragi=mb_maps[mbi][0][1]; michael@0: if(frags[fragi].coded&&_dec->telemetry_mv&0x80){ michael@0: frag_mvx=OC_MV_X(frag_mvs[fragi]); michael@0: frag_mvx=OC_MV_Y(frag_mvs[fragi]); michael@0: cairo_move_to(c,x+12+frag_mvx,y+12-frag_mvy); michael@0: cairo_set_source_rgba(c,1.,1.,1.,.9); michael@0: cairo_set_line_width(c,3.); michael@0: cairo_line_to(c,x+12+frag_mvx*.66,y+12-frag_mvy*.66); michael@0: cairo_stroke_preserve(c); michael@0: cairo_set_line_width(c,2.); michael@0: cairo_line_to(c,x+12+frag_mvx*.33,y+12-frag_mvy*.33); michael@0: cairo_stroke_preserve(c); michael@0: cairo_set_line_width(c,1.); michael@0: cairo_line_to(c,x+12,y+12); michael@0: cairo_stroke(c); michael@0: } michael@0: fragi=mb_maps[mbi][0][2]; michael@0: if(frags[fragi].coded&&_dec->telemetry_mv&0x80){ michael@0: frag_mvx=OC_MV_X(frag_mvs[fragi]); michael@0: frag_mvx=OC_MV_Y(frag_mvs[fragi]); michael@0: cairo_move_to(c,x+4+frag_mvx,y+4-frag_mvy); michael@0: cairo_set_source_rgba(c,1.,1.,1.,.9); michael@0: cairo_set_line_width(c,3.); michael@0: cairo_line_to(c,x+4+frag_mvx*.66,y+4-frag_mvy*.66); michael@0: cairo_stroke_preserve(c); michael@0: cairo_set_line_width(c,2.); michael@0: cairo_line_to(c,x+4+frag_mvx*.33,y+4-frag_mvy*.33); michael@0: cairo_stroke_preserve(c); michael@0: cairo_set_line_width(c,1.); michael@0: cairo_line_to(c,x+4,y+4); michael@0: cairo_stroke(c); michael@0: } michael@0: fragi=mb_maps[mbi][0][3]; michael@0: if(frags[fragi].coded&&_dec->telemetry_mv&0x80){ michael@0: frag_mvx=OC_MV_X(frag_mvs[fragi]); michael@0: frag_mvx=OC_MV_Y(frag_mvs[fragi]); michael@0: cairo_move_to(c,x+12+frag_mvx,y+4-frag_mvy); michael@0: 
cairo_set_source_rgba(c,1.,1.,1.,.9); michael@0: cairo_set_line_width(c,3.); michael@0: cairo_line_to(c,x+12+frag_mvx*.66,y+4-frag_mvy*.66); michael@0: cairo_stroke_preserve(c); michael@0: cairo_set_line_width(c,2.); michael@0: cairo_line_to(c,x+12+frag_mvx*.33,y+4-frag_mvy*.33); michael@0: cairo_stroke_preserve(c); michael@0: cairo_set_line_width(c,1.); michael@0: cairo_line_to(c,x+12,y+4); michael@0: cairo_stroke(c); michael@0: } michael@0: }break; michael@0: } michael@0: } michael@0: } michael@0: /*qii illustration.*/ michael@0: if(_dec->telemetry_qi&0x2){ michael@0: cairo_set_line_cap(c,CAIRO_LINE_CAP_SQUARE); michael@0: for(bi=0;bi<4;bi++){ michael@0: ptrdiff_t fragi; michael@0: int qiv; michael@0: int xp; michael@0: int yp; michael@0: xp=x+(bi&1)*8; michael@0: yp=y+8-(bi&2)*4; michael@0: fragi=mb_maps[mbi][0][bi]; michael@0: if(fragi>=0&&frags[fragi].coded){ michael@0: qiv=qim[frags[fragi].qii]; michael@0: cairo_set_line_width(c,3.); michael@0: cairo_set_source_rgba(c,0.,0.,0.,.5); michael@0: switch(qiv){ michael@0: /*Double plus:*/ michael@0: case 2:{ michael@0: if((bi&1)^((bi&2)>>1)){ michael@0: cairo_move_to(c,xp+2.5,yp+1.5); michael@0: cairo_line_to(c,xp+2.5,yp+3.5); michael@0: cairo_move_to(c,xp+1.5,yp+2.5); michael@0: cairo_line_to(c,xp+3.5,yp+2.5); michael@0: cairo_move_to(c,xp+5.5,yp+4.5); michael@0: cairo_line_to(c,xp+5.5,yp+6.5); michael@0: cairo_move_to(c,xp+4.5,yp+5.5); michael@0: cairo_line_to(c,xp+6.5,yp+5.5); michael@0: cairo_stroke_preserve(c); michael@0: cairo_set_source_rgba(c,0.,1.,1.,1.); michael@0: } michael@0: else{ michael@0: cairo_move_to(c,xp+5.5,yp+1.5); michael@0: cairo_line_to(c,xp+5.5,yp+3.5); michael@0: cairo_move_to(c,xp+4.5,yp+2.5); michael@0: cairo_line_to(c,xp+6.5,yp+2.5); michael@0: cairo_move_to(c,xp+2.5,yp+4.5); michael@0: cairo_line_to(c,xp+2.5,yp+6.5); michael@0: cairo_move_to(c,xp+1.5,yp+5.5); michael@0: cairo_line_to(c,xp+3.5,yp+5.5); michael@0: cairo_stroke_preserve(c); michael@0: cairo_set_source_rgba(c,0.,1.,1.,1.); michael@0: } michael@0: }break; michael@0: /*Double minus:*/ michael@0: case -2:{ michael@0: cairo_move_to(c,xp+2.5,yp+2.5); michael@0: cairo_line_to(c,xp+5.5,yp+2.5); michael@0: cairo_move_to(c,xp+2.5,yp+5.5); michael@0: cairo_line_to(c,xp+5.5,yp+5.5); michael@0: cairo_stroke_preserve(c); michael@0: cairo_set_source_rgba(c,1.,1.,1.,1.); michael@0: }break; michael@0: /*Plus:*/ michael@0: case 1:{ michael@0: if(bi&2==0)yp-=2; michael@0: if(bi&1==0)xp-=2; michael@0: cairo_move_to(c,xp+4.5,yp+2.5); michael@0: cairo_line_to(c,xp+4.5,yp+6.5); michael@0: cairo_move_to(c,xp+2.5,yp+4.5); michael@0: cairo_line_to(c,xp+6.5,yp+4.5); michael@0: cairo_stroke_preserve(c); michael@0: cairo_set_source_rgba(c,.1,1.,.3,1.); michael@0: break; michael@0: } michael@0: /*Fall through.*/ michael@0: /*Minus:*/ michael@0: case -1:{ michael@0: cairo_move_to(c,xp+2.5,yp+4.5); michael@0: cairo_line_to(c,xp+6.5,yp+4.5); michael@0: cairo_stroke_preserve(c); michael@0: cairo_set_source_rgba(c,1.,.3,.1,1.); michael@0: }break; michael@0: default:continue; michael@0: } michael@0: cairo_set_line_width(c,1.); michael@0: cairo_stroke(c); michael@0: } michael@0: } michael@0: } michael@0: col2++; michael@0: if((col2>>1)>=_dec->state.nhmbs){ michael@0: col2=0; michael@0: row2+=2; michael@0: } michael@0: } michael@0: /*Bit usage indicator[s]:*/ michael@0: if(_dec->telemetry_bits){ michael@0: int widths[6]; michael@0: int fpsn; michael@0: int fpsd; michael@0: int mult; michael@0: int fullw; michael@0: int padw; michael@0: int i; michael@0: 
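michael@0:       /*Six stacked bars are drawn near the bottom edge, one per bitstream
michael@0:          section: frame header plus coded-block flags, macro block modes,
michael@0:          motion vectors, block-level qi selection, and (roughly) the DC and
michael@0:          remaining AC residual tokens.
michael@0:          The scale factor below makes a full-width bar correspond to about
michael@0:          250*h*mult bytes of payload per second of video, so the display
michael@0:          adapts to the frame size and rate.*/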
fpsn=_dec->state.info.fps_numerator; michael@0: fpsd=_dec->state.info.fps_denominator; michael@0: mult=(_dec->telemetry_bits>=0xFF?1:_dec->telemetry_bits); michael@0: fullw=250.f*h*fpsd*mult/fpsn; michael@0: padw=w-24; michael@0: /*Header and coded block bits.*/ michael@0: if(_dec->telemetry_frame_bytes<0|| michael@0: _dec->telemetry_frame_bytes==OC_LOTS_OF_BITS){ michael@0: _dec->telemetry_frame_bytes=0; michael@0: } michael@0: if(_dec->telemetry_coding_bytes<0|| michael@0: _dec->telemetry_coding_bytes>_dec->telemetry_frame_bytes){ michael@0: _dec->telemetry_coding_bytes=0; michael@0: } michael@0: if(_dec->telemetry_mode_bytes<0|| michael@0: _dec->telemetry_mode_bytes>_dec->telemetry_frame_bytes){ michael@0: _dec->telemetry_mode_bytes=0; michael@0: } michael@0: if(_dec->telemetry_mv_bytes<0|| michael@0: _dec->telemetry_mv_bytes>_dec->telemetry_frame_bytes){ michael@0: _dec->telemetry_mv_bytes=0; michael@0: } michael@0: if(_dec->telemetry_qi_bytes<0|| michael@0: _dec->telemetry_qi_bytes>_dec->telemetry_frame_bytes){ michael@0: _dec->telemetry_qi_bytes=0; michael@0: } michael@0: if(_dec->telemetry_dc_bytes<0|| michael@0: _dec->telemetry_dc_bytes>_dec->telemetry_frame_bytes){ michael@0: _dec->telemetry_dc_bytes=0; michael@0: } michael@0: widths[0]=padw*(_dec->telemetry_frame_bytes-_dec->telemetry_coding_bytes)/fullw; michael@0: widths[1]=padw*(_dec->telemetry_coding_bytes-_dec->telemetry_mode_bytes)/fullw; michael@0: widths[2]=padw*(_dec->telemetry_mode_bytes-_dec->telemetry_mv_bytes)/fullw; michael@0: widths[3]=padw*(_dec->telemetry_mv_bytes-_dec->telemetry_qi_bytes)/fullw; michael@0: widths[4]=padw*(_dec->telemetry_qi_bytes-_dec->telemetry_dc_bytes)/fullw; michael@0: widths[5]=padw*(_dec->telemetry_dc_bytes)/fullw; michael@0: for(i=0;i<6;i++)if(widths[i]>w)widths[i]=w; michael@0: cairo_set_source_rgba(c,.0,.0,.0,.6); michael@0: cairo_rectangle(c,10,h-33,widths[0]+1,5); michael@0: cairo_rectangle(c,10,h-29,widths[1]+1,5); michael@0: cairo_rectangle(c,10,h-25,widths[2]+1,5); michael@0: cairo_rectangle(c,10,h-21,widths[3]+1,5); michael@0: cairo_rectangle(c,10,h-17,widths[4]+1,5); michael@0: cairo_rectangle(c,10,h-13,widths[5]+1,5); michael@0: cairo_fill(c); michael@0: cairo_set_source_rgb(c,1,0,0); michael@0: cairo_rectangle(c,10.5,h-32.5,widths[0],4); michael@0: cairo_fill(c); michael@0: cairo_set_source_rgb(c,0,1,0); michael@0: cairo_rectangle(c,10.5,h-28.5,widths[1],4); michael@0: cairo_fill(c); michael@0: cairo_set_source_rgb(c,0,0,1); michael@0: cairo_rectangle(c,10.5,h-24.5,widths[2],4); michael@0: cairo_fill(c); michael@0: cairo_set_source_rgb(c,.6,.4,.0); michael@0: cairo_rectangle(c,10.5,h-20.5,widths[3],4); michael@0: cairo_fill(c); michael@0: cairo_set_source_rgb(c,.3,.3,.3); michael@0: cairo_rectangle(c,10.5,h-16.5,widths[4],4); michael@0: cairo_fill(c); michael@0: cairo_set_source_rgb(c,.5,.5,.8); michael@0: cairo_rectangle(c,10.5,h-12.5,widths[5],4); michael@0: cairo_fill(c); michael@0: } michael@0: /*Master qi indicator[s]:*/ michael@0: if(_dec->telemetry_qi&0x1){ michael@0: cairo_text_extents_t extents; michael@0: char buffer[10]; michael@0: int p; michael@0: int y; michael@0: p=0; michael@0: y=h-7.5; michael@0: if(_dec->state.qis[0]>=10)buffer[p++]=48+_dec->state.qis[0]/10; michael@0: buffer[p++]=48+_dec->state.qis[0]%10; michael@0: if(_dec->state.nqis>=2){ michael@0: buffer[p++]=' '; michael@0: if(_dec->state.qis[1]>=10)buffer[p++]=48+_dec->state.qis[1]/10; michael@0: buffer[p++]=48+_dec->state.qis[1]%10; michael@0: } michael@0: if(_dec->state.nqis==3){ michael@0: 
buffer[p++]=' '; michael@0: if(_dec->state.qis[2]>=10)buffer[p++]=48+_dec->state.qis[2]/10; michael@0: buffer[p++]=48+_dec->state.qis[2]%10; michael@0: } michael@0: buffer[p++]='\0'; michael@0: cairo_select_font_face(c,"sans", michael@0: CAIRO_FONT_SLANT_NORMAL,CAIRO_FONT_WEIGHT_BOLD); michael@0: cairo_set_font_size(c,18); michael@0: cairo_text_extents(c,buffer,&extents); michael@0: cairo_set_source_rgb(c,1,1,1); michael@0: cairo_move_to(c,w-extents.x_advance-10,y); michael@0: cairo_show_text(c,buffer); michael@0: cairo_set_source_rgb(c,0,0,0); michael@0: cairo_move_to(c,w-extents.x_advance-10,y); michael@0: cairo_text_path(c,buffer); michael@0: cairo_set_line_width(c,.8); michael@0: cairo_set_line_join(c,CAIRO_LINE_JOIN_ROUND); michael@0: cairo_stroke(c); michael@0: } michael@0: cairo_destroy(c); michael@0: } michael@0: /*Out of the Cairo plane into the telemetry YUV buffer.*/ michael@0: _ycbcr[0].data=_dec->telemetry_frame_data; michael@0: _ycbcr[0].stride=_ycbcr[0].width; michael@0: _ycbcr[1].data=_ycbcr[0].data+h*_ycbcr[0].stride; michael@0: _ycbcr[1].stride=_ycbcr[1].width; michael@0: _ycbcr[2].data=_ycbcr[1].data+(h>>vdec)*_ycbcr[1].stride; michael@0: _ycbcr[2].stride=_ycbcr[2].width; michael@0: y_row=_ycbcr[0].data; michael@0: u_row=_ycbcr[1].data; michael@0: v_row=_ycbcr[2].data; michael@0: rgb_row=data; michael@0: /*This is one of the few places it's worth handling chroma on a michael@0: case-by-case basis.*/ michael@0: switch(_dec->state.info.pixel_fmt){ michael@0: case TH_PF_420:{ michael@0: for(y=0;y>1]=OC_CLAMP255(u); michael@0: v_row[x>>1]=OC_CLAMP255(v); michael@0: } michael@0: y_row+=_ycbcr[0].stride<<1; michael@0: u_row+=_ycbcr[1].stride; michael@0: v_row+=_ycbcr[2].stride; michael@0: rgb_row+=cstride<<1; michael@0: } michael@0: }break; michael@0: case TH_PF_422:{ michael@0: for(y=0;y>1]=OC_CLAMP255(u); michael@0: v_row[x>>1]=OC_CLAMP255(v); michael@0: } michael@0: y_row+=_ycbcr[0].stride; michael@0: u_row+=_ycbcr[1].stride; michael@0: v_row+=_ycbcr[2].stride; michael@0: rgb_row+=cstride; michael@0: } michael@0: }break; michael@0: /*case TH_PF_444:*/ michael@0: default:{ michael@0: for(y=0;y