00031 #include "avcodec.h"
00032 #include "get_bits.h"
00033 #include "put_bits.h"
00034 #include "dsputil.h"
00035 #include "thread.h"
00036
00037 #define VLC_BITS 11
00038
00039 #if HAVE_BIGENDIAN
00040 #define B 3
00041 #define G 2
00042 #define R 1
00043 #define A 0
00044 #else
00045 #define B 0
00046 #define G 1
00047 #define R 2
00048 #define A 3
00049 #endif
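/*
 * B, G, R and A are the byte offsets of each channel inside a packed 32-bit
 * RGB32 pixel as it sits in memory, so src[i*4 + G] always addresses the green
 * byte of pixel i. On little-endian hosts the in-memory order is B,G,R,A
 * (offsets 0..3); on big-endian hosts it is the reverse.
 */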
00050
00051 typedef enum Predictor{
00052 LEFT= 0,
00053 PLANE,
00054 MEDIAN,
00055 } Predictor;
00056
00057 typedef struct HYuvContext{
00058 AVCodecContext *avctx;
00059 Predictor predictor;
00060 GetBitContext gb;
00061 PutBitContext pb;
00062 int interlaced;
00063 int decorrelate;
00064 int bitstream_bpp;
00065 int version;
00066 int yuy2;
00067 int bgr32;
00068 int width, height;
00069 int flags;
00070 int context;
00071 int picture_number;
00072 int last_slice_end;
00073 uint8_t *temp[3];
00074 uint64_t stats[3][256];
00075 uint8_t len[3][256];
00076 uint32_t bits[3][256];
00077 uint32_t pix_bgr_map[1<<VLC_BITS];
00078 VLC vlc[6];
00079 AVFrame picture;
00080 uint8_t *bitstream_buffer;
00081 unsigned int bitstream_buffer_size;
00082 DSPContext dsp;
00083 }HYuvContext;
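/*
 * Rough map of the context fields used below: len[p][sym] and bits[p][sym]
 * hold the per-plane Huffman code length and bit pattern, stats[p][sym] the
 * symbol counts used to (re)build them, vlc[0..2] the per-plane decode tables
 * and vlc[3..5] the joint pair/triple tables built in generate_joint_tables(),
 * with pix_bgr_map translating a joint RGB table index back into pixel bytes.
 * temp[] holds one row of per-plane scratch data for the DSP calls.
 */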
00084
00085 #define classic_shift_luma_table_size 42
00086 static const unsigned char classic_shift_luma[classic_shift_luma_table_size + FF_INPUT_BUFFER_PADDING_SIZE] = {
00087 34,36,35,69,135,232,9,16,10,24,11,23,12,16,13,10,14,8,15,8,
00088 16,8,17,20,16,10,207,206,205,236,11,8,10,21,9,23,8,8,199,70,
00089 69,68, 0
00090 };
00091
00092 #define classic_shift_chroma_table_size 59
00093 static const unsigned char classic_shift_chroma[classic_shift_chroma_table_size + FF_INPUT_BUFFER_PADDING_SIZE] = {
00094 66,36,37,38,39,40,41,75,76,77,110,239,144,81,82,83,84,85,118,183,
00095 56,57,88,89,56,89,154,57,58,57,26,141,57,56,58,57,58,57,184,119,
00096 214,245,116,83,82,49,80,79,78,77,44,75,41,40,39,38,37,36,34, 0
00097 };
00098
00099 static const unsigned char classic_add_luma[256] = {
00100 3, 9, 5, 12, 10, 35, 32, 29, 27, 50, 48, 45, 44, 41, 39, 37,
00101 73, 70, 68, 65, 64, 61, 58, 56, 53, 50, 49, 46, 44, 41, 38, 36,
00102 68, 65, 63, 61, 58, 55, 53, 51, 48, 46, 45, 43, 41, 39, 38, 36,
00103 35, 33, 32, 30, 29, 27, 26, 25, 48, 47, 46, 44, 43, 41, 40, 39,
00104 37, 36, 35, 34, 32, 31, 30, 28, 27, 26, 24, 23, 22, 20, 19, 37,
00105 35, 34, 33, 31, 30, 29, 27, 26, 24, 23, 21, 20, 18, 17, 15, 29,
00106 27, 26, 24, 22, 21, 19, 17, 16, 14, 26, 25, 23, 21, 19, 18, 16,
00107 15, 27, 25, 23, 21, 19, 17, 16, 14, 26, 25, 23, 21, 18, 17, 14,
00108 12, 17, 19, 13, 4, 9, 2, 11, 1, 7, 8, 0, 16, 3, 14, 6,
00109 12, 10, 5, 15, 18, 11, 10, 13, 15, 16, 19, 20, 22, 24, 27, 15,
00110 18, 20, 22, 24, 26, 14, 17, 20, 22, 24, 27, 15, 18, 20, 23, 25,
00111 28, 16, 19, 22, 25, 28, 32, 36, 21, 25, 29, 33, 38, 42, 45, 49,
00112 28, 31, 34, 37, 40, 42, 44, 47, 49, 50, 52, 54, 56, 57, 59, 60,
00113 62, 64, 66, 67, 69, 35, 37, 39, 40, 42, 43, 45, 47, 48, 51, 52,
00114 54, 55, 57, 59, 60, 62, 63, 66, 67, 69, 71, 72, 38, 40, 42, 43,
00115 46, 47, 49, 51, 26, 28, 30, 31, 33, 34, 18, 19, 11, 13, 7, 8,
00116 };
00117
00118 static const unsigned char classic_add_chroma[256] = {
00119 3, 1, 2, 2, 2, 2, 3, 3, 7, 5, 7, 5, 8, 6, 11, 9,
00120 7, 13, 11, 10, 9, 8, 7, 5, 9, 7, 6, 4, 7, 5, 8, 7,
00121 11, 8, 13, 11, 19, 15, 22, 23, 20, 33, 32, 28, 27, 29, 51, 77,
00122 43, 45, 76, 81, 46, 82, 75, 55, 56,144, 58, 80, 60, 74,147, 63,
00123 143, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
00124 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 27, 30, 21, 22,
00125 17, 14, 5, 6,100, 54, 47, 50, 51, 53,106,107,108,109,110,111,
00126 112,113,114,115, 4,117,118, 92, 94,121,122, 3,124,103, 2, 1,
00127 0,129,130,131,120,119,126,125,136,137,138,139,140,141,142,134,
00128 135,132,133,104, 64,101, 62, 57,102, 95, 93, 59, 61, 28, 97, 96,
00129 52, 49, 48, 29, 32, 25, 24, 46, 23, 98, 45, 44, 43, 20, 42, 41,
00130 19, 18, 99, 40, 15, 39, 38, 16, 13, 12, 11, 37, 10, 9, 8, 36,
00131 7,128,127,105,123,116, 35, 34, 33,145, 31, 79, 42,146, 78, 26,
00132 83, 48, 49, 50, 44, 47, 26, 31, 30, 18, 17, 19, 21, 24, 25, 13,
00133 14, 16, 17, 18, 20, 21, 12, 14, 15, 9, 10, 6, 9, 6, 5, 8,
00134 6, 12, 8, 10, 7, 9, 6, 4, 6, 2, 2, 3, 3, 3, 3, 2,
00135 };
00136
00137 static inline int sub_left_prediction(HYuvContext *s, uint8_t *dst, uint8_t *src, int w, int left){
00138 int i;
00139 if(w<32){
00140 for(i=0; i<w; i++){
00141 const int temp= src[i];
00142 dst[i]= temp - left;
00143 left= temp;
00144 }
00145 return left;
00146 }else{
00147 for(i=0; i<16; i++){
00148 const int temp= src[i];
00149 dst[i]= temp - left;
00150 left= temp;
00151 }
00152 s->dsp.diff_bytes(dst+16, src+16, src+15, w-16);
00153 return src[w-1];
00154 }
00155 }
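/*
 * Left prediction replaces each sample with the difference to its left
 * neighbour, modulo 256. The first 16 samples are differenced in plain C and
 * the rest handed to dsp.diff_bytes(), presumably so the bulk of the row can
 * be processed with aligned SIMD loads. A minimal sketch of the matching
 * decoder-side reconstruction (the real decoder uses
 * dsp.add_hfyu_left_prediction()):
 *
 *     int left = start_value;                         // e.g. the raw sample sent first
 *     for (i = 0; i < w; i++)
 *         left = dst[i] = (uint8_t)(src[i] + left);   // src holds the residuals
 */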
00156
00157 static inline void sub_left_prediction_bgr32(HYuvContext *s, uint8_t *dst, uint8_t *src, int w, int *red, int *green, int *blue){
00158 int i;
00159 int r,g,b;
00160 r= *red;
00161 g= *green;
00162 b= *blue;
00163 for(i=0; i<FFMIN(w,4); i++){
00164 const int rt= src[i*4+R];
00165 const int gt= src[i*4+G];
00166 const int bt= src[i*4+B];
00167 dst[i*4+R]= rt - r;
00168 dst[i*4+G]= gt - g;
00169 dst[i*4+B]= bt - b;
00170 r = rt;
00171 g = gt;
00172 b = bt;
00173 }
00174 s->dsp.diff_bytes(dst+16, src+16, src+12, w*4-16);
00175 *red= src[(w-1)*4+R];
00176 *green= src[(w-1)*4+G];
00177 *blue= src[(w-1)*4+B];
00178 }
00179
00180 static int read_len_table(uint8_t *dst, GetBitContext *gb){
00181 int i, val, repeat;
00182
00183 for(i=0; i<256;){
00184 repeat= get_bits(gb, 3);
00185 val = get_bits(gb, 5);
00186 if(repeat==0)
00187 repeat= get_bits(gb, 8);
00188
00189 if(i+repeat > 256 || get_bits_left(gb) < 0) {
00190 av_log(NULL, AV_LOG_ERROR, "Error reading huffman table\n");
00191 return -1;
00192 }
00193 while (repeat--)
00194 dst[i++] = val;
00195 }
00196 return 0;
00197 }
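/*
 * The code-length table is run-length coded: each record is a 3-bit repeat
 * count followed by a 5-bit length value, and a repeat field of 0 means the
 * real repeat count follows as a full byte. An illustrative (not real-stream)
 * decode: the bits 011 00101 yield three consecutive symbols of length 5,
 * while 000 00101 11111111 yields 255 of them. store_table() below writes the
 * same format on the encoder side.
 */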
00198
00199 static int generate_bits_table(uint32_t *dst, const uint8_t *len_table){
00200 int len, index;
00201 uint32_t bits=0;
00202
00203 for(len=32; len>0; len--){
00204 for(index=0; index<256; index++){
00205 if(len_table[index]==len)
00206 dst[index]= bits++;
00207 }
00208 if(bits & 1){
00209 av_log(NULL, AV_LOG_ERROR, "Error generating huffman table\n");
00210 return -1;
00211 }
00212 bits >>= 1;
00213 }
00214 return 0;
00215 }
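/*
 * Codes are assigned canonically from the lengths alone: symbols are visited
 * from the longest code length down to 1 with an incrementing counter, and
 * the counter is halved between lengths (it has to be even at each step or
 * the lengths cannot describe a valid prefix code). Worked example, assuming
 * hypothetical lengths a=1, b=2, c=3, d=3:
 *
 *     len 3: c -> 000, d -> 001   (bits ends at 2, >>1 -> 1)
 *     len 2: b -> 01              (bits ends at 2, >>1 -> 1)
 *     len 1: a -> 1
 */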
00216
00217 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
00218 typedef struct {
00219 uint64_t val;
00220 int name;
00221 } HeapElem;
00222
00223 static void heap_sift(HeapElem *h, int root, int size)
00224 {
00225 while(root*2+1 < size) {
00226 int child = root*2+1;
00227 if(child < size-1 && h[child].val > h[child+1].val)
00228 child++;
00229 if(h[root].val > h[child].val) {
00230 FFSWAP(HeapElem, h[root], h[child]);
00231 root = child;
00232 } else
00233 break;
00234 }
00235 }
00236
00237 static void generate_len_table(uint8_t *dst, const uint64_t *stats){
00238 HeapElem h[256];
00239 int up[2*256];
00240 int len[2*256];
00241 int offset, i, next;
00242 int size = 256;
00243
00244 for(offset=1; ; offset<<=1){
00245 for(i=0; i<size; i++){
00246 h[i].name = i;
00247 h[i].val = (stats[i] << 8) + offset;
00248 }
00249 for(i=size/2-1; i>=0; i--)
00250 heap_sift(h, i, size);
00251
00252 for(next=size; next<size*2-1; next++){
00253
00254 uint64_t min1v = h[0].val;
00255 up[h[0].name] = next;
00256 h[0].val = INT64_MAX;
00257 heap_sift(h, 0, size);
00258 up[h[0].name] = next;
00259 h[0].name = next;
00260 h[0].val += min1v;
00261 heap_sift(h, 0, size);
00262 }
00263
00264 len[2*size-2] = 0;
00265 for(i=2*size-3; i>=size; i--)
00266 len[i] = len[up[i]] + 1;
00267 for(i=0; i<size; i++) {
00268 dst[i] = len[up[i]] + 1;
00269 if(dst[i] >= 32) break;
00270 }
00271 if(i==size) break;
00272 }
00273 }
00274 #endif
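/*
 * generate_len_table() derives the code lengths with a textbook Huffman
 * build: heap_sift() maintains a min-heap of the pending nodes, the two
 * smallest nodes are merged repeatedly with their parent index recorded in
 * up[], and each leaf's final depth is read back through that chain. The
 * counts are weighted as (stats[i] << 8) + offset; if any resulting length
 * reaches 32 bits, the loop retries with offset doubled, which flattens the
 * distribution until every code fits in 31 bits.
 */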
00275
00276 static void generate_joint_tables(HYuvContext *s){
00277 uint16_t symbols[1<<VLC_BITS];
00278 uint16_t bits[1<<VLC_BITS];
00279 uint8_t len[1<<VLC_BITS];
00280 if(s->bitstream_bpp < 24){
00281 int p, i, y, u;
00282 for(p=0; p<3; p++){
00283 for(i=y=0; y<256; y++){
00284 int len0 = s->len[0][y];
00285 int limit = VLC_BITS - len0;
00286 if(limit <= 0)
00287 continue;
00288 for(u=0; u<256; u++){
00289 int len1 = s->len[p][u];
00290 if(len1 > limit)
00291 continue;
00292 len[i] = len0 + len1;
00293 bits[i] = (s->bits[0][y] << len1) + s->bits[p][u];
00294 symbols[i] = (y<<8) + u;
00295 if(symbols[i] != 0xffff)
00296 i++;
00297 }
00298 }
00299 ff_free_vlc(&s->vlc[3+p]);
00300 ff_init_vlc_sparse(&s->vlc[3+p], VLC_BITS, i, len, 1, 1, bits, 2, 2, symbols, 2, 2, 0);
00301 }
00302 }else{
00303 uint8_t (*map)[4] = (uint8_t(*)[4])s->pix_bgr_map;
00304 int i, b, g, r, code;
00305 int p0 = s->decorrelate;
00306 int p1 = !s->decorrelate;
00307
00308
00309
00310 for(i=0, g=-16; g<16; g++){
00311 int len0 = s->len[p0][g&255];
00312 int limit0 = VLC_BITS - len0;
00313 if(limit0 < 2)
00314 continue;
00315 for(b=-16; b<16; b++){
00316 int len1 = s->len[p1][b&255];
00317 int limit1 = limit0 - len1;
00318 if(limit1 < 1)
00319 continue;
00320 code = (s->bits[p0][g&255] << len1) + s->bits[p1][b&255];
00321 for(r=-16; r<16; r++){
00322 int len2 = s->len[2][r&255];
00323 if(len2 > limit1)
00324 continue;
00325 len[i] = len0 + len1 + len2;
00326 bits[i] = (code << len2) + s->bits[2][r&255];
00327 if(s->decorrelate){
00328 map[i][G] = g;
00329 map[i][B] = g+b;
00330 map[i][R] = g+r;
00331 }else{
00332 map[i][B] = g;
00333 map[i][G] = b;
00334 map[i][R] = r;
00335 }
00336 i++;
00337 }
00338 }
00339 }
00340 ff_free_vlc(&s->vlc[3]);
00341 init_vlc(&s->vlc[3], VLC_BITS, i, len, 1, 1, bits, 2, 2, 0);
00342 }
00343 }
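/*
 * The joint tables trade memory for speed: whenever two (YUV) or three (RGB)
 * consecutive codes fit together within VLC_BITS bits, their concatenated bit
 * pattern gets one entry, so a single get_vlc2() lookup decodes a whole pixel
 * pair or triple. Combinations that do not fit are resolved at decode time by
 * falling back to the per-plane tables (the 0xffff sentinel in READ_2PIX and
 * the code == -1 path in decode_bgr_1). For RGB only small residuals in the
 * range -16..15 are pre-tabulated, and map[] already undoes the optional
 * green decorrelation so the table output is a ready-made pixel.
 */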
00344
00345 static int read_huffman_tables(HYuvContext *s, const uint8_t *src, int length){
00346 GetBitContext gb;
00347 int i;
00348
00349 init_get_bits(&gb, src, length*8);
00350
00351 for(i=0; i<3; i++){
00352 if(read_len_table(s->len[i], &gb)<0)
00353 return -1;
00354 if(generate_bits_table(s->bits[i], s->len[i])<0){
00355 return -1;
00356 }
00357 ff_free_vlc(&s->vlc[i]);
00358 init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0);
00359 }
00360
00361 generate_joint_tables(s);
00362
00363 return (get_bits_count(&gb)+7)/8;
00364 }
00365
00366 static int read_old_huffman_tables(HYuvContext *s){
00367 #if 1
00368 GetBitContext gb;
00369 int i;
00370
00371 init_get_bits(&gb, classic_shift_luma, classic_shift_luma_table_size*8);
00372 if(read_len_table(s->len[0], &gb)<0)
00373 return -1;
00374 init_get_bits(&gb, classic_shift_chroma, classic_shift_chroma_table_size*8);
00375 if(read_len_table(s->len[1], &gb)<0)
00376 return -1;
00377
00378 for(i=0; i<256; i++) s->bits[0][i] = classic_add_luma [i];
00379 for(i=0; i<256; i++) s->bits[1][i] = classic_add_chroma[i];
00380
00381 if(s->bitstream_bpp >= 24){
00382 memcpy(s->bits[1], s->bits[0], 256*sizeof(uint32_t));
00383 memcpy(s->len[1] , s->len [0], 256*sizeof(uint8_t));
00384 }
00385 memcpy(s->bits[2], s->bits[1], 256*sizeof(uint32_t));
00386 memcpy(s->len[2] , s->len [1], 256*sizeof(uint8_t));
00387
00388 for(i=0; i<3; i++){
00389 ff_free_vlc(&s->vlc[i]);
00390 init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0);
00391 }
00392
00393 generate_joint_tables(s);
00394
00395 return 0;
00396 #else
00397 av_log(s->avctx, AV_LOG_DEBUG, "v1 huffyuv is not supported \n");
00398 return -1;
00399 #endif
00400 }
00401
00402 static av_cold void alloc_temp(HYuvContext *s){
00403 int i;
00404
00405 if(s->bitstream_bpp<24){
00406 for(i=0; i<3; i++){
00407 s->temp[i]= av_malloc(s->width + 16);
00408 }
00409 }else{
00410 s->temp[0]= av_mallocz(4*s->width + 16);
00411 }
00412 }
00413
00414 static av_cold int common_init(AVCodecContext *avctx){
00415 HYuvContext *s = avctx->priv_data;
00416
00417 s->avctx= avctx;
00418 s->flags= avctx->flags;
00419
00420 dsputil_init(&s->dsp, avctx);
00421
00422 s->width= avctx->width;
00423 s->height= avctx->height;
00424 assert(s->width>0 && s->height>0);
00425
00426 return 0;
00427 }
00428
00429 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
00430 static av_cold int decode_init(AVCodecContext *avctx)
00431 {
00432 HYuvContext *s = avctx->priv_data;
00433
00434 common_init(avctx);
00435 memset(s->vlc, 0, 3*sizeof(VLC));
00436
00437 avctx->coded_frame= &s->picture;
00438 s->interlaced= s->height > 288;
00439
00440 s->bgr32=1;
00441
00442
00443 if(avctx->extradata_size){
00444 if((avctx->bits_per_coded_sample&7) && avctx->bits_per_coded_sample != 12)
00445 s->version=1;
00446 else
00447 s->version=2;
00448 }else
00449 s->version=0;
00450
00451 if(s->version==2){
00452 int method, interlace;
00453
00454 if (avctx->extradata_size < 4)
00455 return -1;
00456
00457 method= ((uint8_t*)avctx->extradata)[0];
00458 s->decorrelate= method&64 ? 1 : 0;
00459 s->predictor= method&63;
00460 s->bitstream_bpp= ((uint8_t*)avctx->extradata)[1];
00461 if(s->bitstream_bpp==0)
00462 s->bitstream_bpp= avctx->bits_per_coded_sample&~7;
00463 interlace= (((uint8_t*)avctx->extradata)[2] & 0x30) >> 4;
00464 s->interlaced= (interlace==1) ? 1 : (interlace==2) ? 0 : s->interlaced;
00465 s->context= ((uint8_t*)avctx->extradata)[2] & 0x40 ? 1 : 0;
00466
00467 if(read_huffman_tables(s, ((uint8_t*)avctx->extradata)+4, avctx->extradata_size-4) < 0)
00468 return -1;
00469 }else{
00470 switch(avctx->bits_per_coded_sample&7){
00471 case 1:
00472 s->predictor= LEFT;
00473 s->decorrelate= 0;
00474 break;
00475 case 2:
00476 s->predictor= LEFT;
00477 s->decorrelate= 1;
00478 break;
00479 case 3:
00480 s->predictor= PLANE;
00481 s->decorrelate= avctx->bits_per_coded_sample >= 24;
00482 break;
00483 case 4:
00484 s->predictor= MEDIAN;
00485 s->decorrelate= 0;
00486 break;
00487 default:
00488 s->predictor= LEFT;
00489 s->decorrelate= 0;
00490 break;
00491 }
00492 s->bitstream_bpp= avctx->bits_per_coded_sample & ~7;
00493 s->context= 0;
00494
00495 if(read_old_huffman_tables(s) < 0)
00496 return -1;
00497 }
00498
00499 switch(s->bitstream_bpp){
00500 case 12:
00501 avctx->pix_fmt = PIX_FMT_YUV420P;
00502 break;
00503 case 16:
00504 if(s->yuy2){
00505 avctx->pix_fmt = PIX_FMT_YUYV422;
00506 }else{
00507 avctx->pix_fmt = PIX_FMT_YUV422P;
00508 }
00509 break;
00510 case 24:
00511 case 32:
00512 if(s->bgr32){
00513 avctx->pix_fmt = PIX_FMT_RGB32;
00514 }else{
00515 avctx->pix_fmt = PIX_FMT_BGR24;
00516 }
00517 break;
00518 default:
00519 return AVERROR_INVALIDDATA;
00520 }
00521
00522 alloc_temp(s);
00523
00524
00525
00526 return 0;
00527 }
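/*
 * Version-2 extradata layout, as parsed above and written by encode_init():
 *   byte 0: predictor in bits 0-5, decorrelate flag in bit 6
 *   byte 1: bitstream bits per pixel (0 = take bits_per_coded_sample instead)
 *   byte 2: bits 4-5 interlacing override (1 = interlaced, 2 = progressive),
 *           bit 6 = per-frame context (adaptive Huffman tables)
 *   byte 3: reserved, zero
 *   byte 4 onwards: run-length coded code-length tables for the three planes
 */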
00528
00529 static av_cold int decode_init_thread_copy(AVCodecContext *avctx)
00530 {
00531 HYuvContext *s = avctx->priv_data;
00532 int i;
00533
00534 avctx->coded_frame= &s->picture;
00535 alloc_temp(s);
00536
00537 for (i = 0; i < 6; i++)
00538 s->vlc[i].table = NULL;
00539
00540 if(s->version==2){
00541 if(read_huffman_tables(s, ((uint8_t*)avctx->extradata)+4, avctx->extradata_size-4) < 0)
00542 return -1;
00543 }else{
00544 if(read_old_huffman_tables(s) < 0)
00545 return -1;
00546 }
00547
00548 return 0;
00549 }
00550 #endif
00551
00552 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
00553 static int store_table(HYuvContext *s, const uint8_t *len, uint8_t *buf){
00554 int i;
00555 int index= 0;
00556
00557 for(i=0; i<256;){
00558 int val= len[i];
00559 int repeat=0;
00560
00561 for(; i<256 && len[i]==val && repeat<255; i++)
00562 repeat++;
00563
00564 assert(val < 32 && val >0 && repeat<256 && repeat>0);
00565 if(repeat>7){
00566 buf[index++]= val;
00567 buf[index++]= repeat;
00568 }else{
00569 buf[index++]= val | (repeat<<5);
00570 }
00571 }
00572
00573 return index;
00574 }
00575
00576 static av_cold int encode_init(AVCodecContext *avctx)
00577 {
00578 HYuvContext *s = avctx->priv_data;
00579 int i, j;
00580
00581 common_init(avctx);
00582
00583 avctx->extradata= av_mallocz(1024*30);
00584 avctx->stats_out= av_mallocz(1024*30);
00585 s->version=2;
00586
00587 avctx->coded_frame= &s->picture;
00588
00589 switch(avctx->pix_fmt){
00590 case PIX_FMT_YUV420P:
00591 s->bitstream_bpp= 12;
00592 break;
00593 case PIX_FMT_YUV422P:
00594 s->bitstream_bpp= 16;
00595 break;
00596 case PIX_FMT_RGB32:
00597 s->bitstream_bpp= 24;
00598 break;
00599 default:
00600 av_log(avctx, AV_LOG_ERROR, "format not supported\n");
00601 return -1;
00602 }
00603 avctx->bits_per_coded_sample= s->bitstream_bpp;
00604 s->decorrelate= s->bitstream_bpp >= 24;
00605 s->predictor= avctx->prediction_method;
00606 s->interlaced= avctx->flags&CODEC_FLAG_INTERLACED_ME ? 1 : 0;
00607 if(avctx->context_model==1){
00608 s->context= avctx->context_model;
00609 if(s->flags & (CODEC_FLAG_PASS1|CODEC_FLAG_PASS2)){
00610 av_log(avctx, AV_LOG_ERROR, "context=1 is not compatible with 2 pass huffyuv encoding\n");
00611 return -1;
00612 }
00613 }else s->context= 0;
00614
00615 if(avctx->codec->id==CODEC_ID_HUFFYUV){
00616 if(avctx->pix_fmt==PIX_FMT_YUV420P){
00617 av_log(avctx, AV_LOG_ERROR, "Error: YV12 is not supported by huffyuv; use vcodec=ffvhuff or format=422p\n");
00618 return -1;
00619 }
00620 if(avctx->context_model){
00621 av_log(avctx, AV_LOG_ERROR, "Error: per-frame huffman tables are not supported by huffyuv; use vcodec=ffvhuff\n");
00622 return -1;
00623 }
00624 if(s->interlaced != ( s->height > 288 ))
00625 av_log(avctx, AV_LOG_INFO, "using huffyuv 2.2.0 or newer interlacing flag\n");
00626 }
00627
00628 if(s->bitstream_bpp>=24 && s->predictor==MEDIAN){
00629 av_log(avctx, AV_LOG_ERROR, "Error: RGB is incompatible with median predictor\n");
00630 return -1;
00631 }
00632
00633 ((uint8_t*)avctx->extradata)[0]= s->predictor | (s->decorrelate << 6);
00634 ((uint8_t*)avctx->extradata)[1]= s->bitstream_bpp;
00635 ((uint8_t*)avctx->extradata)[2]= s->interlaced ? 0x10 : 0x20;
00636 if(s->context)
00637 ((uint8_t*)avctx->extradata)[2]|= 0x40;
00638 ((uint8_t*)avctx->extradata)[3]= 0;
00639 s->avctx->extradata_size= 4;
00640
00641 if(avctx->stats_in){
00642 char *p= avctx->stats_in;
00643
00644 for(i=0; i<3; i++)
00645 for(j=0; j<256; j++)
00646 s->stats[i][j]= 1;
00647
00648 for(;;){
00649 for(i=0; i<3; i++){
00650 char *next;
00651
00652 for(j=0; j<256; j++){
00653 s->stats[i][j]+= strtol(p, &next, 0);
00654 if(next==p) return -1;
00655 p=next;
00656 }
00657 }
00658 if(p[0]==0 || p[1]==0 || p[2]==0) break;
00659 }
00660 }else{
00661 for(i=0; i<3; i++)
00662 for(j=0; j<256; j++){
00663 int d= FFMIN(j, 256-j);
00664
00665 s->stats[i][j]= 100000000/(d+1);
00666 }
00667 }
00668
00669 for(i=0; i<3; i++){
00670 generate_len_table(s->len[i], s->stats[i]);
00671
00672 if(generate_bits_table(s->bits[i], s->len[i])<0){
00673 return -1;
00674 }
00675
00676 s->avctx->extradata_size+=
00677 store_table(s, s->len[i], &((uint8_t*)s->avctx->extradata)[s->avctx->extradata_size]);
00678 }
00679
00680 if(s->context){
00681 for(i=0; i<3; i++){
00682 int pels = s->width*s->height / (i?40:10);
00683 for(j=0; j<256; j++){
00684 int d= FFMIN(j, 256-j);
00685 s->stats[i][j]= pels/(d+1);
00686 }
00687 }
00688 }else{
00689 for(i=0; i<3; i++)
00690 for(j=0; j<256; j++)
00691 s->stats[i][j]= 0;
00692 }
00693
00694
00695
00696 alloc_temp(s);
00697
00698 s->picture_number=0;
00699
00700 return 0;
00701 }
00702 #endif
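/*
 * Two-pass statistics format (stats_in / stats_out): each dump is three text
 * lines, one per plane, holding 256 space-separated symbol counts. Pass one
 * accumulates counts in s->stats[][] and emits a dump every 32 frames from
 * encode_frame(); pass two parses them back above with strtol(), summing
 * repeated dumps, before building the fixed tables.
 */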
00703
00704
00705
00706 #define READ_2PIX(dst0, dst1, plane1){\
00707 uint16_t code = get_vlc2(&s->gb, s->vlc[3+plane1].table, VLC_BITS, 1);\
00708 if(code != 0xffff){\
00709 dst0 = code>>8;\
00710 dst1 = code;\
00711 }else{\
00712 dst0 = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);\
00713 dst1 = get_vlc2(&s->gb, s->vlc[plane1].table, VLC_BITS, 3);\
00714 }\
00715 }
00716
00717 static void decode_422_bitstream(HYuvContext *s, int count){
00718 int i;
00719
00720 count/=2;
00721
00722 if(count >= (get_bits_left(&s->gb))/(31*4)){
00723 for (i = 0; i < count && get_bits_left(&s->gb) > 0; i++) {
00724 READ_2PIX(s->temp[0][2*i ], s->temp[1][i], 1);
00725 READ_2PIX(s->temp[0][2*i+1], s->temp[2][i], 2);
00726 }
00727 }else{
00728 for(i=0; i<count; i++){
00729 READ_2PIX(s->temp[0][2*i ], s->temp[1][i], 1);
00730 READ_2PIX(s->temp[0][2*i+1], s->temp[2][i], 2);
00731 }
00732 }
00733 }
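/*
 * The split above is only an optimisation: 31*4 is the worst-case bit cost of
 * one loop iteration (four codes of at most 31 bits each), so when the
 * remaining bitstream is guaranteed to cover all iterations the unchecked loop
 * runs, otherwise the guarded loop bails out as soon as the reader runs dry.
 * decode_gray_bitstream() below applies the same idea with two codes per
 * iteration.
 */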
00734
00735 static void decode_gray_bitstream(HYuvContext *s, int count){
00736 int i;
00737
00738 count/=2;
00739
00740 if(count >= (get_bits_left(&s->gb))/(31*2)){
00741 for (i = 0; i < count && get_bits_left(&s->gb) > 0; i++) {
00742 READ_2PIX(s->temp[0][2*i ], s->temp[0][2*i+1], 0);
00743 }
00744 }else{
00745 for(i=0; i<count; i++){
00746 READ_2PIX(s->temp[0][2*i ], s->temp[0][2*i+1], 0);
00747 }
00748 }
00749 }
00750
00751 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
00752 static int encode_422_bitstream(HYuvContext *s, int offset, int count){
00753 int i;
00754 const uint8_t *y = s->temp[0] + offset;
00755 const uint8_t *u = s->temp[1] + offset/2;
00756 const uint8_t *v = s->temp[2] + offset/2;
00757
00758 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 2*4*count){
00759 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
00760 return -1;
00761 }
00762
00763 #define LOAD4\
00764 int y0 = y[2*i];\
00765 int y1 = y[2*i+1];\
00766 int u0 = u[i];\
00767 int v0 = v[i];
00768
00769 count/=2;
00770 if(s->flags&CODEC_FLAG_PASS1){
00771 for(i=0; i<count; i++){
00772 LOAD4;
00773 s->stats[0][y0]++;
00774 s->stats[1][u0]++;
00775 s->stats[0][y1]++;
00776 s->stats[2][v0]++;
00777 }
00778 }
00779 if(s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)
00780 return 0;
00781 if(s->context){
00782 for(i=0; i<count; i++){
00783 LOAD4;
00784 s->stats[0][y0]++;
00785 put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
00786 s->stats[1][u0]++;
00787 put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
00788 s->stats[0][y1]++;
00789 put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
00790 s->stats[2][v0]++;
00791 put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
00792 }
00793 }else{
00794 for(i=0; i<count; i++){
00795 LOAD4;
00796 put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
00797 put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
00798 put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
00799 put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
00800 }
00801 }
00802 return 0;
00803 }
00804
00805 static int encode_gray_bitstream(HYuvContext *s, int count){
00806 int i;
00807
00808 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 4*count){
00809 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
00810 return -1;
00811 }
00812
00813 #define LOAD2\
00814 int y0 = s->temp[0][2*i];\
00815 int y1 = s->temp[0][2*i+1];
00816 #define STAT2\
00817 s->stats[0][y0]++;\
00818 s->stats[0][y1]++;
00819 #define WRITE2\
00820 put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);\
00821 put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
00822
00823 count/=2;
00824 if(s->flags&CODEC_FLAG_PASS1){
00825 for(i=0; i<count; i++){
00826 LOAD2;
00827 STAT2;
00828 }
00829 }
00830 if(s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)
00831 return 0;
00832
00833 if(s->context){
00834 for(i=0; i<count; i++){
00835 LOAD2;
00836 STAT2;
00837 WRITE2;
00838 }
00839 }else{
00840 for(i=0; i<count; i++){
00841 LOAD2;
00842 WRITE2;
00843 }
00844 }
00845 return 0;
00846 }
00847 #endif
00848
00849 static av_always_inline void decode_bgr_1(HYuvContext *s, int count, int decorrelate, int alpha){
00850 int i;
00851 for(i=0; i<count; i++){
00852 int code = get_vlc2(&s->gb, s->vlc[3].table, VLC_BITS, 1);
00853 if(code != -1){
00854 *(uint32_t*)&s->temp[0][4*i] = s->pix_bgr_map[code];
00855 }else if(decorrelate){
00856 s->temp[0][4*i+G] = get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
00857 s->temp[0][4*i+B] = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3) + s->temp[0][4*i+G];
00858 s->temp[0][4*i+R] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3) + s->temp[0][4*i+G];
00859 }else{
00860 s->temp[0][4*i+B] = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
00861 s->temp[0][4*i+G] = get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
00862 s->temp[0][4*i+R] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
00863 }
00864 if(alpha)
00865 s->temp[0][4*i+A] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
00866 }
00867 }
00868
00869 static void decode_bgr_bitstream(HYuvContext *s, int count){
00870 if(s->decorrelate){
00871 if(s->bitstream_bpp==24)
00872 decode_bgr_1(s, count, 1, 0);
00873 else
00874 decode_bgr_1(s, count, 1, 1);
00875 }else{
00876 if(s->bitstream_bpp==24)
00877 decode_bgr_1(s, count, 0, 0);
00878 else
00879 decode_bgr_1(s, count, 0, 1);
00880 }
00881 }
00882
00883 static int encode_bgr_bitstream(HYuvContext *s, int count){
00884 int i;
00885
00886 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 3*4*count){
00887 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
00888 return -1;
00889 }
00890
00891 #define LOAD3\
00892 int g= s->temp[0][4*i+G];\
00893 int b= (s->temp[0][4*i+B] - g) & 0xff;\
00894 int r= (s->temp[0][4*i+R] - g) & 0xff;
00895 #define STAT3\
00896 s->stats[0][b]++;\
00897 s->stats[1][g]++;\
00898 s->stats[2][r]++;
00899 #define WRITE3\
00900 put_bits(&s->pb, s->len[1][g], s->bits[1][g]);\
00901 put_bits(&s->pb, s->len[0][b], s->bits[0][b]);\
00902 put_bits(&s->pb, s->len[2][r], s->bits[2][r]);
00903
00904 if((s->flags&CODEC_FLAG_PASS1) && (s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)){
00905 for(i=0; i<count; i++){
00906 LOAD3;
00907 STAT3;
00908 }
00909 }else if(s->context || (s->flags&CODEC_FLAG_PASS1)){
00910 for(i=0; i<count; i++){
00911 LOAD3;
00912 STAT3;
00913 WRITE3;
00914 }
00915 }else{
00916 for(i=0; i<count; i++){
00917 LOAD3;
00918 WRITE3;
00919 }
00920 }
00921 return 0;
00922 }
00923
00924 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
00925 static void draw_slice(HYuvContext *s, int y){
00926 int h, cy, i;
00927 int offset[AV_NUM_DATA_POINTERS];
00928
00929 if(s->avctx->draw_horiz_band==NULL)
00930 return;
00931
00932 h= y - s->last_slice_end;
00933 y -= h;
00934
00935 if(s->bitstream_bpp==12){
00936 cy= y>>1;
00937 }else{
00938 cy= y;
00939 }
00940
00941 offset[0] = s->picture.linesize[0]*y;
00942 offset[1] = s->picture.linesize[1]*cy;
00943 offset[2] = s->picture.linesize[2]*cy;
00944 for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
00945 offset[i] = 0;
00946 emms_c();
00947
00948 s->avctx->draw_horiz_band(s->avctx, &s->picture, offset, y, 3, h);
00949
00950 s->last_slice_end= y + h;
00951 }
00952
00953 static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt){
00954 const uint8_t *buf = avpkt->data;
00955 int buf_size = avpkt->size;
00956 HYuvContext *s = avctx->priv_data;
00957 const int width= s->width;
00958 const int width2= s->width>>1;
00959 const int height= s->height;
00960 int fake_ystride, fake_ustride, fake_vstride;
00961 AVFrame * const p= &s->picture;
00962 int table_size= 0;
00963
00964 AVFrame *picture = data;
00965
00966 av_fast_malloc(&s->bitstream_buffer, &s->bitstream_buffer_size, buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
00967 if (!s->bitstream_buffer)
00968 return AVERROR(ENOMEM);
00969
00970 memset(s->bitstream_buffer + buf_size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
00971 s->dsp.bswap_buf((uint32_t*)s->bitstream_buffer, (const uint32_t*)buf, buf_size/4);
00972
00973 if(p->data[0])
00974 ff_thread_release_buffer(avctx, p);
00975
00976 p->reference= 0;
00977 if(ff_thread_get_buffer(avctx, p) < 0){
00978 av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
00979 return -1;
00980 }
00981
00982 if(s->context){
00983 table_size = read_huffman_tables(s, s->bitstream_buffer, buf_size);
00984 if(table_size < 0)
00985 return -1;
00986 }
00987
00988 if((unsigned)(buf_size-table_size) >= INT_MAX/8)
00989 return -1;
00990
00991 init_get_bits(&s->gb, s->bitstream_buffer+table_size, (buf_size-table_size)*8);
00992
00993 fake_ystride= s->interlaced ? p->linesize[0]*2 : p->linesize[0];
00994 fake_ustride= s->interlaced ? p->linesize[1]*2 : p->linesize[1];
00995 fake_vstride= s->interlaced ? p->linesize[2]*2 : p->linesize[2];
00996
00997 s->last_slice_end= 0;
00998
00999 if(s->bitstream_bpp<24){
01000 int y, cy;
01001 int lefty, leftu, leftv;
01002 int lefttopy, lefttopu, lefttopv;
01003
01004 if(s->yuy2){
01005 p->data[0][3]= get_bits(&s->gb, 8);
01006 p->data[0][2]= get_bits(&s->gb, 8);
01007 p->data[0][1]= get_bits(&s->gb, 8);
01008 p->data[0][0]= get_bits(&s->gb, 8);
01009
01010 av_log(avctx, AV_LOG_ERROR, "YUY2 output is not implemented yet\n");
01011 return -1;
01012 }else{
01013
01014 leftv= p->data[2][0]= get_bits(&s->gb, 8);
01015 lefty= p->data[0][1]= get_bits(&s->gb, 8);
01016 leftu= p->data[1][0]= get_bits(&s->gb, 8);
01017 p->data[0][0]= get_bits(&s->gb, 8);
01018
01019 switch(s->predictor){
01020 case LEFT:
01021 case PLANE:
01022 decode_422_bitstream(s, width-2);
01023 lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
01024 if(!(s->flags&CODEC_FLAG_GRAY)){
01025 leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + 1, s->temp[1], width2-1, leftu);
01026 leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + 1, s->temp[2], width2-1, leftv);
01027 }
01028
01029 for(cy=y=1; y<s->height; y++,cy++){
01030 uint8_t *ydst, *udst, *vdst;
01031
01032 if(s->bitstream_bpp==12){
01033 decode_gray_bitstream(s, width);
01034
01035 ydst= p->data[0] + p->linesize[0]*y;
01036
01037 lefty= s->dsp.add_hfyu_left_prediction(ydst, s->temp[0], width, lefty);
01038 if(s->predictor == PLANE){
01039 if(y>s->interlaced)
01040 s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
01041 }
01042 y++;
01043 if(y>=s->height) break;
01044 }
01045
01046 draw_slice(s, y);
01047
01048 ydst= p->data[0] + p->linesize[0]*y;
01049 udst= p->data[1] + p->linesize[1]*cy;
01050 vdst= p->data[2] + p->linesize[2]*cy;
01051
01052 decode_422_bitstream(s, width);
01053 lefty= s->dsp.add_hfyu_left_prediction(ydst, s->temp[0], width, lefty);
01054 if(!(s->flags&CODEC_FLAG_GRAY)){
01055 leftu= s->dsp.add_hfyu_left_prediction(udst, s->temp[1], width2, leftu);
01056 leftv= s->dsp.add_hfyu_left_prediction(vdst, s->temp[2], width2, leftv);
01057 }
01058 if(s->predictor == PLANE){
01059 if(cy>s->interlaced){
01060 s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
01061 if(!(s->flags&CODEC_FLAG_GRAY)){
01062 s->dsp.add_bytes(udst, udst - fake_ustride, width2);
01063 s->dsp.add_bytes(vdst, vdst - fake_vstride, width2);
01064 }
01065 }
01066 }
01067 }
01068 draw_slice(s, height);
01069
01070 break;
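/* MEDIAN reconstructs each sample as residual + median(left, top,
 * left + top - topleft), the same spatial predictor as lossless JPEG;
 * dsp.add_hfyu_median_prediction() applies it one row at a time. */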
01071 case MEDIAN:
01072
01073 decode_422_bitstream(s, width-2);
01074 lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
01075 if(!(s->flags&CODEC_FLAG_GRAY)){
01076 leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + 1, s->temp[1], width2-1, leftu);
01077 leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + 1, s->temp[2], width2-1, leftv);
01078 }
01079
01080 cy=y=1;
01081
01082
01083 if(s->interlaced){
01084 decode_422_bitstream(s, width);
01085 lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + p->linesize[0], s->temp[0], width, lefty);
01086 if(!(s->flags&CODEC_FLAG_GRAY)){
01087 leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + p->linesize[1], s->temp[1], width2, leftu);
01088 leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + p->linesize[2], s->temp[2], width2, leftv);
01089 }
01090 y++; cy++;
01091 }
01092
01093
01094 decode_422_bitstream(s, 4);
01095 lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + fake_ystride, s->temp[0], 4, lefty);
01096 if(!(s->flags&CODEC_FLAG_GRAY)){
01097 leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + fake_ustride, s->temp[1], 2, leftu);
01098 leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + fake_vstride, s->temp[2], 2, leftv);
01099 }
01100
01101
01102 lefttopy= p->data[0][3];
01103 decode_422_bitstream(s, width-4);
01104 s->dsp.add_hfyu_median_prediction(p->data[0] + fake_ystride+4, p->data[0]+4, s->temp[0], width-4, &lefty, &lefttopy);
01105 if(!(s->flags&CODEC_FLAG_GRAY)){
01106 lefttopu= p->data[1][1];
01107 lefttopv= p->data[2][1];
01108 s->dsp.add_hfyu_median_prediction(p->data[1] + fake_ustride+2, p->data[1]+2, s->temp[1], width2-2, &leftu, &lefttopu);
01109 s->dsp.add_hfyu_median_prediction(p->data[2] + fake_vstride+2, p->data[2]+2, s->temp[2], width2-2, &leftv, &lefttopv);
01110 }
01111 y++; cy++;
01112
01113 for(; y<height; y++,cy++){
01114 uint8_t *ydst, *udst, *vdst;
01115
01116 if(s->bitstream_bpp==12){
01117 while(2*cy > y){
01118 decode_gray_bitstream(s, width);
01119 ydst= p->data[0] + p->linesize[0]*y;
01120 s->dsp.add_hfyu_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
01121 y++;
01122 }
01123 if(y>=height) break;
01124 }
01125 draw_slice(s, y);
01126
01127 decode_422_bitstream(s, width);
01128
01129 ydst= p->data[0] + p->linesize[0]*y;
01130 udst= p->data[1] + p->linesize[1]*cy;
01131 vdst= p->data[2] + p->linesize[2]*cy;
01132
01133 s->dsp.add_hfyu_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
01134 if(!(s->flags&CODEC_FLAG_GRAY)){
01135 s->dsp.add_hfyu_median_prediction(udst, udst - fake_ustride, s->temp[1], width2, &leftu, &lefttopu);
01136 s->dsp.add_hfyu_median_prediction(vdst, vdst - fake_vstride, s->temp[2], width2, &leftv, &lefttopv);
01137 }
01138 }
01139
01140 draw_slice(s, height);
01141 break;
01142 }
01143 }
01144 }else{
01145 int y;
01146 int leftr, leftg, leftb, lefta;
01147 const int last_line= (height-1)*p->linesize[0];
01148
01149 if(s->bitstream_bpp==32){
01150 lefta= p->data[0][last_line+A]= get_bits(&s->gb, 8);
01151 leftr= p->data[0][last_line+R]= get_bits(&s->gb, 8);
01152 leftg= p->data[0][last_line+G]= get_bits(&s->gb, 8);
01153 leftb= p->data[0][last_line+B]= get_bits(&s->gb, 8);
01154 }else{
01155 leftr= p->data[0][last_line+R]= get_bits(&s->gb, 8);
01156 leftg= p->data[0][last_line+G]= get_bits(&s->gb, 8);
01157 leftb= p->data[0][last_line+B]= get_bits(&s->gb, 8);
01158 lefta= p->data[0][last_line+A]= 255;
01159 skip_bits(&s->gb, 8);
01160 }
01161
01162 if(s->bgr32){
01163 switch(s->predictor){
01164 case LEFT:
01165 case PLANE:
01166 decode_bgr_bitstream(s, width-1);
01167 s->dsp.add_hfyu_left_prediction_bgr32(p->data[0] + last_line+4, s->temp[0], width-1, &leftr, &leftg, &leftb, &lefta);
01168
01169 for(y=s->height-2; y>=0; y--){
01170 decode_bgr_bitstream(s, width);
01171
01172 s->dsp.add_hfyu_left_prediction_bgr32(p->data[0] + p->linesize[0]*y, s->temp[0], width, &leftr, &leftg, &leftb, &lefta);
01173 if(s->predictor == PLANE){
01174 if(s->bitstream_bpp!=32) lefta=0;
01175 if((y&s->interlaced)==0 && y<s->height-1-s->interlaced){
01176 s->dsp.add_bytes(p->data[0] + p->linesize[0]*y,
01177 p->data[0] + p->linesize[0]*y + fake_ystride, fake_ystride);
01178 }
01179 }
01180 }
01181 draw_slice(s, height);
01182 break;
01183 default:
01184 av_log(avctx, AV_LOG_ERROR, "prediction type not supported!\n");
01185 }
01186 }else{
01187
01188 av_log(avctx, AV_LOG_ERROR, "BGR24 output is not implemented yet\n");
01189 return -1;
01190 }
01191 }
01192 emms_c();
01193
01194 *picture= *p;
01195 *data_size = sizeof(AVFrame);
01196
01197 return (get_bits_count(&s->gb)+31)/32*4 + table_size;
01198 }
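/*
 * The return value rounds the consumed bits up to whole 32-bit words because
 * the input was byte-swapped into bitstream_buffer in uint32_t units by
 * dsp.bswap_buf(), then adds the bytes taken by the per-frame Huffman tables.
 */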
01199 #endif
01200
01201 static int common_end(HYuvContext *s){
01202 int i;
01203
01204 for(i=0; i<3; i++){
01205 av_freep(&s->temp[i]);
01206 }
01207 return 0;
01208 }
01209
01210 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
01211 static av_cold int decode_end(AVCodecContext *avctx)
01212 {
01213 HYuvContext *s = avctx->priv_data;
01214 int i;
01215
01216 if (s->picture.data[0])
01217 avctx->release_buffer(avctx, &s->picture);
01218
01219 common_end(s);
01220 av_freep(&s->bitstream_buffer);
01221
01222 for(i=0; i<6; i++){
01223 ff_free_vlc(&s->vlc[i]);
01224 }
01225
01226 return 0;
01227 }
01228 #endif
01229
01230 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
01231 static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data){
01232 HYuvContext *s = avctx->priv_data;
01233 AVFrame *pict = data;
01234 const int width= s->width;
01235 const int width2= s->width>>1;
01236 const int height= s->height;
01237 const int fake_ystride= s->interlaced ? pict->linesize[0]*2 : pict->linesize[0];
01238 const int fake_ustride= s->interlaced ? pict->linesize[1]*2 : pict->linesize[1];
01239 const int fake_vstride= s->interlaced ? pict->linesize[2]*2 : pict->linesize[2];
01240 AVFrame * const p= &s->picture;
01241 int i, j, size=0;
01242
01243 *p = *pict;
01244 p->pict_type= AV_PICTURE_TYPE_I;
01245 p->key_frame= 1;
01246
01247 if(s->context){
01248 for(i=0; i<3; i++){
01249 generate_len_table(s->len[i], s->stats[i]);
01250 if(generate_bits_table(s->bits[i], s->len[i])<0)
01251 return -1;
01252 size+= store_table(s, s->len[i], &buf[size]);
01253 }
01254
01255 for(i=0; i<3; i++)
01256 for(j=0; j<256; j++)
01257 s->stats[i][j] >>= 1;
01258 }
01259
01260 init_put_bits(&s->pb, buf+size, buf_size-size);
01261
01262 if(avctx->pix_fmt == PIX_FMT_YUV422P || avctx->pix_fmt == PIX_FMT_YUV420P){
01263 int lefty, leftu, leftv, y, cy;
01264
01265 put_bits(&s->pb, 8, leftv= p->data[2][0]);
01266 put_bits(&s->pb, 8, lefty= p->data[0][1]);
01267 put_bits(&s->pb, 8, leftu= p->data[1][0]);
01268 put_bits(&s->pb, 8, p->data[0][0]);
01269
01270 lefty= sub_left_prediction(s, s->temp[0], p->data[0], width , 0);
01271 leftu= sub_left_prediction(s, s->temp[1], p->data[1], width2, 0);
01272 leftv= sub_left_prediction(s, s->temp[2], p->data[2], width2, 0);
01273
01274 encode_422_bitstream(s, 2, width-2);
01275
01276 if(s->predictor==MEDIAN){
01277 int lefttopy, lefttopu, lefttopv;
01278 cy=y=1;
01279 if(s->interlaced){
01280 lefty= sub_left_prediction(s, s->temp[0], p->data[0]+p->linesize[0], width , lefty);
01281 leftu= sub_left_prediction(s, s->temp[1], p->data[1]+p->linesize[1], width2, leftu);
01282 leftv= sub_left_prediction(s, s->temp[2], p->data[2]+p->linesize[2], width2, leftv);
01283
01284 encode_422_bitstream(s, 0, width);
01285 y++; cy++;
01286 }
01287
01288 lefty= sub_left_prediction(s, s->temp[0], p->data[0]+fake_ystride, 4, lefty);
01289 leftu= sub_left_prediction(s, s->temp[1], p->data[1]+fake_ustride, 2, leftu);
01290 leftv= sub_left_prediction(s, s->temp[2], p->data[2]+fake_vstride, 2, leftv);
01291
01292 encode_422_bitstream(s, 0, 4);
01293
01294 lefttopy= p->data[0][3];
01295 lefttopu= p->data[1][1];
01296 lefttopv= p->data[2][1];
01297 s->dsp.sub_hfyu_median_prediction(s->temp[0], p->data[0]+4, p->data[0] + fake_ystride+4, width-4 , &lefty, &lefttopy);
01298 s->dsp.sub_hfyu_median_prediction(s->temp[1], p->data[1]+2, p->data[1] + fake_ustride+2, width2-2, &leftu, &lefttopu);
01299 s->dsp.sub_hfyu_median_prediction(s->temp[2], p->data[2]+2, p->data[2] + fake_vstride+2, width2-2, &leftv, &lefttopv);
01300 encode_422_bitstream(s, 0, width-4);
01301 y++; cy++;
01302
01303 for(; y<height; y++,cy++){
01304 uint8_t *ydst, *udst, *vdst;
01305
01306 if(s->bitstream_bpp==12){
01307 while(2*cy > y){
01308 ydst= p->data[0] + p->linesize[0]*y;
01309 s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
01310 encode_gray_bitstream(s, width);
01311 y++;
01312 }
01313 if(y>=height) break;
01314 }
01315 ydst= p->data[0] + p->linesize[0]*y;
01316 udst= p->data[1] + p->linesize[1]*cy;
01317 vdst= p->data[2] + p->linesize[2]*cy;
01318
01319 s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
01320 s->dsp.sub_hfyu_median_prediction(s->temp[1], udst - fake_ustride, udst, width2, &leftu, &lefttopu);
01321 s->dsp.sub_hfyu_median_prediction(s->temp[2], vdst - fake_vstride, vdst, width2, &leftv, &lefttopv);
01322
01323 encode_422_bitstream(s, 0, width);
01324 }
01325 }else{
01326 for(cy=y=1; y<height; y++,cy++){
01327 uint8_t *ydst, *udst, *vdst;
01328
01329
01330 if(s->bitstream_bpp==12){
01331 ydst= p->data[0] + p->linesize[0]*y;
01332
01333 if(s->predictor == PLANE && s->interlaced < y){
01334 s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
01335
01336 lefty= sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
01337 }else{
01338 lefty= sub_left_prediction(s, s->temp[0], ydst, width , lefty);
01339 }
01340 encode_gray_bitstream(s, width);
01341 y++;
01342 if(y>=height) break;
01343 }
01344
01345 ydst= p->data[0] + p->linesize[0]*y;
01346 udst= p->data[1] + p->linesize[1]*cy;
01347 vdst= p->data[2] + p->linesize[2]*cy;
01348
01349 if(s->predictor == PLANE && s->interlaced < cy){
01350 s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
01351 s->dsp.diff_bytes(s->temp[2], udst, udst - fake_ustride, width2);
01352 s->dsp.diff_bytes(s->temp[2] + width2, vdst, vdst - fake_vstride, width2);
01353
01354 lefty= sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
01355 leftu= sub_left_prediction(s, s->temp[1], s->temp[2], width2, leftu);
01356 leftv= sub_left_prediction(s, s->temp[2], s->temp[2] + width2, width2, leftv);
01357 }else{
01358 lefty= sub_left_prediction(s, s->temp[0], ydst, width , lefty);
01359 leftu= sub_left_prediction(s, s->temp[1], udst, width2, leftu);
01360 leftv= sub_left_prediction(s, s->temp[2], vdst, width2, leftv);
01361 }
01362
01363 encode_422_bitstream(s, 0, width);
01364 }
01365 }
01366 }else if(avctx->pix_fmt == PIX_FMT_RGB32){
01367 uint8_t *data = p->data[0] + (height-1)*p->linesize[0];
01368 const int stride = -p->linesize[0];
01369 const int fake_stride = -fake_ystride;
01370 int y;
01371 int leftr, leftg, leftb;
01372
01373 put_bits(&s->pb, 8, leftr= data[R]);
01374 put_bits(&s->pb, 8, leftg= data[G]);
01375 put_bits(&s->pb, 8, leftb= data[B]);
01376 put_bits(&s->pb, 8, 0);
01377
01378 sub_left_prediction_bgr32(s, s->temp[0], data+4, width-1, &leftr, &leftg, &leftb);
01379 encode_bgr_bitstream(s, width-1);
01380
01381 for(y=1; y<s->height; y++){
01382 uint8_t *dst = data + y*stride;
01383 if(s->predictor == PLANE && s->interlaced < y){
01384 s->dsp.diff_bytes(s->temp[1], dst, dst - fake_stride, width*4);
01385 sub_left_prediction_bgr32(s, s->temp[0], s->temp[1], width, &leftr, &leftg, &leftb);
01386 }else{
01387 sub_left_prediction_bgr32(s, s->temp[0], dst, width, &leftr, &leftg, &leftb);
01388 }
01389 encode_bgr_bitstream(s, width);
01390 }
01391 }else{
01392 av_log(avctx, AV_LOG_ERROR, "Format not supported!\n");
01393 }
01394 emms_c();
01395
01396 size+= (put_bits_count(&s->pb)+31)/8;
01397 put_bits(&s->pb, 16, 0);
01398 put_bits(&s->pb, 15, 0);
01399 size/= 4;
01400
01401 if((s->flags&CODEC_FLAG_PASS1) && (s->picture_number&31)==0){
01402 int j;
01403 char *p= avctx->stats_out;
01404 char *end= p + 1024*30;
01405 for(i=0; i<3; i++){
01406 for(j=0; j<256; j++){
01407 snprintf(p, end-p, "%"PRIu64" ", s->stats[i][j]);
01408 p+= strlen(p);
01409 s->stats[i][j]= 0;
01410 }
01411 snprintf(p, end-p, "\n");
01412 p++;
01413 }
01414 } else
01415 avctx->stats_out[0] = '\0';
01416 if(!(s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)){
01417 flush_put_bits(&s->pb);
01418 s->dsp.bswap_buf((uint32_t*)buf, (uint32_t*)buf, size);
01419 }
01420
01421 s->picture_number++;
01422
01423 return size*4;
01424 }
01425
01426 static av_cold int encode_end(AVCodecContext *avctx)
01427 {
01428 HYuvContext *s = avctx->priv_data;
01429
01430 common_end(s);
01431
01432 av_freep(&avctx->extradata);
01433 av_freep(&avctx->stats_out);
01434
01435 return 0;
01436 }
01437 #endif
01438
01439 #if CONFIG_HUFFYUV_DECODER
01440 AVCodec ff_huffyuv_decoder = {
01441 .name = "huffyuv",
01442 .type = AVMEDIA_TYPE_VIDEO,
01443 .id = CODEC_ID_HUFFYUV,
01444 .priv_data_size = sizeof(HYuvContext),
01445 .init = decode_init,
01446 .close = decode_end,
01447 .decode = decode_frame,
01448 .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_FRAME_THREADS,
01449 .init_thread_copy = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
01450 .long_name = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
01451 };
01452 #endif
01453
01454 #if CONFIG_FFVHUFF_DECODER
01455 AVCodec ff_ffvhuff_decoder = {
01456 .name = "ffvhuff",
01457 .type = AVMEDIA_TYPE_VIDEO,
01458 .id = CODEC_ID_FFVHUFF,
01459 .priv_data_size = sizeof(HYuvContext),
01460 .init = decode_init,
01461 .close = decode_end,
01462 .decode = decode_frame,
01463 .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_FRAME_THREADS,
01464 .init_thread_copy = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
01465 .long_name = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
01466 };
01467 #endif
01468
01469 #if CONFIG_HUFFYUV_ENCODER
01470 AVCodec ff_huffyuv_encoder = {
01471 .name = "huffyuv",
01472 .type = AVMEDIA_TYPE_VIDEO,
01473 .id = CODEC_ID_HUFFYUV,
01474 .priv_data_size = sizeof(HYuvContext),
01475 .init = encode_init,
01476 .encode = encode_frame,
01477 .close = encode_end,
01478 .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV422P, PIX_FMT_RGB32, PIX_FMT_NONE},
01479 .long_name = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
01480 };
01481 #endif
01482
01483 #if CONFIG_FFVHUFF_ENCODER
01484 AVCodec ff_ffvhuff_encoder = {
01485 .name = "ffvhuff",
01486 .type = AVMEDIA_TYPE_VIDEO,
01487 .id = CODEC_ID_FFVHUFF,
01488 .priv_data_size = sizeof(HYuvContext),
01489 .init = encode_init,
01490 .encode = encode_frame,
01491 .close = encode_end,
01492 .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_RGB32, PIX_FMT_NONE},
01493 .long_name = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
01494 };
01495 #endif