libavcodec/mpegvideo.c

00001 /*
00002  * The simplest mpeg encoder (well, it was the simplest!)
00003  * Copyright (c) 2000,2001 Fabrice Bellard
00004  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
00005  *
00006  * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
00007  *
00008  * This file is part of Libav.
00009  *
00010  * Libav is free software; you can redistribute it and/or
00011  * modify it under the terms of the GNU Lesser General Public
00012  * License as published by the Free Software Foundation; either
00013  * version 2.1 of the License, or (at your option) any later version.
00014  *
00015  * Libav is distributed in the hope that it will be useful,
00016  * but WITHOUT ANY WARRANTY; without even the implied warranty of
00017  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
00018  * Lesser General Public License for more details.
00019  *
00020  * You should have received a copy of the GNU Lesser General Public
00021  * License along with Libav; if not, write to the Free Software
00022  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
00023  */
00024 
00030 #include "libavutil/intmath.h"
00031 #include "libavutil/imgutils.h"
00032 #include "avcodec.h"
00033 #include "dsputil.h"
00034 #include "internal.h"
00035 #include "mpegvideo.h"
00036 #include "mpegvideo_common.h"
00037 #include "mjpegenc.h"
00038 #include "msmpeg4.h"
00039 #include "faandct.h"
00040 #include "xvmc_internal.h"
00041 #include "thread.h"
00042 #include <limits.h>
00043 
00044 //#undef NDEBUG
00045 //#include <assert.h>
00046 
00047 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
00048                                    DCTELEM *block, int n, int qscale);
00049 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
00050                                    DCTELEM *block, int n, int qscale);
00051 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
00052                                    DCTELEM *block, int n, int qscale);
00053 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
00054                                    DCTELEM *block, int n, int qscale);
00055 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
00056                                    DCTELEM *block, int n, int qscale);
00057 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
00058                                   DCTELEM *block, int n, int qscale);
00059 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
00060                                   DCTELEM *block, int n, int qscale);
00061 
00062 
00063 /* enable all paranoid tests for rounding, overflows, etc... */
00064 //#define PARANOID
00065 
00066 //#define DEBUG
00067 
00068 
00069 static const uint8_t ff_default_chroma_qscale_table[32] = {
00070 //   0   1   2   3   4   5   6   7   8   9  10  11  12  13  14  15
00071      0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15,
00072     16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
00073 };
00074 
00075 const uint8_t ff_mpeg1_dc_scale_table[128] = {
00076 //  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
00077     8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
00078     8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
00079     8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
00080     8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
00081     8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
00082     8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
00083     8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
00084     8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
00085 };
00086 
00087 static const uint8_t mpeg2_dc_scale_table1[128] = {
00088 //  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
00089     4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
00090     4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
00091     4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
00092     4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
00093     4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
00094     4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
00095     4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
00096     4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
00097 };
00098 
00099 static const uint8_t mpeg2_dc_scale_table2[128] = {
00100 //  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
00101     2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
00102     2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
00103     2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
00104     2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
00105     2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
00106     2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
00107     2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
00108     2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
00109 };
00110 
00111 static const uint8_t mpeg2_dc_scale_table3[128] = {
00112 //  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
00113     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
00114     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
00115     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
00116     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
00117     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
00118     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
00119     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
00120     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
00121 };
00122 
00123 const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
00124     ff_mpeg1_dc_scale_table,
00125     mpeg2_dc_scale_table1,
00126     mpeg2_dc_scale_table2,
00127     mpeg2_dc_scale_table3,
00128 };
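      /* The four tables above are indexed by the MPEG-2 intra_dc_precision field
       * (0-3); the DC quantizer scale is 2^(3 - intra_dc_precision), i.e. 8, 4, 2, 1. */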
00129 
00130 const enum PixelFormat ff_pixfmt_list_420[] = {
00131     PIX_FMT_YUV420P,
00132     PIX_FMT_NONE
00133 };
00134 
00135 const enum PixelFormat ff_hwaccel_pixfmt_list_420[] = {
00136     PIX_FMT_DXVA2_VLD,
00137     PIX_FMT_VAAPI_VLD,
00138     PIX_FMT_VDA_VLD,
00139     PIX_FMT_YUV420P,
00140     PIX_FMT_NONE
00141 };
00142 
00143 const uint8_t *avpriv_mpv_find_start_code(const uint8_t *restrict p,
00144                                           const uint8_t *end,
00145                                           uint32_t * restrict state)
00146 {
00147     int i;
00148 
00149     assert(p <= end);
00150     if (p >= end)
00151         return end;
00152 
00153     for (i = 0; i < 3; i++) {
00154         uint32_t tmp = *state << 8;
00155         *state = tmp + *(p++);
00156         if (tmp == 0x100 || p == end)
00157             return p;
00158     }
00159 
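          /* Fast scan for the next 00 00 01 start-code prefix: if p[-1] > 1 it can be
           * neither a zero byte nor the trailing 0x01 of a prefix, so the earliest a
           * prefix can end is at p + 2 and we may skip three bytes at once; the other
           * branches skip two or one bytes under the same reasoning. */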
00160     while (p < end) {
00161         if      (p[-1] > 1      ) p += 3;
00162         else if (p[-2]          ) p += 2;
00163         else if (p[-3]|(p[-1]-1)) p++;
00164         else {
00165             p++;
00166             break;
00167         }
00168     }
00169 
00170     p = FFMIN(p, end) - 4;
00171     *state = AV_RB32(p);
00172 
00173     return p + 4;
00174 }
00175 
00176 /* init common dct for both encoder and decoder */
00177 av_cold int ff_dct_common_init(MpegEncContext *s)
00178 {
00179     dsputil_init(&s->dsp, s->avctx);
00180 
00181     s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
00182     s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
00183     s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
00184     s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
00185     s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
00186     if (s->flags & CODEC_FLAG_BITEXACT)
00187         s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
00188     s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
00189 
00190 #if HAVE_MMX
00191     MPV_common_init_mmx(s);
00192 #elif ARCH_ALPHA
00193     MPV_common_init_axp(s);
00194 #elif CONFIG_MLIB
00195     MPV_common_init_mlib(s);
00196 #elif HAVE_MMI
00197     MPV_common_init_mmi(s);
00198 #elif ARCH_ARM
00199     MPV_common_init_arm(s);
00200 #elif HAVE_ALTIVEC
00201     MPV_common_init_altivec(s);
00202 #elif ARCH_BFIN
00203     MPV_common_init_bfin(s);
00204 #endif
00205 
00206     /* load & permute scantables
00207      * note: only wmv uses different ones
00208      */
00209     if (s->alternate_scan) {
00210         ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable  , ff_alternate_vertical_scan);
00211         ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable  , ff_alternate_vertical_scan);
00212     } else {
00213         ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable  , ff_zigzag_direct);
00214         ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable  , ff_zigzag_direct);
00215     }
00216     ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
00217     ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
00218 
00219     return 0;
00220 }
00221 
00222 void ff_copy_picture(Picture *dst, Picture *src)
00223 {
00224     *dst = *src;
00225     dst->f.type = FF_BUFFER_TYPE_COPY;
00226 }
00227 
00231 static void free_frame_buffer(MpegEncContext *s, Picture *pic)
00232 {
00233     /* Windows Media Image codecs allocate internal buffers with different
00234      * dimensions; ignore user defined callbacks for these
00235      */
00236     if (s->codec_id != CODEC_ID_WMV3IMAGE && s->codec_id != CODEC_ID_VC1IMAGE)
00237         ff_thread_release_buffer(s->avctx, (AVFrame *) pic);
00238     else
00239         avcodec_default_release_buffer(s->avctx, (AVFrame *) pic);
00240     av_freep(&pic->f.hwaccel_picture_private);
00241 }
00242 
00246 static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
00247 {
00248     int r;
00249 
00250     if (s->avctx->hwaccel) {
00251         assert(!pic->f.hwaccel_picture_private);
00252         if (s->avctx->hwaccel->priv_data_size) {
00253             pic->f.hwaccel_picture_private = av_mallocz(s->avctx->hwaccel->priv_data_size);
00254             if (!pic->f.hwaccel_picture_private) {
00255                 av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
00256                 return -1;
00257             }
00258         }
00259     }
00260 
00261     if (s->codec_id != CODEC_ID_WMV3IMAGE && s->codec_id != CODEC_ID_VC1IMAGE)
00262         r = ff_thread_get_buffer(s->avctx, (AVFrame *) pic);
00263     else
00264         r = avcodec_default_get_buffer(s->avctx, (AVFrame *) pic);
00265 
00266     if (r < 0 || !pic->f.type || !pic->f.data[0]) {
00267         av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %p)\n",
00268                r, pic->f.type, pic->f.data[0]);
00269         av_freep(&pic->f.hwaccel_picture_private);
00270         return -1;
00271     }
00272 
00273     if (s->linesize && (s->linesize   != pic->f.linesize[0] ||
00274                         s->uvlinesize != pic->f.linesize[1])) {
00275         av_log(s->avctx, AV_LOG_ERROR,
00276                "get_buffer() failed (stride changed)\n");
00277         free_frame_buffer(s, pic);
00278         return -1;
00279     }
00280 
00281     if (pic->f.linesize[1] != pic->f.linesize[2]) {
00282         av_log(s->avctx, AV_LOG_ERROR,
00283                "get_buffer() failed (uv stride mismatch)\n");
00284         free_frame_buffer(s, pic);
00285         return -1;
00286     }
00287 
00288     return 0;
00289 }
00290 
00295 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
00296 {
00297     const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
00298 
00299     // the + 1 is needed so memset(,,stride*height) does not sig11
00300 
00301     const int mb_array_size = s->mb_stride * s->mb_height;
00302     const int b8_array_size = s->b8_stride * s->mb_height * 2;
00303     const int b4_array_size = s->b4_stride * s->mb_height * 4;
00304     int i;
00305     int r = -1;
00306 
00307     if (shared) {
00308         assert(pic->f.data[0]);
00309         assert(pic->f.type == 0 || pic->f.type == FF_BUFFER_TYPE_SHARED);
00310         pic->f.type = FF_BUFFER_TYPE_SHARED;
00311     } else {
00312         assert(!pic->f.data[0]);
00313 
00314         if (alloc_frame_buffer(s, pic) < 0)
00315             return -1;
00316 
00317         s->linesize   = pic->f.linesize[0];
00318         s->uvlinesize = pic->f.linesize[1];
00319     }
00320 
00321     if (pic->f.qscale_table == NULL) {
00322         if (s->encoding) {
00323             FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_var,
00324                               mb_array_size * sizeof(int16_t), fail)
00325             FF_ALLOCZ_OR_GOTO(s->avctx, pic->mc_mb_var,
00326                               mb_array_size * sizeof(int16_t), fail)
00327             FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_mean,
00328                               mb_array_size * sizeof(int8_t ), fail)
00329         }
00330 
00331         FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.mbskip_table,
00332                           mb_array_size * sizeof(uint8_t) + 2, fail) // the + 2 is for the slice end check
00333         FF_ALLOCZ_OR_GOTO(s->avctx, pic->qscale_table_base,
00334                           (big_mb_num + s->mb_stride) * sizeof(uint8_t),
00335                           fail)
00336         FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_type_base,
00337                           (big_mb_num + s->mb_stride) * sizeof(uint32_t),
00338                           fail)
00339         pic->f.mb_type = pic->mb_type_base + 2 * s->mb_stride + 1;
00340         pic->f.qscale_table = pic->qscale_table_base + 2 * s->mb_stride + 1;
00341         if (s->out_format == FMT_H264) {
00342             for (i = 0; i < 2; i++) {
00343                 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i],
00344                                   2 * (b4_array_size + 4) * sizeof(int16_t),
00345                                   fail)
00346                 pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
00347                 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i],
00348                                   4 * mb_array_size * sizeof(uint8_t), fail)
00349             }
00350             pic->f.motion_subsample_log2 = 2;
00351         } else if (s->out_format == FMT_H263 || s->encoding ||
00352                    (s->avctx->debug & FF_DEBUG_MV) || s->avctx->debug_mv) {
00353             for (i = 0; i < 2; i++) {
00354                 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i],
00355                                   2 * (b8_array_size + 4) * sizeof(int16_t),
00356                                   fail)
00357                 pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
00358                 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i],
00359                                   4 * mb_array_size * sizeof(uint8_t), fail)
00360             }
00361             pic->f.motion_subsample_log2 = 3;
00362         }
00363         if (s->avctx->debug&FF_DEBUG_DCT_COEFF) {
00364             FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.dct_coeff,
00365                               64 * mb_array_size * sizeof(DCTELEM) * 6, fail)
00366         }
00367         pic->f.qstride = s->mb_stride;
00368         FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.pan_scan,
00369                           1 * sizeof(AVPanScan), fail)
00370     }
00371 
00372     pic->owner2 = s;
00373 
00374     return 0;
00375 fail: // for the FF_ALLOCZ_OR_GOTO macro
00376     if (r >= 0)
00377         free_frame_buffer(s, pic);
00378     return -1;
00379 }
00380 
00384 static void free_picture(MpegEncContext *s, Picture *pic)
00385 {
00386     int i;
00387 
00388     if (pic->f.data[0] && pic->f.type != FF_BUFFER_TYPE_SHARED) {
00389         free_frame_buffer(s, pic);
00390     }
00391 
00392     av_freep(&pic->mb_var);
00393     av_freep(&pic->mc_mb_var);
00394     av_freep(&pic->mb_mean);
00395     av_freep(&pic->f.mbskip_table);
00396     av_freep(&pic->qscale_table_base);
00397     av_freep(&pic->mb_type_base);
00398     av_freep(&pic->f.dct_coeff);
00399     av_freep(&pic->f.pan_scan);
00400     pic->f.mb_type = NULL;
00401     for (i = 0; i < 2; i++) {
00402         av_freep(&pic->motion_val_base[i]);
00403         av_freep(&pic->f.ref_index[i]);
00404     }
00405 
00406     if (pic->f.type == FF_BUFFER_TYPE_SHARED) {
00407         for (i = 0; i < 4; i++) {
00408             pic->f.base[i] =
00409             pic->f.data[i] = NULL;
00410         }
00411         pic->f.type = 0;
00412     }
00413 }
00414 
00415 static int init_duplicate_context(MpegEncContext *s, MpegEncContext *base)
00416 {
00417     int y_size = s->b8_stride * (2 * s->mb_height + 1);
00418     int c_size = s->mb_stride * (s->mb_height + 1);
00419     int yc_size = y_size + 2 * c_size;
00420     int i;
00421 
00422     // edge emu needs blocksize + filter length - 1
00423     // (= 17x17 for halfpel / 21x21 for h264)
00424     FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer,
00425                       (s->width + 64) * 2 * 21 * 2, fail);    // (width + edge + align)*interlaced*MBsize*tolerance
00426 
00427     // FIXME should be linesize instead of s->width * 2
00428     // but that is not known before get_buffer()
00429     FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad,
00430                       (s->width + 64) * 4 * 16 * 2 * sizeof(uint8_t), fail)
00431     s->me.temp         = s->me.scratchpad;
00432     s->rd_scratchpad   = s->me.scratchpad;
00433     s->b_scratchpad    = s->me.scratchpad;
00434     s->obmc_scratchpad = s->me.scratchpad + 16;
00435     if (s->encoding) {
00436         FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
00437                           ME_MAP_SIZE * sizeof(uint32_t), fail)
00438         FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
00439                           ME_MAP_SIZE * sizeof(uint32_t), fail)
00440         if (s->avctx->noise_reduction) {
00441             FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
00442                               2 * 64 * sizeof(int), fail)
00443         }
00444     }
00445     FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(DCTELEM), fail)
00446     s->block = s->blocks[0];
00447 
00448     for (i = 0; i < 12; i++) {
00449         s->pblocks[i] = &s->block[i];
00450     }
00451 
00452     if (s->out_format == FMT_H263) {
00453         /* ac values */
00454         FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
00455                           yc_size * sizeof(int16_t) * 16, fail);
00456         s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
00457         s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
00458         s->ac_val[2] = s->ac_val[1] + c_size;
00459     }
00460 
00461     return 0;
00462 fail:
00463     return -1; // free() through MPV_common_end()
00464 }
00465 
00466 static void free_duplicate_context(MpegEncContext *s)
00467 {
00468     if (s == NULL)
00469         return;
00470 
00471     av_freep(&s->edge_emu_buffer);
00472     av_freep(&s->me.scratchpad);
00473     s->me.temp =
00474     s->rd_scratchpad =
00475     s->b_scratchpad =
00476     s->obmc_scratchpad = NULL;
00477 
00478     av_freep(&s->dct_error_sum);
00479     av_freep(&s->me.map);
00480     av_freep(&s->me.score_map);
00481     av_freep(&s->blocks);
00482     av_freep(&s->ac_val_base);
00483     s->block = NULL;
00484 }
00485 
00486 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
00487 {
00488 #define COPY(a) bak->a = src->a
00489     COPY(edge_emu_buffer);
00490     COPY(me.scratchpad);
00491     COPY(me.temp);
00492     COPY(rd_scratchpad);
00493     COPY(b_scratchpad);
00494     COPY(obmc_scratchpad);
00495     COPY(me.map);
00496     COPY(me.score_map);
00497     COPY(blocks);
00498     COPY(block);
00499     COPY(start_mb_y);
00500     COPY(end_mb_y);
00501     COPY(me.map_generation);
00502     COPY(pb);
00503     COPY(dct_error_sum);
00504     COPY(dct_count[0]);
00505     COPY(dct_count[1]);
00506     COPY(ac_val_base);
00507     COPY(ac_val[0]);
00508     COPY(ac_val[1]);
00509     COPY(ac_val[2]);
00510 #undef COPY
00511 }
00512 
00513 void ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
00514 {
00515     MpegEncContext bak;
00516     int i;
00517     // FIXME copy only needed parts
00518     // START_TIMER
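          /* Save dst's per-thread buffers, copy the whole context from src, then
           * restore the saved buffers so each slice context keeps its own scratch
           * memory, block pointers and bitstream writer state. */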
00519     backup_duplicate_context(&bak, dst);
00520     memcpy(dst, src, sizeof(MpegEncContext));
00521     backup_duplicate_context(dst, &bak);
00522     for (i = 0; i < 12; i++) {
00523         dst->pblocks[i] = &dst->block[i];
00524     }
00525     // STOP_TIMER("update_duplicate_context")
00526     // about 10k cycles / 0.01 sec for 1000 frames on 1 GHz with 2 threads
00527 }
00528 
00529 int ff_mpeg_update_thread_context(AVCodecContext *dst,
00530                                   const AVCodecContext *src)
00531 {
00532     MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
00533 
00534     if (dst == src || !s1->context_initialized)
00535         return 0;
00536 
00537     // FIXME can parameters change on I-frames?
00538     // in that case dst may need a reinit
00539     if (!s->context_initialized) {
00540         memcpy(s, s1, sizeof(MpegEncContext));
00541 
00542         s->avctx                 = dst;
00543         s->picture_range_start  += MAX_PICTURE_COUNT;
00544         s->picture_range_end    += MAX_PICTURE_COUNT;
00545         s->bitstream_buffer      = NULL;
00546         s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
00547 
00548         MPV_common_init(s);
00549     }
00550 
00551     s->avctx->coded_height  = s1->avctx->coded_height;
00552     s->avctx->coded_width   = s1->avctx->coded_width;
00553     s->avctx->width         = s1->avctx->width;
00554     s->avctx->height        = s1->avctx->height;
00555 
00556     s->coded_picture_number = s1->coded_picture_number;
00557     s->picture_number       = s1->picture_number;
00558     s->input_picture_number = s1->input_picture_number;
00559 
00560     memcpy(s->picture, s1->picture, s1->picture_count * sizeof(Picture));
00561     memcpy(&s->last_picture, &s1->last_picture,
00562            (char *) &s1->last_picture_ptr - (char *) &s1->last_picture);
00563 
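          /* REBASE_PICTURE maps picture pointers that reference s1's picture array
           * onto the corresponding slots of this context's own array. */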
00564     s->last_picture_ptr    = REBASE_PICTURE(s1->last_picture_ptr,    s, s1);
00565     s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
00566     s->next_picture_ptr    = REBASE_PICTURE(s1->next_picture_ptr,    s, s1);
00567 
00568     // Error/bug resilience
00569     s->next_p_frame_damaged = s1->next_p_frame_damaged;
00570     s->workaround_bugs      = s1->workaround_bugs;
00571 
00572     // MPEG4 timing info
00573     memcpy(&s->time_increment_bits, &s1->time_increment_bits,
00574            (char *) &s1->shape - (char *) &s1->time_increment_bits);
00575 
00576     // B-frame info
00577     s->max_b_frames = s1->max_b_frames;
00578     s->low_delay    = s1->low_delay;
00579     s->dropable     = s1->dropable;
00580 
00581     // DivX handling (doesn't work)
00582     s->divx_packed  = s1->divx_packed;
00583 
00584     if (s1->bitstream_buffer) {
00585         if (s1->bitstream_buffer_size +
00586             FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
00587             av_fast_malloc(&s->bitstream_buffer,
00588                            &s->allocated_bitstream_buffer_size,
00589                            s1->allocated_bitstream_buffer_size);
00590         s->bitstream_buffer_size = s1->bitstream_buffer_size;
00591         memcpy(s->bitstream_buffer, s1->bitstream_buffer,
00592                s1->bitstream_buffer_size);
00593         memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
00594                FF_INPUT_BUFFER_PADDING_SIZE);
00595     }
00596 
00597     // MPEG2/interlacing info
00598     memcpy(&s->progressive_sequence, &s1->progressive_sequence,
00599            (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
00600 
00601     if (!s1->first_field) {
00602         s->last_pict_type = s1->pict_type;
00603         if (s1->current_picture_ptr)
00604             s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f.quality;
00605 
00606         if (s1->pict_type != AV_PICTURE_TYPE_B) {
00607             s->last_non_b_pict_type = s1->pict_type;
00608         }
00609     }
00610 
00611     return 0;
00612 }
00613 
00620 void MPV_common_defaults(MpegEncContext *s)
00621 {
00622     s->y_dc_scale_table      =
00623     s->c_dc_scale_table      = ff_mpeg1_dc_scale_table;
00624     s->chroma_qscale_table   = ff_default_chroma_qscale_table;
00625     s->progressive_frame     = 1;
00626     s->progressive_sequence  = 1;
00627     s->picture_structure     = PICT_FRAME;
00628 
00629     s->coded_picture_number  = 0;
00630     s->picture_number        = 0;
00631     s->input_picture_number  = 0;
00632 
00633     s->picture_in_gop_number = 0;
00634 
00635     s->f_code                = 1;
00636     s->b_code                = 1;
00637 
00638     s->picture_range_start   = 0;
00639     s->picture_range_end     = MAX_PICTURE_COUNT;
00640 
00641     s->slice_context_count   = 1;
00642 }
00643 
00649 void MPV_decode_defaults(MpegEncContext *s)
00650 {
00651     MPV_common_defaults(s);
00652 }
00653 
00658 av_cold int MPV_common_init(MpegEncContext *s)
00659 {
00660     int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
00661     int nb_slices = (HAVE_THREADS &&
00662                      s->avctx->active_thread_type & FF_THREAD_SLICE) ?
00663                     s->avctx->thread_count : 1;
00664 
00665     if (s->encoding && s->avctx->slices)
00666         nb_slices = s->avctx->slices;
00667 
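          /* Interlaced MPEG-2 may code a frame as two field pictures, so the
           * macroblock height is rounded up to an even number of rows
           * (i.e. whole 32-line macroblock row pairs). */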
00668     if (s->codec_id == CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
00669         s->mb_height = (s->height + 31) / 32 * 2;
00670     else if (s->codec_id != CODEC_ID_H264)
00671         s->mb_height = (s->height + 15) / 16;
00672 
00673     if (s->avctx->pix_fmt == PIX_FMT_NONE) {
00674         av_log(s->avctx, AV_LOG_ERROR,
00675                "decoding to PIX_FMT_NONE is not supported.\n");
00676         return -1;
00677     }
00678 
00679     if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
00680         int max_slices;
00681         if (s->mb_height)
00682             max_slices = FFMIN(MAX_THREADS, s->mb_height);
00683         else
00684             max_slices = MAX_THREADS;
00685         av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
00686                " reducing to %d\n", nb_slices, max_slices);
00687         nb_slices = max_slices;
00688     }
00689 
00690     if ((s->width || s->height) &&
00691         av_image_check_size(s->width, s->height, 0, s->avctx))
00692         return -1;
00693 
00694     ff_dct_common_init(s);
00695 
00696     s->flags  = s->avctx->flags;
00697     s->flags2 = s->avctx->flags2;
00698 
00699     if (s->width && s->height) {
00700         s->mb_width   = (s->width + 15) / 16;
00701         s->mb_stride  = s->mb_width + 1;
00702         s->b8_stride  = s->mb_width * 2 + 1;
00703         s->b4_stride  = s->mb_width * 4 + 1;
00704         mb_array_size = s->mb_height * s->mb_stride;
00705         mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
00706 
00707         /* set chroma shifts */
00708         avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &s->chroma_x_shift,
00709                                       &s->chroma_y_shift);
00710 
00711         /* set default edge pos, will be overridden
00712          * in decode_header if needed */
00713         s->h_edge_pos = s->mb_width * 16;
00714         s->v_edge_pos = s->mb_height * 16;
00715 
00716         s->mb_num     = s->mb_width * s->mb_height;
00717 
00718         s->block_wrap[0] =
00719         s->block_wrap[1] =
00720         s->block_wrap[2] =
00721         s->block_wrap[3] = s->b8_stride;
00722         s->block_wrap[4] =
00723         s->block_wrap[5] = s->mb_stride;
00724 
00725         y_size  = s->b8_stride * (2 * s->mb_height + 1);
00726         c_size  = s->mb_stride * (s->mb_height + 1);
00727         yc_size = y_size + 2   * c_size;
00728 
00729         /* convert fourcc to upper case */
00730         s->codec_tag          = avpriv_toupper4(s->avctx->codec_tag);
00731 
00732         s->stream_codec_tag   = avpriv_toupper4(s->avctx->stream_codec_tag);
00733 
00734         s->avctx->coded_frame = (AVFrame *)&s->current_picture;
00735 
00736         FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int),
00737                           fail); // error resilience code looks cleaner with this
00738         for (y = 0; y < s->mb_height; y++)
00739             for (x = 0; x < s->mb_width; x++)
00740                 s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
00741 
00742         s->mb_index2xy[s->mb_height * s->mb_width] =
00743                        (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
00744 
00745         if (s->encoding) {
00746             /* Allocate MV tables */
00747             FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base,
00748                               mv_table_size * 2 * sizeof(int16_t), fail);
00749             FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base,
00750                               mv_table_size * 2 * sizeof(int16_t), fail);
00751             FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base,
00752                               mv_table_size * 2 * sizeof(int16_t), fail);
00753             FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base,
00754                               mv_table_size * 2 * sizeof(int16_t), fail);
00755             FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base,
00756                               mv_table_size * 2 * sizeof(int16_t), fail);
00757             FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base,
00758                               mv_table_size * 2 * sizeof(int16_t), fail);
00759             s->p_mv_table            = s->p_mv_table_base +
00760                                        s->mb_stride + 1;
00761             s->b_forw_mv_table       = s->b_forw_mv_table_base +
00762                                        s->mb_stride + 1;
00763             s->b_back_mv_table       = s->b_back_mv_table_base +
00764                                        s->mb_stride + 1;
00765             s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base +
00766                                        s->mb_stride + 1;
00767             s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base +
00768                                        s->mb_stride + 1;
00769             s->b_direct_mv_table     = s->b_direct_mv_table_base +
00770                                        s->mb_stride + 1;
00771 
00772             if (s->msmpeg4_version) {
00773                 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats,
00774                                   2 * 2 * (MAX_LEVEL + 1) *
00775                                   (MAX_RUN + 1) * 2 * sizeof(int), fail);
00776             }
00777             FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
00778 
00779             /* Allocate MB type table */
00780             FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size *
00781                               sizeof(uint16_t), fail); // needed for encoding
00782 
00783             FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size *
00784                               sizeof(int), fail);
00785 
00786             FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix,
00787                               64 * 32   * sizeof(int), fail);
00788             FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix,
00789                               64 * 32   * sizeof(int), fail);
00790             FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16,
00791                               64 * 32 * 2 * sizeof(uint16_t), fail);
00792             FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16,
00793                               64 * 32 * 2 * sizeof(uint16_t), fail);
00794             FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture,
00795                               MAX_PICTURE_COUNT * sizeof(Picture *), fail);
00796             FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture,
00797                               MAX_PICTURE_COUNT * sizeof(Picture *), fail);
00798 
00799             if (s->avctx->noise_reduction) {
00800                 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset,
00801                                   2 * 64 * sizeof(uint16_t), fail);
00802             }
00803         }
00804     }
00805 
00806     s->picture_count = MAX_PICTURE_COUNT * FFMAX(1, s->avctx->thread_count);
00807     FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
00808                       s->picture_count * sizeof(Picture), fail);
00809     for (i = 0; i < s->picture_count; i++) {
00810         avcodec_get_frame_defaults((AVFrame *) &s->picture[i]);
00811     }
00812 
00813     if (s->width && s->height) {
00814         FF_ALLOCZ_OR_GOTO(s->avctx, s->error_status_table,
00815                           mb_array_size * sizeof(uint8_t), fail);
00816 
00817         if (s->codec_id == CODEC_ID_MPEG4 ||
00818             (s->flags & CODEC_FLAG_INTERLACED_ME)) {
00819             /* interlaced direct mode decoding tables */
00820             for (i = 0; i < 2; i++) {
00821                 int j, k;
00822                 for (j = 0; j < 2; j++) {
00823                     for (k = 0; k < 2; k++) {
00824                         FF_ALLOCZ_OR_GOTO(s->avctx,
00825                                           s->b_field_mv_table_base[i][j][k],
00826                                           mv_table_size * 2 * sizeof(int16_t),
00827                                           fail);
00828                         s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
00829                                                        s->mb_stride + 1;
00830                     }
00831                     FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j],
00832                                       mb_array_size * 2 * sizeof(uint8_t),
00833                                       fail);
00834                     FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j],
00835                                       mv_table_size * 2 * sizeof(int16_t),
00836                                       fail);
00837                     s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j]
00838                                                 + s->mb_stride + 1;
00839                 }
00840                 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i],
00841                                   mb_array_size * 2 * sizeof(uint8_t),
00842                                   fail);
00843             }
00844         }
00845         if (s->out_format == FMT_H263) {
00846             /* cbp values */
00847             FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
00848             s->coded_block = s->coded_block_base + s->b8_stride + 1;
00849 
00850             /* cbp, ac_pred, pred_dir */
00851             FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table,
00852                               mb_array_size * sizeof(uint8_t), fail);
00853             FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table,
00854                               mb_array_size * sizeof(uint8_t), fail);
00855         }
00856 
00857         if (s->h263_pred || s->h263_plus || !s->encoding) {
00858             /* dc values */
00859             // MN: we need these for error resilience of intra-frames
00860             FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base,
00861                               yc_size * sizeof(int16_t), fail);
00862             s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
00863             s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
00864             s->dc_val[2] = s->dc_val[1] + c_size;
00865             for (i = 0; i < yc_size; i++)
00866                 s->dc_val_base[i] = 1024;
00867         }
00868 
00869         /* which mb is an intra block */
00870         FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
00871         memset(s->mbintra_table, 1, mb_array_size);
00872 
00873         /* init macroblock skip table */
00874         FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
00875         // Note the + 1 is for a quicker mpeg4 slice_end detection
00876 
00877         s->parse_context.state = -1;
00878         if ((s->avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
00879             s->avctx->debug_mv) {
00880             s->visualization_buffer[0] = av_malloc((s->mb_width * 16 +
00881                         2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
00882             s->visualization_buffer[1] = av_malloc((s->mb_width * 16 +
00883                         2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
00884             s->visualization_buffer[2] = av_malloc((s->mb_width * 16 +
00885                         2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
00886         }
00887     }
00888 
00889     s->context_initialized = 1;
00890     s->thread_context[0]   = s;
00891 
00892     if (s->width && s->height) {
00893         if (nb_slices > 1) {
00894             for (i = 1; i < nb_slices; i++) {
00895                 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
00896                 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
00897             }
00898 
00899             for (i = 0; i < nb_slices; i++) {
00900                 if (init_duplicate_context(s->thread_context[i], s) < 0)
00901                     goto fail;
00902                 s->thread_context[i]->start_mb_y =
00903                     (s->mb_height * (i) + nb_slices / 2) / nb_slices;
00904                 s->thread_context[i]->end_mb_y   =
00905                     (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
00906             }
00907         } else {
00908             if (init_duplicate_context(s, s) < 0)
00909                 goto fail;
00910             s->start_mb_y = 0;
00911             s->end_mb_y   = s->mb_height;
00912         }
00913         s->slice_context_count = nb_slices;
00914     }
00915 
00916     return 0;
00917  fail:
00918     MPV_common_end(s);
00919     return -1;
00920 }
00921 
00922 /* init common structure for both encoder and decoder */
00923 void MPV_common_end(MpegEncContext *s)
00924 {
00925     int i, j, k;
00926 
00927     if (s->slice_context_count > 1) {
00928         for (i = 0; i < s->slice_context_count; i++) {
00929             free_duplicate_context(s->thread_context[i]);
00930         }
00931         for (i = 1; i < s->slice_context_count; i++) {
00932             av_freep(&s->thread_context[i]);
00933         }
00934         s->slice_context_count = 1;
00935     } else free_duplicate_context(s);
00936 
00937     av_freep(&s->parse_context.buffer);
00938     s->parse_context.buffer_size = 0;
00939 
00940     av_freep(&s->mb_type);
00941     av_freep(&s->p_mv_table_base);
00942     av_freep(&s->b_forw_mv_table_base);
00943     av_freep(&s->b_back_mv_table_base);
00944     av_freep(&s->b_bidir_forw_mv_table_base);
00945     av_freep(&s->b_bidir_back_mv_table_base);
00946     av_freep(&s->b_direct_mv_table_base);
00947     s->p_mv_table            = NULL;
00948     s->b_forw_mv_table       = NULL;
00949     s->b_back_mv_table       = NULL;
00950     s->b_bidir_forw_mv_table = NULL;
00951     s->b_bidir_back_mv_table = NULL;
00952     s->b_direct_mv_table     = NULL;
00953     for (i = 0; i < 2; i++) {
00954         for (j = 0; j < 2; j++) {
00955             for (k = 0; k < 2; k++) {
00956                 av_freep(&s->b_field_mv_table_base[i][j][k]);
00957                 s->b_field_mv_table[i][j][k] = NULL;
00958             }
00959             av_freep(&s->b_field_select_table[i][j]);
00960             av_freep(&s->p_field_mv_table_base[i][j]);
00961             s->p_field_mv_table[i][j] = NULL;
00962         }
00963         av_freep(&s->p_field_select_table[i]);
00964     }
00965 
00966     av_freep(&s->dc_val_base);
00967     av_freep(&s->coded_block_base);
00968     av_freep(&s->mbintra_table);
00969     av_freep(&s->cbp_table);
00970     av_freep(&s->pred_dir_table);
00971 
00972     av_freep(&s->mbskip_table);
00973     av_freep(&s->bitstream_buffer);
00974     s->allocated_bitstream_buffer_size = 0;
00975 
00976     av_freep(&s->avctx->stats_out);
00977     av_freep(&s->ac_stats);
00978     av_freep(&s->error_status_table);
00979     av_freep(&s->mb_index2xy);
00980     av_freep(&s->lambda_table);
00981     av_freep(&s->q_intra_matrix);
00982     av_freep(&s->q_inter_matrix);
00983     av_freep(&s->q_intra_matrix16);
00984     av_freep(&s->q_inter_matrix16);
00985     av_freep(&s->input_picture);
00986     av_freep(&s->reordered_input_picture);
00987     av_freep(&s->dct_offset);
00988 
00989     if (s->picture && !s->avctx->internal->is_copy) {
00990         for (i = 0; i < s->picture_count; i++) {
00991             free_picture(s, &s->picture[i]);
00992         }
00993     }
00994     av_freep(&s->picture);
00995     s->context_initialized      = 0;
00996     s->last_picture_ptr         =
00997     s->next_picture_ptr         =
00998     s->current_picture_ptr      = NULL;
00999     s->linesize = s->uvlinesize = 0;
01000 
01001     for (i = 0; i < 3; i++)
01002         av_freep(&s->visualization_buffer[i]);
01003 
01004     if (!(s->avctx->active_thread_type & FF_THREAD_FRAME))
01005         avcodec_default_free_buffers(s->avctx);
01006 }
01007 
01008 void ff_init_rl(RLTable *rl,
01009                 uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
01010 {
01011     int8_t  max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
01012     uint8_t index_run[MAX_RUN + 1];
01013     int last, run, level, start, end, i;
01014 
01015     /* If table is static, we can quit if rl->max_level[0] is not NULL */
01016     if (static_store && rl->max_level[0])
01017         return;
01018 
01019     /* compute max_level[], max_run[] and index_run[] */
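          /* max_level[run]: largest level coded with that run length,
           * max_run[level]: longest run coded with that level,
           * index_run[run]: first table index using that run (rl->n if unused). */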
01020     for (last = 0; last < 2; last++) {
01021         if (last == 0) {
01022             start = 0;
01023             end = rl->last;
01024         } else {
01025             start = rl->last;
01026             end = rl->n;
01027         }
01028 
01029         memset(max_level, 0, MAX_RUN + 1);
01030         memset(max_run, 0, MAX_LEVEL + 1);
01031         memset(index_run, rl->n, MAX_RUN + 1);
01032         for (i = start; i < end; i++) {
01033             run   = rl->table_run[i];
01034             level = rl->table_level[i];
01035             if (index_run[run] == rl->n)
01036                 index_run[run] = i;
01037             if (level > max_level[run])
01038                 max_level[run] = level;
01039             if (run > max_run[level])
01040                 max_run[level] = run;
01041         }
01042         if (static_store)
01043             rl->max_level[last] = static_store[last];
01044         else
01045             rl->max_level[last] = av_malloc(MAX_RUN + 1);
01046         memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
01047         if (static_store)
01048             rl->max_run[last]   = static_store[last] + MAX_RUN + 1;
01049         else
01050             rl->max_run[last]   = av_malloc(MAX_LEVEL + 1);
01051         memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
01052         if (static_store)
01053             rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
01054         else
01055             rl->index_run[last] = av_malloc(MAX_RUN + 1);
01056         memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
01057     }
01058 }
01059 
01060 void ff_init_vlc_rl(RLTable *rl)
01061 {
01062     int i, q;
01063 
01064     for (q = 0; q < 32; q++) {
01065         int qmul = q * 2;
01066         int qadd = (q - 1) | 1;
01067 
01068         if (q == 0) {
01069             qmul = 1;
01070             qadd = 0;
01071         }
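              /* Pre-compute one RL_VLC table per qscale with H.263-style
               * dequantization (level * 2 * q + ((q - 1) | 1)) folded in;
               * run = 66 marks escape/invalid codes and +192 flags "last"
               * coefficients. */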
01072         for (i = 0; i < rl->vlc.table_size; i++) {
01073             int code = rl->vlc.table[i][0];
01074             int len  = rl->vlc.table[i][1];
01075             int level, run;
01076 
01077             if (len == 0) { // illegal code
01078                 run   = 66;
01079                 level = MAX_LEVEL;
01080             } else if (len < 0) { // more bits needed
01081                 run   = 0;
01082                 level = code;
01083             } else {
01084                 if (code == rl->n) { // esc
01085                     run   = 66;
01086                     level =  0;
01087                 } else {
01088                     run   = rl->table_run[code] + 1;
01089                     level = rl->table_level[code] * qmul + qadd;
01090                     if (code >= rl->last) run += 192;
01091                 }
01092             }
01093             rl->rl_vlc[q][i].len   = len;
01094             rl->rl_vlc[q][i].level = level;
01095             rl->rl_vlc[q][i].run   = run;
01096         }
01097     }
01098 }
01099 
01100 void ff_release_unused_pictures(MpegEncContext*s, int remove_current)
01101 {
01102     int i;
01103 
01104     /* release non reference frames */
01105     for (i = 0; i < s->picture_count; i++) {
01106         if (s->picture[i].f.data[0] && !s->picture[i].f.reference &&
01107             (!s->picture[i].owner2 || s->picture[i].owner2 == s) &&
01108             (remove_current || &s->picture[i] !=  s->current_picture_ptr)
01109             /* && s->picture[i].type!= FF_BUFFER_TYPE_SHARED */) {
01110             free_frame_buffer(s, &s->picture[i]);
01111         }
01112     }
01113 }
01114 
01115 int ff_find_unused_picture(MpegEncContext *s, int shared)
01116 {
01117     int i;
01118 
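          /* A shared picture needs a slot not owned by the internal allocator
           * (type == 0); otherwise slots that already went through get_buffer()
           * are preferred over completely untouched entries. */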
01119     if (shared) {
01120         for (i = s->picture_range_start; i < s->picture_range_end; i++) {
01121             if (s->picture[i].f.data[0] == NULL && s->picture[i].f.type == 0)
01122                 return i;
01123         }
01124     } else {
01125         for (i = s->picture_range_start; i < s->picture_range_end; i++) {
01126             if (s->picture[i].f.data[0] == NULL && s->picture[i].f.type != 0)
01127                 return i; // FIXME
01128         }
01129         for (i = s->picture_range_start; i < s->picture_range_end; i++) {
01130             if (s->picture[i].f.data[0] == NULL)
01131                 return i;
01132         }
01133     }
01134 
01135     return AVERROR_INVALIDDATA;
01136 }
01137 
01138 static void update_noise_reduction(MpegEncContext *s)
01139 {
01140     int intra, i;
01141 
01142     for (intra = 0; intra < 2; intra++) {
01143         if (s->dct_count[intra] > (1 << 16)) {
01144             for (i = 0; i < 64; i++) {
01145                 s->dct_error_sum[intra][i] >>= 1;
01146             }
01147             s->dct_count[intra] >>= 1;
01148         }
01149 
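              /* The bias is roughly noise_reduction * count / accumulated error:
               * coefficients with a large accumulated error get a smaller offset. */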
01150         for (i = 0; i < 64; i++) {
01151             s->dct_offset[intra][i] = (s->avctx->noise_reduction *
01152                                        s->dct_count[intra] +
01153                                        s->dct_error_sum[intra][i] / 2) /
01154                                       (s->dct_error_sum[intra][i] + 1);
01155         }
01156     }
01157 }
01158 
01163 int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
01164 {
01165     int i;
01166     Picture *pic;
01167     s->mb_skipped = 0;
01168 
01169     assert(s->last_picture_ptr == NULL || s->out_format != FMT_H264 ||
01170            s->codec_id == CODEC_ID_SVQ3);
01171 
01172     /* mark & release old frames */
01173     if (s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3) {
01174         if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
01175             s->last_picture_ptr != s->next_picture_ptr &&
01176             s->last_picture_ptr->f.data[0]) {
01177             if (s->last_picture_ptr->owner2 == s)
01178                 free_frame_buffer(s, s->last_picture_ptr);
01179         }
01180 
01181         /* release forgotten pictures */
01182         /* if (mpeg124/h263) */
01183         if (!s->encoding) {
01184             for (i = 0; i < s->picture_count; i++) {
01185                 if (s->picture[i].owner2 == s && s->picture[i].f.data[0] &&
01186                     &s->picture[i] != s->last_picture_ptr &&
01187                     &s->picture[i] != s->next_picture_ptr &&
01188                     s->picture[i].f.reference) {
01189                     if (!(avctx->active_thread_type & FF_THREAD_FRAME))
01190                         av_log(avctx, AV_LOG_ERROR,
01191                                "releasing zombie picture\n");
01192                     free_frame_buffer(s, &s->picture[i]);
01193                 }
01194             }
01195         }
01196     }
01197 
01198     if (!s->encoding) {
01199         ff_release_unused_pictures(s, 1);
01200 
01201         if (s->current_picture_ptr &&
01202             s->current_picture_ptr->f.data[0] == NULL) {
01203             // we already have an unused image
01204             // (maybe it was set before reading the header)
01205             pic = s->current_picture_ptr;
01206         } else {
01207             i   = ff_find_unused_picture(s, 0);
01208             pic = &s->picture[i];
01209         }
01210 
01211         pic->f.reference = 0;
01212         if (!s->dropable) {
01213             if (s->codec_id == CODEC_ID_H264)
01214                 pic->f.reference = s->picture_structure;
01215             else if (s->pict_type != AV_PICTURE_TYPE_B)
01216                 pic->f.reference = 3;
01217         }
01218 
01219         pic->f.coded_picture_number = s->coded_picture_number++;
01220 
01221         if (ff_alloc_picture(s, pic, 0) < 0)
01222             return -1;
01223 
01224         s->current_picture_ptr = pic;
01225         // FIXME use only the vars from current_pic
01226         s->current_picture_ptr->f.top_field_first = s->top_field_first;
01227         if (s->codec_id == CODEC_ID_MPEG1VIDEO ||
01228             s->codec_id == CODEC_ID_MPEG2VIDEO) {
01229             if (s->picture_structure != PICT_FRAME)
01230                 s->current_picture_ptr->f.top_field_first =
01231                     (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
01232         }
01233         s->current_picture_ptr->f.interlaced_frame = !s->progressive_frame &&
01234                                                      !s->progressive_sequence;
01235         s->current_picture_ptr->field_picture      =  s->picture_structure != PICT_FRAME;
01236     }
01237 
01238     s->current_picture_ptr->f.pict_type = s->pict_type;
01239     // if (s->flags && CODEC_FLAG_QSCALE)
01240     //     s->current_picture_ptr->quality = s->new_picture_ptr->quality;
01241     s->current_picture_ptr->f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
01242 
01243     ff_copy_picture(&s->current_picture, s->current_picture_ptr);
01244 
01245     if (s->pict_type != AV_PICTURE_TYPE_B) {
01246         s->last_picture_ptr = s->next_picture_ptr;
01247         if (!s->dropable)
01248             s->next_picture_ptr = s->current_picture_ptr;
01249     }
01250     /* av_log(s->avctx, AV_LOG_DEBUG, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
01251            s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
01252            s->last_picture_ptr    ? s->last_picture_ptr->f.data[0]    : NULL,
01253            s->next_picture_ptr    ? s->next_picture_ptr->f.data[0]    : NULL,
01254            s->current_picture_ptr ? s->current_picture_ptr->f.data[0] : NULL,
01255            s->pict_type, s->dropable); */
01256 
01257     if (s->codec_id != CODEC_ID_H264) {
01258         if ((s->last_picture_ptr == NULL ||
01259              s->last_picture_ptr->f.data[0] == NULL) &&
01260             (s->pict_type != AV_PICTURE_TYPE_I ||
01261              s->picture_structure != PICT_FRAME)) {
01262             if (s->pict_type != AV_PICTURE_TYPE_I)
01263                 av_log(avctx, AV_LOG_ERROR,
01264                        "warning: first frame is no keyframe\n");
01265             else if (s->picture_structure != PICT_FRAME)
01266                 av_log(avctx, AV_LOG_INFO,
01267                        "allocate dummy last picture for field based first keyframe\n");
01268 
01269             /* Allocate a dummy frame */
01270             i = ff_find_unused_picture(s, 0);
01271             s->last_picture_ptr = &s->picture[i];
01272             if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0)
01273                 return -1;
01274             ff_thread_report_progress((AVFrame *) s->last_picture_ptr,
01275                                       INT_MAX, 0);
01276             ff_thread_report_progress((AVFrame *) s->last_picture_ptr,
01277                                       INT_MAX, 1);
01278         }
01279         if ((s->next_picture_ptr == NULL ||
01280              s->next_picture_ptr->f.data[0] == NULL) &&
01281             s->pict_type == AV_PICTURE_TYPE_B) {
01282             /* Allocate a dummy frame */
01283             i = ff_find_unused_picture(s, 0);
01284             s->next_picture_ptr = &s->picture[i];
01285             if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0)
01286                 return -1;
01287             ff_thread_report_progress((AVFrame *) s->next_picture_ptr,
01288                                       INT_MAX, 0);
01289             ff_thread_report_progress((AVFrame *) s->next_picture_ptr,
01290                                       INT_MAX, 1);
01291         }
01292     }
01293 
01294     if (s->last_picture_ptr)
01295         ff_copy_picture(&s->last_picture, s->last_picture_ptr);
01296     if (s->next_picture_ptr)
01297         ff_copy_picture(&s->next_picture, s->next_picture_ptr);
01298 
01299     if (HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_FRAME) &&
01300         (s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3)) {
01301         if (s->next_picture_ptr)
01302             s->next_picture_ptr->owner2 = s;
01303         if (s->last_picture_ptr)
01304             s->last_picture_ptr->owner2 = s;
01305     }
01306 
01307     assert(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
01308                                                  s->last_picture_ptr->f.data[0]));
01309 
01310     if (s->picture_structure!= PICT_FRAME && s->out_format != FMT_H264) {
01311         int i;
01312         for (i = 0; i < 4; i++) {
01313             if (s->picture_structure == PICT_BOTTOM_FIELD) {
01314                 s->current_picture.f.data[i] +=
01315                     s->current_picture.f.linesize[i];
01316             }
01317             s->current_picture.f.linesize[i] *= 2;
01318             s->last_picture.f.linesize[i]    *= 2;
01319             s->next_picture.f.linesize[i]    *= 2;
01320         }
01321     }
01322 
01323     s->err_recognition = avctx->err_recognition;
01324 
01325     /* set dequantizer, we can't do it during init as
01326      * it might change for mpeg4 and we can't do it in the header
01327      * decode as init is not called for mpeg4 there yet */
01328     if (s->mpeg_quant || s->codec_id == CODEC_ID_MPEG2VIDEO) {
01329         s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
01330         s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
01331     } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
01332         s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
01333         s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
01334     } else {
01335         s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
01336         s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
01337     }
01338 
01339     if (s->dct_error_sum) {
01340         assert(s->avctx->noise_reduction && s->encoding);
01341         update_noise_reduction(s);
01342     }
01343 
01344     if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
01345         return ff_xvmc_field_start(s, avctx);
01346 
01347     return 0;
01348 }
01349 
01350 /* generic function for encode/decode called after a
01351  * frame has been coded/decoded. */
01352 void MPV_frame_end(MpegEncContext *s)
01353 {
01354     int i;
01355     /* redraw edges for the frame if decoding didn't complete */
01356     // just to make sure that all data is rendered.
01357     if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration) {
01358         ff_xvmc_field_end(s);
01359     } else if ((s->error_count || s->encoding) &&
01360               !s->avctx->hwaccel &&
01361               !(s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) &&
01362               s->unrestricted_mv &&
01363               s->current_picture.f.reference &&
01364               !s->intra_only &&
01365               !(s->flags & CODEC_FLAG_EMU_EDGE)) {
01366         int hshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_w;
01367         int vshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_h;
01368         s->dsp.draw_edges(s->current_picture.f.data[0], s->linesize,
01369                           s->h_edge_pos, s->v_edge_pos,
01370                           EDGE_WIDTH, EDGE_WIDTH,
01371                           EDGE_TOP | EDGE_BOTTOM);
01372         s->dsp.draw_edges(s->current_picture.f.data[1], s->uvlinesize,
01373                           s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
01374                           EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
01375                           EDGE_TOP | EDGE_BOTTOM);
01376         s->dsp.draw_edges(s->current_picture.f.data[2], s->uvlinesize,
01377                           s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
01378                           EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
01379                           EDGE_TOP | EDGE_BOTTOM);
01380     }
01381 
01382     emms_c();
01383 
01384     s->last_pict_type                 = s->pict_type;
01385     s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f.quality;
01386     if (s->pict_type!= AV_PICTURE_TYPE_B) {
01387         s->last_non_b_pict_type = s->pict_type;
01388     }
01389 #if 0
01390     /* copy back current_picture variables */
01391     for (i = 0; i < MAX_PICTURE_COUNT; i++) {
01392         if (s->picture[i].f.data[0] == s->current_picture.f.data[0]) {
01393             s->picture[i] = s->current_picture;
01394             break;
01395         }
01396     }
01397     assert(i < MAX_PICTURE_COUNT);
01398 #endif
01399 
01400     if (s->encoding) {
01401         /* release non-reference frames */
01402         for (i = 0; i < s->picture_count; i++) {
01403             if (s->picture[i].f.data[0] && !s->picture[i].f.reference
01404                 /* && s->picture[i].type != FF_BUFFER_TYPE_SHARED */) {
01405                 free_frame_buffer(s, &s->picture[i]);
01406             }
01407         }
01408     }
01409     // clear copies, to avoid confusion
01410 #if 0
01411     memset(&s->last_picture,    0, sizeof(Picture));
01412     memset(&s->next_picture,    0, sizeof(Picture));
01413     memset(&s->current_picture, 0, sizeof(Picture));
01414 #endif
01415     s->avctx->coded_frame = (AVFrame *) s->current_picture_ptr;
01416 
01417     if (s->codec_id != CODEC_ID_H264 && s->current_picture.f.reference) {
01418         ff_thread_report_progress((AVFrame *) s->current_picture_ptr, INT_MAX, 0);
01419     }
01420 }
01421 
01429 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey,
01430                       int w, int h, int stride, int color)
01431 {
01432     int x, y, fr, f;
01433 
01434     sx = av_clip(sx, 0, w - 1);
01435     sy = av_clip(sy, 0, h - 1);
01436     ex = av_clip(ex, 0, w - 1);
01437     ey = av_clip(ey, 0, h - 1);
01438 
01439     buf[sy * stride + sx] += color;
01440 
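      /* walk along the major axis using a 16.16 fixed-point slope; the
       * fractional part fr splits the colour between the two pixels nearest
       * to the ideal line */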
01441     if (FFABS(ex - sx) > FFABS(ey - sy)) {
01442         if (sx > ex) {
01443             FFSWAP(int, sx, ex);
01444             FFSWAP(int, sy, ey);
01445         }
01446         buf += sx + sy * stride;
01447         ex  -= sx;
01448         f    = ((ey - sy) << 16) / ex;
01449         for (x = 0; x <= ex; x++) {
01450             y  = (x * f) >> 16;
01451             fr = (x * f) & 0xFFFF;
01452             buf[y * stride + x]       += (color * (0x10000 - fr)) >> 16;
01453             buf[(y + 1) * stride + x] += (color *            fr ) >> 16;
01454         }
01455     } else {
01456         if (sy > ey) {
01457             FFSWAP(int, sx, ex);
01458             FFSWAP(int, sy, ey);
01459         }
01460         buf += sx + sy * stride;
01461         ey  -= sy;
01462         if (ey)
01463             f  = ((ex - sx) << 16) / ey;
01464         else
01465             f = 0;
01466         for (y = 0; y <= ey; y++) {
01467             x  = (y * f) >> 16;
01468             fr = (y * f) & 0xFFFF;
01469             buf[y * stride + x]     += (color * (0x10000 - fr)) >> 16;
01470             buf[y * stride + x + 1] += (color *            fr ) >> 16;
01471         }
01472     }
01473 }
01474 
01482 static void draw_arrow(uint8_t *buf, int sx, int sy, int ex,
01483                        int ey, int w, int h, int stride, int color)
01484 {
01485     int dx,dy;
01486 
01487     sx = av_clip(sx, -100, w + 100);
01488     sy = av_clip(sy, -100, h + 100);
01489     ex = av_clip(ex, -100, w + 100);
01490     ey = av_clip(ey, -100, h + 100);
01491 
01492     dx = ex - sx;
01493     dy = ey - sy;
01494 
01495     if (dx * dx + dy * dy > 3 * 3) {
01496         int rx =  dx + dy;
01497         int ry = -dx + dy;
01498         int length = ff_sqrt((rx * rx + ry * ry) << 8);
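              /* (rx,ry) is (dx,dy) rotated by 45 degrees; once scaled to ~3 pixels
               * the two draw_line() calls below paint the arrow-head strokes */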
01499 
01500         // FIXME subpixel accuracy
01501         rx = ROUNDED_DIV(rx * 3 << 4, length);
01502         ry = ROUNDED_DIV(ry * 3 << 4, length);
01503 
01504         draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
01505         draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
01506     }
01507     draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
01508 }
01509 
01513 void ff_print_debug_info(MpegEncContext *s, AVFrame *pict)
01514 {
01515     if (s->avctx->hwaccel || !pict || !pict->mb_type)
01516         return;
01517 
01518     if (s->avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
01519         int x,y;
01520 
01521         av_log(s->avctx,AV_LOG_DEBUG,"New frame, type: ");
01522         switch (pict->pict_type) {
01523         case AV_PICTURE_TYPE_I:
01524             av_log(s->avctx,AV_LOG_DEBUG,"I\n");
01525             break;
01526         case AV_PICTURE_TYPE_P:
01527             av_log(s->avctx,AV_LOG_DEBUG,"P\n");
01528             break;
01529         case AV_PICTURE_TYPE_B:
01530             av_log(s->avctx,AV_LOG_DEBUG,"B\n");
01531             break;
01532         case AV_PICTURE_TYPE_S:
01533             av_log(s->avctx,AV_LOG_DEBUG,"S\n");
01534             break;
01535         case AV_PICTURE_TYPE_SI:
01536             av_log(s->avctx,AV_LOG_DEBUG,"SI\n");
01537             break;
01538         case AV_PICTURE_TYPE_SP:
01539             av_log(s->avctx,AV_LOG_DEBUG,"SP\n");
01540             break;
01541         }
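              /* one character cell per macroblock: skip count, qscale and/or a
               * short mb_type code, depending on which debug flags are set */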
01542         for (y = 0; y < s->mb_height; y++) {
01543             for (x = 0; x < s->mb_width; x++) {
01544                 if (s->avctx->debug & FF_DEBUG_SKIP) {
01545                     int count = s->mbskip_table[x + y * s->mb_stride];
01546                     if (count > 9)
01547                         count = 9;
01548                     av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
01549                 }
01550                 if (s->avctx->debug & FF_DEBUG_QP) {
01551                     av_log(s->avctx, AV_LOG_DEBUG, "%2d",
01552                            pict->qscale_table[x + y * s->mb_stride]);
01553                 }
01554                 if (s->avctx->debug & FF_DEBUG_MB_TYPE) {
01555                     int mb_type = pict->mb_type[x + y * s->mb_stride];
01556                     // Type & MV direction
01557                     if (IS_PCM(mb_type))
01558                         av_log(s->avctx, AV_LOG_DEBUG, "P");
01559                     else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
01560                         av_log(s->avctx, AV_LOG_DEBUG, "A");
01561                     else if (IS_INTRA4x4(mb_type))
01562                         av_log(s->avctx, AV_LOG_DEBUG, "i");
01563                     else if (IS_INTRA16x16(mb_type))
01564                         av_log(s->avctx, AV_LOG_DEBUG, "I");
01565                     else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
01566                         av_log(s->avctx, AV_LOG_DEBUG, "d");
01567                     else if (IS_DIRECT(mb_type))
01568                         av_log(s->avctx, AV_LOG_DEBUG, "D");
01569                     else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
01570                         av_log(s->avctx, AV_LOG_DEBUG, "g");
01571                     else if (IS_GMC(mb_type))
01572                         av_log(s->avctx, AV_LOG_DEBUG, "G");
01573                     else if (IS_SKIP(mb_type))
01574                         av_log(s->avctx, AV_LOG_DEBUG, "S");
01575                     else if (!USES_LIST(mb_type, 1))
01576                         av_log(s->avctx, AV_LOG_DEBUG, ">");
01577                     else if (!USES_LIST(mb_type, 0))
01578                         av_log(s->avctx, AV_LOG_DEBUG, "<");
01579                     else {
01580                         assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
01581                         av_log(s->avctx, AV_LOG_DEBUG, "X");
01582                     }
01583 
01584                     // segmentation
01585                     if (IS_8X8(mb_type))
01586                         av_log(s->avctx, AV_LOG_DEBUG, "+");
01587                     else if (IS_16X8(mb_type))
01588                         av_log(s->avctx, AV_LOG_DEBUG, "-");
01589                     else if (IS_8X16(mb_type))
01590                         av_log(s->avctx, AV_LOG_DEBUG, "|");
01591                     else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
01592                         av_log(s->avctx, AV_LOG_DEBUG, " ");
01593                     else
01594                         av_log(s->avctx, AV_LOG_DEBUG, "?");
01595 
01596 
01597                     if (IS_INTERLACED(mb_type))
01598                         av_log(s->avctx, AV_LOG_DEBUG, "=");
01599                     else
01600                         av_log(s->avctx, AV_LOG_DEBUG, " ");
01601                 }
01602                 // av_log(s->avctx, AV_LOG_DEBUG, " ");
01603             }
01604             av_log(s->avctx, AV_LOG_DEBUG, "\n");
01605         }
01606     }
01607 
01608     if ((s->avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
01609         (s->avctx->debug_mv)) {
01610         const int shift = 1 + s->quarter_sample;
01611         int mb_y;
01612         uint8_t *ptr;
01613         int i;
01614         int h_chroma_shift, v_chroma_shift, block_height;
01615         const int width          = s->avctx->width;
01616         const int height         = s->avctx->height;
01617         const int mv_sample_log2 = 4 - pict->motion_subsample_log2;
01618         const int mv_stride      = (s->mb_width << mv_sample_log2) +
01619                                    (s->codec_id == CODEC_ID_H264 ? 0 : 1);
01620         s->low_delay = 0; // needed to see the vectors without trashing the buffers
01621 
01622         avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,
01623                                       &h_chroma_shift, &v_chroma_shift);
01624         for (i = 0; i < 3; i++) {
01625             memcpy(s->visualization_buffer[i], pict->data[i],
01626                    (i == 0) ? pict->linesize[i] * height:
01627                               pict->linesize[i] * height >> v_chroma_shift);
01628             pict->data[i] = s->visualization_buffer[i];
01629         }
01630         pict->type   = FF_BUFFER_TYPE_COPY;
01631         ptr          = pict->data[0];
01632         block_height = 16 >> v_chroma_shift;
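              /* everything below is drawn into the copied frame: motion vectors
               * as arrows on luma, qscale / mb_type as flat colours on chroma */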
01633 
01634         for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
01635             int mb_x;
01636             for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
01637                 const int mb_index = mb_x + mb_y * s->mb_stride;
01638                 if ((s->avctx->debug_mv) && pict->motion_val) {
01639                     int type;
01640                     for (type = 0; type < 3; type++) {
01641                         int direction = 0;
01642                         switch (type) {
01643                         case 0:
01644                             if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_P_FOR)) ||
01645                                 (pict->pict_type!= AV_PICTURE_TYPE_P))
01646                                 continue;
01647                             direction = 0;
01648                             break;
01649                         case 1:
01650                             if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_B_FOR)) ||
01651                                 (pict->pict_type!= AV_PICTURE_TYPE_B))
01652                                 continue;
01653                             direction = 0;
01654                             break;
01655                         case 2:
01656                             if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_B_BACK)) ||
01657                                 (pict->pict_type!= AV_PICTURE_TYPE_B))
01658                                 continue;
01659                             direction = 1;
01660                             break;
01661                         }
01662                         if (!USES_LIST(pict->mb_type[mb_index], direction))
01663                             continue;
01664 
01665                         if (IS_8X8(pict->mb_type[mb_index])) {
01666                             int i;
01667                             for (i = 0; i < 4; i++) {
01668                                 int sx = mb_x * 16 + 4 + 8 * (i & 1);
01669                                 int sy = mb_y * 16 + 4 + 8 * (i >> 1);
01670                                 int xy = (mb_x * 2 + (i & 1) +
01671                                           (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
01672                                 int mx = (pict->motion_val[direction][xy][0] >> shift) + sx;
01673                                 int my = (pict->motion_val[direction][xy][1] >> shift) + sy;
01674                                 draw_arrow(ptr, sx, sy, mx, my, width,
01675                                            height, s->linesize, 100);
01676                             }
01677                         } else if (IS_16X8(pict->mb_type[mb_index])) {
01678                             int i;
01679                             for (i = 0; i < 2; i++) {
01680                                 int sx = mb_x * 16 + 8;
01681                                 int sy = mb_y * 16 + 4 + 8 * i;
01682                                 int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
01683                                 int mx = (pict->motion_val[direction][xy][0] >> shift);
01684                                 int my = (pict->motion_val[direction][xy][1] >> shift);
01685 
01686                                 if (IS_INTERLACED(pict->mb_type[mb_index]))
01687                                     my *= 2;
01688 
01689                                 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
01690                                            height, s->linesize, 100);
01691                             }
01692                         } else if (IS_8X16(pict->mb_type[mb_index])) {
01693                             int i;
01694                             for (i = 0; i < 2; i++) {
01695                                 int sx = mb_x * 16 + 4 + 8 * i;
01696                                 int sy = mb_y * 16 + 8;
01697                                 int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
01698                                 int mx = pict->motion_val[direction][xy][0] >> shift;
01699                                 int my = pict->motion_val[direction][xy][1] >> shift;
01700 
01701                                 if (IS_INTERLACED(pict->mb_type[mb_index]))
01702                                     my *= 2;
01703 
01704                                 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
01705                                            height, s->linesize, 100);
01706                             }
01707                         } else {
01708                               int sx = mb_x * 16 + 8;
01709                               int sy = mb_y * 16 + 8;
01710                               int xy = (mb_x + mb_y * mv_stride) << mv_sample_log2;
01711                               int mx = (pict->motion_val[direction][xy][0] >> shift) + sx;
01712                               int my = (pict->motion_val[direction][xy][1] >> shift) + sy;
01713                               draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
01714                         }
01715                     }
01716                 }
01717                 if ((s->avctx->debug & FF_DEBUG_VIS_QP) && pict->motion_val) {
01718                     uint64_t c = (pict->qscale_table[mb_index] * 128 / 31) *
01719                                  0x0101010101010101ULL;
01720                     int y;
01721                     for (y = 0; y < block_height; y++) {
01722                         *(uint64_t *)(pict->data[1] + 8 * mb_x +
01723                                       (block_height * mb_y + y) *
01724                                       pict->linesize[1]) = c;
01725                         *(uint64_t *)(pict->data[2] + 8 * mb_x +
01726                                       (block_height * mb_y + y) *
01727                                       pict->linesize[2]) = c;
01728                     }
01729                 }
01730                 if ((s->avctx->debug & FF_DEBUG_VIS_MB_TYPE) &&
01731                     pict->motion_val) {
01732                     int mb_type = pict->mb_type[mb_index];
01733                     uint64_t u,v;
01734                     int y;
01735 #define COLOR(theta, r) \
01736     u = (int)(128 + r * cos(theta * 3.141592 / 180)); \
01737     v = (int)(128 + r * sin(theta * 3.141592 / 180));
01738 
01739 
01740                     u = v = 128;
01741                     if (IS_PCM(mb_type)) {
01742                         COLOR(120, 48)
01743                     } else if ((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) ||
01744                                IS_INTRA16x16(mb_type)) {
01745                         COLOR(30, 48)
01746                     } else if (IS_INTRA4x4(mb_type)) {
01747                         COLOR(90, 48)
01748                     } else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type)) {
01749                         // COLOR(120, 48)
01750                     } else if (IS_DIRECT(mb_type)) {
01751                         COLOR(150, 48)
01752                     } else if (IS_GMC(mb_type) && IS_SKIP(mb_type)) {
01753                         COLOR(170, 48)
01754                     } else if (IS_GMC(mb_type)) {
01755                         COLOR(190, 48)
01756                     } else if (IS_SKIP(mb_type)) {
01757                         // COLOR(180, 48)
01758                     } else if (!USES_LIST(mb_type, 1)) {
01759                         COLOR(240, 48)
01760                     } else if (!USES_LIST(mb_type, 0)) {
01761                         COLOR(0, 48)
01762                     } else {
01763                         assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
01764                         COLOR(300,48)
01765                     }
01766 
01767                     u *= 0x0101010101010101ULL;
01768                     v *= 0x0101010101010101ULL;
01769                     for (y = 0; y < block_height; y++) {
01770                         *(uint64_t *)(pict->data[1] + 8 * mb_x +
01771                                       (block_height * mb_y + y) * pict->linesize[1]) = u;
01772                         *(uint64_t *)(pict->data[2] + 8 * mb_x +
01773                                       (block_height * mb_y + y) * pict->linesize[2]) = v;
01774                     }
01775 
01776                     // segmentation
01777                     if (IS_8X8(mb_type) || IS_16X8(mb_type)) {
01778                         *(uint64_t *)(pict->data[0] + 16 * mb_x + 0 +
01779                                       (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
01780                         *(uint64_t *)(pict->data[0] + 16 * mb_x + 8 +
01781                                       (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
01782                     }
01783                     if (IS_8X8(mb_type) || IS_8X16(mb_type)) {
01784                         for (y = 0; y < 16; y++)
01785                             pict->data[0][16 * mb_x + 8 + (16 * mb_y + y) *
01786                                           pict->linesize[0]] ^= 0x80;
01787                     }
01788                     if (IS_8X8(mb_type) && mv_sample_log2 >= 2) {
01789                         int dm = 1 << (mv_sample_log2 - 2);
01790                         for (i = 0; i < 4; i++) {
01791                             int sx = mb_x * 16 + 8 * (i & 1);
01792                             int sy = mb_y * 16 + 8 * (i >> 1);
01793                             int xy = (mb_x * 2 + (i & 1) +
01794                                      (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
01795                             // FIXME bidir
01796                             int32_t *mv = (int32_t *) &pict->motion_val[0][xy];
01797                             if (mv[0] != mv[dm] ||
01798                                 mv[dm * mv_stride] != mv[dm * (mv_stride + 1)])
01799                                 for (y = 0; y < 8; y++)
01800                                     pict->data[0][sx + 4 + (sy + y) * pict->linesize[0]] ^= 0x80;
01801                             if (mv[0] != mv[dm * mv_stride] || mv[dm] != mv[dm * (mv_stride + 1)])
01802                                 *(uint64_t *)(pict->data[0] + sx + (sy + 4) *
01803                                               pict->linesize[0]) ^= 0x8080808080808080ULL;
01804                         }
01805                     }
01806 
01807                     if (IS_INTERLACED(mb_type) &&
01808                         s->codec_id == CODEC_ID_H264) {
01809                         // hmm
01810                     }
01811                 }
01812                 s->mbskip_table[mb_index] = 0;
01813             }
01814         }
01815     }
01816 }
01817 
01818 static inline int hpel_motion_lowres(MpegEncContext *s,
01819                                      uint8_t *dest, uint8_t *src,
01820                                      int field_based, int field_select,
01821                                      int src_x, int src_y,
01822                                      int width, int height, int stride,
01823                                      int h_edge_pos, int v_edge_pos,
01824                                      int w, int h, h264_chroma_mc_func *pix_op,
01825                                      int motion_x, int motion_y)
01826 {
01827     const int lowres   = s->avctx->lowres;
01828     const int op_index = FFMIN(lowres, 2);
01829     const int s_mask   = (2 << lowres) - 1;
01830     int emu = 0;
01831     int sx, sy;
01832 
01833     if (s->quarter_sample) {
01834         motion_x /= 2;
01835         motion_y /= 2;
01836     }
01837 
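      /* split the vector into an integer part (added to src_x/src_y) and a
       * sub-pel part (sx/sy), later rescaled to the 1/8-pel positions used by
       * the h264 chroma MC functions */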
01838     sx = motion_x & s_mask;
01839     sy = motion_y & s_mask;
01840     src_x += motion_x >> lowres + 1;
01841     src_y += motion_y >> lowres + 1;
01842 
01843     src   += src_y * stride + src_x;
01844 
01845     if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w,                 0) ||
01846         (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
01847         s->dsp.emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, w + 1,
01848                                 (h + 1) << field_based, src_x,
01849                                 src_y   << field_based,
01850                                 h_edge_pos,
01851                                 v_edge_pos);
01852         src = s->edge_emu_buffer;
01853         emu = 1;
01854     }
01855 
01856     sx = (sx << 2) >> lowres;
01857     sy = (sy << 2) >> lowres;
01858     if (field_select)
01859         src += s->linesize;
01860     pix_op[op_index](dest, src, stride, h, sx, sy);
01861     return emu;
01862 }
01863 
01864 /* apply one mpeg motion vector to the three components */
01865 static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
01866                                                 uint8_t *dest_y,
01867                                                 uint8_t *dest_cb,
01868                                                 uint8_t *dest_cr,
01869                                                 int field_based,
01870                                                 int bottom_field,
01871                                                 int field_select,
01872                                                 uint8_t **ref_picture,
01873                                                 h264_chroma_mc_func *pix_op,
01874                                                 int motion_x, int motion_y,
01875                                                 int h, int mb_y)
01876 {
01877     uint8_t *ptr_y, *ptr_cb, *ptr_cr;
01878     int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, uvlinesize, linesize, sx, sy,
01879         uvsx, uvsy;
01880     const int lowres     = s->avctx->lowres;
01881     const int op_index   = FFMIN(lowres, 2);
01882     const int block_s    = 8>>lowres;
01883     const int s_mask     = (2 << lowres) - 1;
01884     const int h_edge_pos = s->h_edge_pos >> lowres;
01885     const int v_edge_pos = s->v_edge_pos >> lowres;
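      /* block_s is the luma block size at this lowres scale; s_mask keeps the
       * sub-pel vector bits that are still meaningful after downscaling */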
01886     linesize   = s->current_picture.f.linesize[0] << field_based;
01887     uvlinesize = s->current_picture.f.linesize[1] << field_based;
01888 
01889     // FIXME obviously not perfect but qpel will not work in lowres anyway
01890     if (s->quarter_sample) {
01891         motion_x /= 2;
01892         motion_y /= 2;
01893     }
01894 
01895     if (field_based) {
01896         motion_y += (bottom_field - field_select) * (1 << lowres - 1);
01897     }
01898 
01899     sx = motion_x & s_mask;
01900     sy = motion_y & s_mask;
01901     src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1);
01902     src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);
01903 
01904     if (s->out_format == FMT_H263) {
01905         uvsx    = ((motion_x >> 1) & s_mask) | (sx & 1);
01906         uvsy    = ((motion_y >> 1) & s_mask) | (sy & 1);
01907         uvsrc_x = src_x >> 1;
01908         uvsrc_y = src_y >> 1;
01909     } else if (s->out_format == FMT_H261) {
01910         // even chroma mv's are full pel in H261
01911         mx      = motion_x / 4;
01912         my      = motion_y / 4;
01913         uvsx    = (2 * mx) & s_mask;
01914         uvsy    = (2 * my) & s_mask;
01915         uvsrc_x = s->mb_x * block_s + (mx >> lowres);
01916         uvsrc_y =    mb_y * block_s + (my >> lowres);
01917     } else {
01918         mx      = motion_x / 2;
01919         my      = motion_y / 2;
01920         uvsx    = mx & s_mask;
01921         uvsy    = my & s_mask;
01922         uvsrc_x = s->mb_x * block_s                 + (mx >> lowres + 1);
01923         uvsrc_y =   (mb_y * block_s >> field_based) + (my >> lowres + 1);
01924     }
01925 
01926     ptr_y  = ref_picture[0] + src_y   * linesize   + src_x;
01927     ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
01928     ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
01929 
01930     if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s,       0) ||
01931         (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
01932         s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y,
01933                                 s->linesize, 17, 17 + field_based,
01934                                 src_x, src_y << field_based, h_edge_pos,
01935                                 v_edge_pos);
01936         ptr_y = s->edge_emu_buffer;
01937         if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
01938             uint8_t *uvbuf = s->edge_emu_buffer + 18 * s->linesize;
01939             s->dsp.emulated_edge_mc(uvbuf , ptr_cb, s->uvlinesize, 9,
01940                                     9 + field_based,
01941                                     uvsrc_x, uvsrc_y << field_based,
01942                                     h_edge_pos >> 1, v_edge_pos >> 1);
01943             s->dsp.emulated_edge_mc(uvbuf + 16, ptr_cr, s->uvlinesize, 9,
01944                                     9 + field_based,
01945                                     uvsrc_x, uvsrc_y << field_based,
01946                                     h_edge_pos >> 1, v_edge_pos >> 1);
01947             ptr_cb = uvbuf;
01948             ptr_cr = uvbuf + 16;
01949         }
01950     }
01951 
01952     // FIXME use this for field pix too instead of the obnoxious hack which changes picture.f.data
01953     if (bottom_field) {
01954         dest_y  += s->linesize;
01955         dest_cb += s->uvlinesize;
01956         dest_cr += s->uvlinesize;
01957     }
01958 
01959     if (field_select) {
01960         ptr_y   += s->linesize;
01961         ptr_cb  += s->uvlinesize;
01962         ptr_cr  += s->uvlinesize;
01963     }
01964 
01965     sx = (sx << 2) >> lowres;
01966     sy = (sy << 2) >> lowres;
01967     pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);
01968 
01969     if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
01970         uvsx = (uvsx << 2) >> lowres;
01971         uvsy = (uvsy << 2) >> lowres;
01972         pix_op[op_index](dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift,
01973                          uvsx, uvsy);
01974         pix_op[op_index](dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift,
01975                          uvsx, uvsy);
01976     }
01977     // FIXME h261 lowres loop filter
01978 }
01979 
01980 static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
01981                                             uint8_t *dest_cb, uint8_t *dest_cr,
01982                                             uint8_t **ref_picture,
01983                                             h264_chroma_mc_func * pix_op,
01984                                             int mx, int my)
01985 {
01986     const int lowres     = s->avctx->lowres;
01987     const int op_index   = FFMIN(lowres, 2);
01988     const int block_s    = 8 >> lowres;
01989     const int s_mask     = (2 << lowres) - 1;
01990     const int h_edge_pos = s->h_edge_pos >> lowres + 1;
01991     const int v_edge_pos = s->v_edge_pos >> lowres + 1;
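      /* chroma is at half resolution here, hence the extra >> 1 compared to
       * the luma edge positions */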
01992     int emu = 0, src_x, src_y, offset, sx, sy;
01993     uint8_t *ptr;
01994 
01995     if (s->quarter_sample) {
01996         mx /= 2;
01997         my /= 2;
01998     }
01999 
02000     /* In case of 8X8, we construct a single chroma motion vector
02001        with a special rounding */
02002     mx = ff_h263_round_chroma(mx);
02003     my = ff_h263_round_chroma(my);
02004 
02005     sx = mx & s_mask;
02006     sy = my & s_mask;
02007     src_x = s->mb_x * block_s + (mx >> lowres + 1);
02008     src_y = s->mb_y * block_s + (my >> lowres + 1);
02009 
02010     offset = src_y * s->uvlinesize + src_x;
02011     ptr = ref_picture[1] + offset;
02012     if (s->flags & CODEC_FLAG_EMU_EDGE) {
02013         if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
02014             (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
02015             s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize,
02016                                     9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
02017             ptr = s->edge_emu_buffer;
02018             emu = 1;
02019         }
02020     }
02021     sx = (sx << 2) >> lowres;
02022     sy = (sy << 2) >> lowres;
02023     pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
02024 
02025     ptr = ref_picture[2] + offset;
02026     if (emu) {
02027         s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9,
02028                                 src_x, src_y, h_edge_pos, v_edge_pos);
02029         ptr = s->edge_emu_buffer;
02030     }
02031     pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
02032 }
02033 
02045 static inline void MPV_motion_lowres(MpegEncContext *s,
02046                                      uint8_t *dest_y, uint8_t *dest_cb,
02047                                      uint8_t *dest_cr,
02048                                      int dir, uint8_t **ref_picture,
02049                                      h264_chroma_mc_func *pix_op)
02050 {
02051     int mx, my;
02052     int mb_x, mb_y, i;
02053     const int lowres  = s->avctx->lowres;
02054     const int block_s = 8 >>lowres;
02055 
02056     mb_x = s->mb_x;
02057     mb_y = s->mb_y;
02058 
02059     switch (s->mv_type) {
02060     case MV_TYPE_16X16:
02061         mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
02062                            0, 0, 0,
02063                            ref_picture, pix_op,
02064                            s->mv[dir][0][0], s->mv[dir][0][1],
02065                            2 * block_s, mb_y);
02066         break;
02067     case MV_TYPE_8X8:
02068         mx = 0;
02069         my = 0;
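              /* sum the four luma vectors; the sum becomes the single chroma
               * vector (with its special rounding) below */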
02070         for (i = 0; i < 4; i++) {
02071             hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) *
02072                                s->linesize) * block_s,
02073                                ref_picture[0], 0, 0,
02074                                (2 * mb_x + (i & 1)) * block_s,
02075                                (2 * mb_y + (i >> 1)) * block_s,
02076                                s->width, s->height, s->linesize,
02077                                s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
02078                                block_s, block_s, pix_op,
02079                                s->mv[dir][i][0], s->mv[dir][i][1]);
02080 
02081             mx += s->mv[dir][i][0];
02082             my += s->mv[dir][i][1];
02083         }
02084 
02085         if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY))
02086             chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,
02087                                      pix_op, mx, my);
02088         break;
02089     case MV_TYPE_FIELD:
02090         if (s->picture_structure == PICT_FRAME) {
02091             /* top field */
02092             mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
02093                                1, 0, s->field_select[dir][0],
02094                                ref_picture, pix_op,
02095                                s->mv[dir][0][0], s->mv[dir][0][1],
02096                                block_s, mb_y);
02097             /* bottom field */
02098             mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
02099                                1, 1, s->field_select[dir][1],
02100                                ref_picture, pix_op,
02101                                s->mv[dir][1][0], s->mv[dir][1][1],
02102                                block_s, mb_y);
02103         } else {
02104             if (s->picture_structure != s->field_select[dir][0] + 1 &&
02105                 s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
02106                 ref_picture = s->current_picture_ptr->f.data;
02107 
02108             }
02109             mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
02110                                0, 0, s->field_select[dir][0],
02111                                ref_picture, pix_op,
02112                                s->mv[dir][0][0],
02113                                s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
02114         }
02115         break;
02116     case MV_TYPE_16X8:
02117         for (i = 0; i < 2; i++) {
02118             uint8_t **ref2picture;
02119 
02120             if (s->picture_structure == s->field_select[dir][i] + 1 ||
02121                 s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
02122                 ref2picture = ref_picture;
02123             } else {
02124                 ref2picture = s->current_picture_ptr->f.data;
02125             }
02126 
02127             mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
02128                                0, 0, s->field_select[dir][i],
02129                                ref2picture, pix_op,
02130                                s->mv[dir][i][0], s->mv[dir][i][1] +
02131                                2 * block_s * i, block_s, mb_y >> 1);
02132 
02133             dest_y  +=  2 * block_s *  s->linesize;
02134             dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
02135             dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
02136         }
02137         break;
02138     case MV_TYPE_DMV:
02139         if (s->picture_structure == PICT_FRAME) {
02140             for (i = 0; i < 2; i++) {
02141                 int j;
02142                 for (j = 0; j < 2; j++) {
02143                     mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
02144                                        1, j, j ^ i,
02145                                        ref_picture, pix_op,
02146                                        s->mv[dir][2 * i + j][0],
02147                                        s->mv[dir][2 * i + j][1],
02148                                        block_s, mb_y);
02149                 }
02150                 pix_op = s->dsp.avg_h264_chroma_pixels_tab;
02151             }
02152         } else {
02153             for (i = 0; i < 2; i++) {
02154                 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
02155                                    0, 0, s->picture_structure != i + 1,
02156                                    ref_picture, pix_op,
02157                                    s->mv[dir][2 * i][0],s->mv[dir][2 * i][1],
02158                                    2 * block_s, mb_y >> 1);
02159 
02160                 // after put we make avg of the same block
02161                 pix_op = s->dsp.avg_h264_chroma_pixels_tab;
02162 
02163                 // opposite parity is always in the same
02164                 // frame if this is second field
02165                 if (!s->first_field) {
02166                     ref_picture = s->current_picture_ptr->f.data;
02167                 }
02168             }
02169         }
02170         break;
02171     default:
02172         assert(0);
02173     }
02174 }
02175 
02179 int MPV_lowest_referenced_row(MpegEncContext *s, int dir)
02180 {
02181     int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
02182     int my, off, i, mvs;
02183 
02184     if (s->picture_structure != PICT_FRAME) goto unhandled;
02185 
02186     switch (s->mv_type) {
02187         case MV_TYPE_16X16:
02188             mvs = 1;
02189             break;
02190         case MV_TYPE_16X8:
02191             mvs = 2;
02192             break;
02193         case MV_TYPE_8X8:
02194             mvs = 4;
02195             break;
02196         default:
02197             goto unhandled;
02198     }
02199 
02200     for (i = 0; i < mvs; i++) {
02201         my = s->mv[dir][i][1]<<qpel_shift;
02202         my_max = FFMAX(my_max, my);
02203         my_min = FFMIN(my_min, my);
02204     }
02205 
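      /* my is in quarter-pel units here; 64 quarter-pel steps make up one
       * 16-pixel macroblock row, rounded up */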
02206     off = (FFMAX(-my_min, my_max) + 63) >> 6;
02207 
02208     return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
02209 unhandled:
02210     return s->mb_height-1;
02211 }
02212 
02213 /* put block[] to dest[] */
02214 static inline void put_dct(MpegEncContext *s,
02215                            DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
02216 {
02217     s->dct_unquantize_intra(s, block, i, qscale);
02218     s->dsp.idct_put (dest, line_size, block);
02219 }
02220 
02221 /* add block[] to dest[] */
02222 static inline void add_dct(MpegEncContext *s,
02223                            DCTELEM *block, int i, uint8_t *dest, int line_size)
02224 {
02225     if (s->block_last_index[i] >= 0) {
02226         s->dsp.idct_add (dest, line_size, block);
02227     }
02228 }
02229 
02230 static inline void add_dequant_dct(MpegEncContext *s,
02231                            DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
02232 {
02233     if (s->block_last_index[i] >= 0) {
02234         s->dct_unquantize_inter(s, block, i, qscale);
02235 
02236         s->dsp.idct_add (dest, line_size, block);
02237     }
02238 }
02239 
02243 void ff_clean_intra_table_entries(MpegEncContext *s)
02244 {
02245     int wrap = s->b8_stride;
02246     int xy = s->block_index[0];
02247 
02248     s->dc_val[0][xy           ] =
02249     s->dc_val[0][xy + 1       ] =
02250     s->dc_val[0][xy     + wrap] =
02251     s->dc_val[0][xy + 1 + wrap] = 1024;
02252     /* ac pred */
02253     memset(s->ac_val[0][xy       ], 0, 32 * sizeof(int16_t));
02254     memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
02255     if (s->msmpeg4_version>=3) {
02256         s->coded_block[xy           ] =
02257         s->coded_block[xy + 1       ] =
02258         s->coded_block[xy     + wrap] =
02259         s->coded_block[xy + 1 + wrap] = 0;
02260     }
02261     /* chroma */
02262     wrap = s->mb_stride;
02263     xy = s->mb_x + s->mb_y * wrap;
02264     s->dc_val[1][xy] =
02265     s->dc_val[2][xy] = 1024;
02266     /* ac pred */
02267     memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
02268     memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
02269 
02270     s->mbintra_table[xy]= 0;
02271 }
02272 
02273 /* generic function called after a macroblock has been parsed by the
02274    decoder or after it has been encoded by the encoder.
02275 
02276    Important variables used:
02277    s->mb_intra : true if intra macroblock
02278    s->mv_dir   : motion vector direction
02279    s->mv_type  : motion vector type
02280    s->mv       : motion vector
02281    s->interlaced_dct : true if interlaced dct used (mpeg2)
02282  */
02283 static av_always_inline
02284 void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
02285                             int lowres_flag, int is_mpeg12)
02286 {
02287     const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
02288     if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
02289         ff_xvmc_decode_mb(s);//xvmc uses pblocks
02290         return;
02291     }
02292 
02293     if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
02294        /* save DCT coefficients */
02295        int i,j;
02296        DCTELEM *dct = &s->current_picture.f.dct_coeff[mb_xy * 64 * 6];
02297        av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
02298        for(i=0; i<6; i++){
02299            for(j=0; j<64; j++){
02300                *dct++ = block[i][s->dsp.idct_permutation[j]];
02301                av_log(s->avctx, AV_LOG_DEBUG, "%5d", dct[-1]);
02302            }
02303            av_log(s->avctx, AV_LOG_DEBUG, "\n");
02304        }
02305     }
02306 
02307     s->current_picture.f.qscale_table[mb_xy] = s->qscale;
02308 
02309     /* update DC predictors for P macroblocks */
02310     if (!s->mb_intra) {
02311         if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
02312             if(s->mbintra_table[mb_xy])
02313                 ff_clean_intra_table_entries(s);
02314         } else {
02315             s->last_dc[0] =
02316             s->last_dc[1] =
02317             s->last_dc[2] = 128 << s->intra_dc_precision;
02318         }
02319     }
02320     else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
02321         s->mbintra_table[mb_xy]=1;
02322 
02323     if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
02324         uint8_t *dest_y, *dest_cb, *dest_cr;
02325         int dct_linesize, dct_offset;
02326         op_pixels_func (*op_pix)[4];
02327         qpel_mc_func (*op_qpix)[16];
02328         const int linesize   = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
02329         const int uvlinesize = s->current_picture.f.linesize[1];
02330         const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
02331         const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
02332 
02333         /* avoid copy if macroblock skipped in last frame too */
02334         /* skip only during decoding as we might trash the buffers during encoding a bit */
02335         if(!s->encoding){
02336             uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
02337 
02338             if (s->mb_skipped) {
02339                 s->mb_skipped= 0;
02340                 assert(s->pict_type!=AV_PICTURE_TYPE_I);
02341                 *mbskip_ptr = 1;
02342             } else if(!s->current_picture.f.reference) {
02343                 *mbskip_ptr = 1;
02344             } else{
02345                 *mbskip_ptr = 0; /* not skipped */
02346             }
02347         }
02348 
02349         dct_linesize = linesize << s->interlaced_dct;
02350         dct_offset   = s->interlaced_dct ? linesize : linesize * block_size;
02351 
02352         if(readable){
02353             dest_y=  s->dest[0];
02354             dest_cb= s->dest[1];
02355             dest_cr= s->dest[2];
02356         }else{
02357             dest_y = s->b_scratchpad;
02358             dest_cb= s->b_scratchpad+16*linesize;
02359             dest_cr= s->b_scratchpad+32*linesize;
02360         }
02361 
02362         if (!s->mb_intra) {
02363             /* motion handling */
02364             /* decoding or more than one mb_type (MC was already done otherwise) */
02365             if(!s->encoding){
02366 
02367                 if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
02368                     if (s->mv_dir & MV_DIR_FORWARD) {
02369                         ff_thread_await_progress((AVFrame*)s->last_picture_ptr, MPV_lowest_referenced_row(s, 0), 0);
02370                     }
02371                     if (s->mv_dir & MV_DIR_BACKWARD) {
02372                         ff_thread_await_progress((AVFrame*)s->next_picture_ptr, MPV_lowest_referenced_row(s, 1), 0);
02373                     }
02374                 }
02375 
02376                 if(lowres_flag){
02377                     h264_chroma_mc_func *op_pix = s->dsp.put_h264_chroma_pixels_tab;
02378 
02379                     if (s->mv_dir & MV_DIR_FORWARD) {
02380                         MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix);
02381                         op_pix = s->dsp.avg_h264_chroma_pixels_tab;
02382                     }
02383                     if (s->mv_dir & MV_DIR_BACKWARD) {
02384                         MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix);
02385                     }
02386                 }else{
02387                     op_qpix= s->me.qpel_put;
02388                     if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
02389                         op_pix = s->dsp.put_pixels_tab;
02390                     }else{
02391                         op_pix = s->dsp.put_no_rnd_pixels_tab;
02392                     }
02393                     if (s->mv_dir & MV_DIR_FORWARD) {
02394                         MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
02395                         op_pix = s->dsp.avg_pixels_tab;
02396                         op_qpix= s->me.qpel_avg;
02397                     }
02398                     if (s->mv_dir & MV_DIR_BACKWARD) {
02399                         MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
02400                     }
02401                 }
02402             }
02403 
02404             /* skip dequant / idct if we are really late ;) */
02405             if(s->avctx->skip_idct){
02406                 if(  (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
02407                    ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
02408                    || s->avctx->skip_idct >= AVDISCARD_ALL)
02409                     goto skip_idct;
02410             }
02411 
02412             /* add dct residue */
02413             if(s->encoding || !(   s->msmpeg4_version || s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO
02414                                 || (s->codec_id==CODEC_ID_MPEG4 && !s->mpeg_quant))){
02415                 add_dequant_dct(s, block[0], 0, dest_y                          , dct_linesize, s->qscale);
02416                 add_dequant_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->qscale);
02417                 add_dequant_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->qscale);
02418                 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
02419 
02420                 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
02421                     if (s->chroma_y_shift){
02422                         add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
02423                         add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
02424                     }else{
02425                         dct_linesize >>= 1;
02426                         dct_offset >>=1;
02427                         add_dequant_dct(s, block[4], 4, dest_cb,              dct_linesize, s->chroma_qscale);
02428                         add_dequant_dct(s, block[5], 5, dest_cr,              dct_linesize, s->chroma_qscale);
02429                         add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
02430                         add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
02431                     }
02432                 }
02433             } else if(is_mpeg12 || (s->codec_id != CODEC_ID_WMV2)){
02434                 add_dct(s, block[0], 0, dest_y                          , dct_linesize);
02435                 add_dct(s, block[1], 1, dest_y              + block_size, dct_linesize);
02436                 add_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize);
02437                 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
02438 
02439                 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
02440                     if(s->chroma_y_shift){//Chroma420
02441                         add_dct(s, block[4], 4, dest_cb, uvlinesize);
02442                         add_dct(s, block[5], 5, dest_cr, uvlinesize);
02443                     }else{
02444                         //chroma422
02445                         dct_linesize = uvlinesize << s->interlaced_dct;
02446                         dct_offset   = s->interlaced_dct ? uvlinesize : uvlinesize * 8;
02447 
02448                         add_dct(s, block[4], 4, dest_cb, dct_linesize);
02449                         add_dct(s, block[5], 5, dest_cr, dct_linesize);
02450                         add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
02451                         add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
02452                         if(!s->chroma_x_shift){//Chroma444
02453                             add_dct(s, block[8], 8, dest_cb+8, dct_linesize);
02454                             add_dct(s, block[9], 9, dest_cr+8, dct_linesize);
02455                             add_dct(s, block[10], 10, dest_cb+8+dct_offset, dct_linesize);
02456                             add_dct(s, block[11], 11, dest_cr+8+dct_offset, dct_linesize);
02457                         }
02458                     }
02459                 }//fi gray
02460             }
02461             else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
02462                 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
02463             }
02464         } else {
02465             /* dct only in intra block */
02466             if(s->encoding || !(s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO)){
02467                 put_dct(s, block[0], 0, dest_y                          , dct_linesize, s->qscale);
02468                 put_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->qscale);
02469                 put_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->qscale);
02470                 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
02471 
02472                 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
02473                     if(s->chroma_y_shift){
02474                         put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
02475                         put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
02476                     }else{
02477                         dct_offset >>=1;
02478                         dct_linesize >>=1;
02479                         put_dct(s, block[4], 4, dest_cb,              dct_linesize, s->chroma_qscale);
02480                         put_dct(s, block[5], 5, dest_cr,              dct_linesize, s->chroma_qscale);
02481                         put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
02482                         put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
02483                     }
02484                 }
02485             }else{
02486                 s->dsp.idct_put(dest_y                          , dct_linesize, block[0]);
02487                 s->dsp.idct_put(dest_y              + block_size, dct_linesize, block[1]);
02488                 s->dsp.idct_put(dest_y + dct_offset             , dct_linesize, block[2]);
02489                 s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
02490 
02491                 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
02492                     if(s->chroma_y_shift){
02493                         s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
02494                         s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
02495                     }else{
02496 
02497                         dct_linesize = uvlinesize << s->interlaced_dct;
02498                         dct_offset   = s->interlaced_dct ? uvlinesize : uvlinesize * 8;
02499 
02500                         s->dsp.idct_put(dest_cb,              dct_linesize, block[4]);
02501                         s->dsp.idct_put(dest_cr,              dct_linesize, block[5]);
02502                         s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
02503                         s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
02504                         if(!s->chroma_x_shift){//Chroma444
02505                             s->dsp.idct_put(dest_cb + 8,              dct_linesize, block[8]);
02506                             s->dsp.idct_put(dest_cr + 8,              dct_linesize, block[9]);
02507                             s->dsp.idct_put(dest_cb + 8 + dct_offset, dct_linesize, block[10]);
02508                             s->dsp.idct_put(dest_cr + 8 + dct_offset, dct_linesize, block[11]);
02509                         }
02510                     }
02511                 }//gray
02512             }
02513         }
02514 skip_idct:
02515         if(!readable){
02516             s->dsp.put_pixels_tab[0][0](s->dest[0], dest_y ,   linesize,16);
02517             s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
02518             s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
02519         }
02520     }
02521 }
02522 
02523 void MPV_decode_mb(MpegEncContext *s, DCTELEM block[12][64]){
02524 #if !CONFIG_SMALL
02525     if(s->out_format == FMT_MPEG1) {
02526         if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1);
02527         else                 MPV_decode_mb_internal(s, block, 0, 1);
02528     } else
02529 #endif
02530     if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 0);
02531     else                  MPV_decode_mb_internal(s, block, 0, 0);
02532 }
02533 
02537 void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
02538     const int field_pic= s->picture_structure != PICT_FRAME;
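      /* field pictures hold only every other frame line, so convert the band
       * position and height to frame coordinates */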
02539     if(field_pic){
02540         h <<= 1;
02541         y <<= 1;
02542     }
02543 
02544     if (!s->avctx->hwaccel
02545        && !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
02546        && s->unrestricted_mv
02547        && s->current_picture.f.reference
02548        && !s->intra_only
02549        && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
02550         int sides = 0, edge_h;
02551         int hshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_w;
02552         int vshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_h;
02553         if (y==0) sides |= EDGE_TOP;
02554         if (y + h >= s->v_edge_pos) sides |= EDGE_BOTTOM;
02555 
02556         edge_h= FFMIN(h, s->v_edge_pos - y);
02557 
02558         s->dsp.draw_edges(s->current_picture_ptr->f.data[0] +  y         *s->linesize,
02559                           s->linesize,           s->h_edge_pos,         edge_h,
02560                           EDGE_WIDTH,            EDGE_WIDTH,            sides);
02561         s->dsp.draw_edges(s->current_picture_ptr->f.data[1] + (y>>vshift)*s->uvlinesize,
02562                           s->uvlinesize,         s->h_edge_pos>>hshift, edge_h>>vshift,
02563                           EDGE_WIDTH>>hshift,    EDGE_WIDTH>>vshift,    sides);
02564         s->dsp.draw_edges(s->current_picture_ptr->f.data[2] + (y>>vshift)*s->uvlinesize,
02565                           s->uvlinesize,         s->h_edge_pos>>hshift, edge_h>>vshift,
02566                           EDGE_WIDTH>>hshift,    EDGE_WIDTH>>vshift,    sides);
02567     }
02568 
02569     h= FFMIN(h, s->avctx->height - y);
02570 
02571     if(field_pic && s->first_field && !(s->avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;
02572 
02573     if (s->avctx->draw_horiz_band) {
02574         AVFrame *src;
02575         int offset[AV_NUM_DATA_POINTERS];
02576         int i;
02577 
02578         if(s->pict_type==AV_PICTURE_TYPE_B || s->low_delay || (s->avctx->slice_flags&SLICE_FLAG_CODED_ORDER))
02579             src= (AVFrame*)s->current_picture_ptr;
02580         else if(s->last_picture_ptr)
02581             src= (AVFrame*)s->last_picture_ptr;
02582         else
02583             return;
02584 
02585         if(s->pict_type==AV_PICTURE_TYPE_B && s->picture_structure == PICT_FRAME && s->out_format != FMT_H264){
02586             for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
02587                 offset[i] = 0;
02588         }else{
02589             offset[0]= y * s->linesize;
02590             offset[1]=
02591             offset[2]= (y >> s->chroma_y_shift) * s->uvlinesize;
02592             for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
02593                 offset[i] = 0;
02594         }
02595 
02596         emms_c();
02597 
02598         s->avctx->draw_horiz_band(s->avctx, src, offset,
02599                                   y, s->picture_structure, h);
02600     }
02601 }
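The draw_horiz_band callback invoked above lets an application consume decoded rows as soon as a band is complete instead of waiting for the whole frame. A minimal sketch of an application-side handler follows; my_draw_band and process_row are hypothetical names, and the prototype simply mirrors the call made a few lines up.

    static void my_draw_band(AVCodecContext *avctx, const AVFrame *src,
                             int offset[AV_NUM_DATA_POINTERS],
                             int y, int type, int height)
    {
        /* the band's luma rows start at src->data[0] + offset[0] */
        const uint8_t *luma = src->data[0] + offset[0];
        int row;

        for (row = 0; row < height; row++)
            process_row(luma + row * src->linesize[0], avctx->width, y + row);
    }

    /* during decoder setup: */
    avctx->draw_horiz_band = my_draw_band;

Chroma can be reached the same way through src->data[1]/[2] and offset[1]/[2], subsampled according to the pixel format.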
02602 
02603 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
02604     const int linesize   = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
02605     const int uvlinesize = s->current_picture.f.linesize[1];
02606     const int mb_size= 4 - s->avctx->lowres;
02607 
02608     s->block_index[0]= s->b8_stride*(s->mb_y*2    ) - 2 + s->mb_x*2;
02609     s->block_index[1]= s->b8_stride*(s->mb_y*2    ) - 1 + s->mb_x*2;
02610     s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
02611     s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
02612     s->block_index[4]= s->mb_stride*(s->mb_y + 1)                + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
02613     s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
02614     //block_index is not used by mpeg2, so it is not affected by chroma_format
02615 
02616     s->dest[0] = s->current_picture.f.data[0] + ((s->mb_x - 1) <<  mb_size);
02617     s->dest[1] = s->current_picture.f.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
02618     s->dest[2] = s->current_picture.f.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
02619 
02620     if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
02621     {
02622         if(s->picture_structure==PICT_FRAME){
02623             s->dest[0] += s->mb_y *   linesize << mb_size;
02624             s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
02625             s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
02626         }else{
02627             s->dest[0] += (s->mb_y>>1) *   linesize << mb_size;
02628             s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
02629             s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
02630             assert((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
02631         }
02632     }
02633 }
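In 8x8-block (b8) units, block_index[0..3] address the four luma blocks of the current macroblock and block_index[4]/[5] the Cb/Cr blocks, laid out so that the left and top neighbours needed for prediction sit at fixed offsets. The dest[0..2] pointers start one macroblock to the left of mb_x because the per-macroblock update code steps them forward before each macroblock is decoded (an assumption based on how these pointers are advanced elsewhere in mpegvideo).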
02634 
02635 void ff_mpeg_flush(AVCodecContext *avctx){
02636     int i;
02637     MpegEncContext *s = avctx->priv_data;
02638 
02639     if(s==NULL || s->picture==NULL)
02640         return;
02641 
02642     for(i=0; i<s->picture_count; i++){
02643        if (s->picture[i].f.data[0] &&
02644            (s->picture[i].f.type == FF_BUFFER_TYPE_INTERNAL ||
02645             s->picture[i].f.type == FF_BUFFER_TYPE_USER))
02646         free_frame_buffer(s, &s->picture[i]);
02647     }
02648     s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
02649 
02650     s->mb_x= s->mb_y= 0;
02651 
02652     s->parse_context.state= -1;
02653     s->parse_context.frame_start_found= 0;
02654     s->parse_context.overread= 0;
02655     s->parse_context.overread_index= 0;
02656     s->parse_context.index= 0;
02657     s->parse_context.last_index= 0;
02658     s->bitstream_buffer_size=0;
02659     s->pp_time=0;
02660 }
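For decoders built on MpegEncContext this function is the codec's flush callback, invoked through avcodec_flush_buffers(); the typical call site in an application is a seek, roughly as sketched below (fmt_ctx, dec_ctx, video_stream and target_ts are hypothetical variables).

    /* reposition the demuxer, then drop the decoder's buffered reference
     * pictures, parser state and bitstream buffer before sending packets
     * from the new position */
    av_seek_frame(fmt_ctx, video_stream, target_ts, AVSEEK_FLAG_BACKWARD);
    avcodec_flush_buffers(dec_ctx);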
02661 
02662 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
02663                                    DCTELEM *block, int n, int qscale)
02664 {
02665     int i, level, nCoeffs;
02666     const uint16_t *quant_matrix;
02667 
02668     nCoeffs= s->block_last_index[n];
02669 
02670     if (n < 4)
02671         block[0] = block[0] * s->y_dc_scale;
02672     else
02673         block[0] = block[0] * s->c_dc_scale;
02674     /* XXX: only mpeg1 */
02675     quant_matrix = s->intra_matrix;
02676     for(i=1;i<=nCoeffs;i++) {
02677         int j= s->intra_scantable.permutated[i];
02678         level = block[j];
02679         if (level) {
02680             if (level < 0) {
02681                 level = -level;
02682                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02683                 level = (level - 1) | 1;
02684                 level = -level;
02685             } else {
02686                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02687                 level = (level - 1) | 1;
02688             }
02689             block[j] = level;
02690         }
02691     }
02692 }
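As a concrete example of the reconstruction above (values chosen arbitrarily): with qscale = 4, quant_matrix[j] = 16 and a coded level of 5, the loop computes (5 * 4 * 16) >> 3 = 40 and then (40 - 1) | 1 = 39; forcing the result to be odd is MPEG-1's mismatch control, which keeps encoder and decoder IDCTs from drifting apart.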
02693 
02694 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
02695                                    DCTELEM *block, int n, int qscale)
02696 {
02697     int i, level, nCoeffs;
02698     const uint16_t *quant_matrix;
02699 
02700     nCoeffs= s->block_last_index[n];
02701 
02702     quant_matrix = s->inter_matrix;
02703     for(i=0; i<=nCoeffs; i++) {
02704         int j= s->intra_scantable.permutated[i];
02705         level = block[j];
02706         if (level) {
02707             if (level < 0) {
02708                 level = -level;
02709                 level = (((level << 1) + 1) * qscale *
02710                          ((int) (quant_matrix[j]))) >> 4;
02711                 level = (level - 1) | 1;
02712                 level = -level;
02713             } else {
02714                 level = (((level << 1) + 1) * qscale *
02715                          ((int) (quant_matrix[j]))) >> 4;
02716                 level = (level - 1) | 1;
02717             }
02718             block[j] = level;
02719         }
02720     }
02721 }
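The inter path uses ((2 * level + 1) * qscale * quant_matrix[j]) >> 4 instead; with the same example values (level 5, qscale 4, matrix entry 16) this gives (11 * 4 * 16) >> 4 = 44, again reduced to the odd value 43 by (44 - 1) | 1.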
02722 
02723 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
02724                                    DCTELEM *block, int n, int qscale)
02725 {
02726     int i, level, nCoeffs;
02727     const uint16_t *quant_matrix;
02728 
02729     if(s->alternate_scan) nCoeffs= 63;
02730     else nCoeffs= s->block_last_index[n];
02731 
02732     if (n < 4)
02733         block[0] = block[0] * s->y_dc_scale;
02734     else
02735         block[0] = block[0] * s->c_dc_scale;
02736     quant_matrix = s->intra_matrix;
02737     for(i=1;i<=nCoeffs;i++) {
02738         int j= s->intra_scantable.permutated[i];
02739         level = block[j];
02740         if (level) {
02741             if (level < 0) {
02742                 level = -level;
02743                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02744                 level = -level;
02745             } else {
02746                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02747             }
02748             block[j] = level;
02749         }
02750     }
02751 }
02752 
02753 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
02754                                    DCTELEM *block, int n, int qscale)
02755 {
02756     int i, level, nCoeffs;
02757     const uint16_t *quant_matrix;
02758     int sum=-1;
02759 
02760     if(s->alternate_scan) nCoeffs= 63;
02761     else nCoeffs= s->block_last_index[n];
02762 
02763     if (n < 4)
02764         block[0] = block[0] * s->y_dc_scale;
02765     else
02766         block[0] = block[0] * s->c_dc_scale;
02767     quant_matrix = s->intra_matrix;
02768     for(i=1;i<=nCoeffs;i++) {
02769         int j= s->intra_scantable.permutated[i];
02770         level = block[j];
02771         if (level) {
02772             if (level < 0) {
02773                 level = -level;
02774                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02775                 level = -level;
02776             } else {
02777                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02778             }
02779             block[j] = level;
02780             sum+=level;
02781         }
02782     }
02783     block[63]^=sum&1;
02784 }
02785 
02786 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
02787                                    DCTELEM *block, int n, int qscale)
02788 {
02789     int i, level, nCoeffs;
02790     const uint16_t *quant_matrix;
02791     int sum=-1;
02792 
02793     if(s->alternate_scan) nCoeffs= 63;
02794     else nCoeffs= s->block_last_index[n];
02795 
02796     quant_matrix = s->inter_matrix;
02797     for(i=0; i<=nCoeffs; i++) {
02798         int j= s->intra_scantable.permutated[i];
02799         level = block[j];
02800         if (level) {
02801             if (level < 0) {
02802                 level = -level;
02803                 level = (((level << 1) + 1) * qscale *
02804                          ((int) (quant_matrix[j]))) >> 4;
02805                 level = -level;
02806             } else {
02807                 level = (((level << 1) + 1) * qscale *
02808                          ((int) (quant_matrix[j]))) >> 4;
02809             }
02810             block[j] = level;
02811             sum+=level;
02812         }
02813     }
02814     block[63]^=sum&1;
02815 }
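In both MPEG-2 variants that accumulate sum, the closing block[63] ^= sum & 1 is the standard's mismatch control: instead of forcing every coefficient to be odd as MPEG-1 does, the least significant bit of the last coefficient is toggled when the parity of the accumulated coefficient sum requires it.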
02816 
02817 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
02818                                   DCTELEM *block, int n, int qscale)
02819 {
02820     int i, level, qmul, qadd;
02821     int nCoeffs;
02822 
02823     assert(s->block_last_index[n]>=0);
02824 
02825     qmul = qscale << 1;
02826 
02827     if (!s->h263_aic) {
02828         if (n < 4)
02829             block[0] = block[0] * s->y_dc_scale;
02830         else
02831             block[0] = block[0] * s->c_dc_scale;
02832         qadd = (qscale - 1) | 1;
02833     }else{
02834         qadd = 0;
02835     }
02836     if(s->ac_pred)
02837         nCoeffs=63;
02838     else
02839         nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
02840 
02841     for(i=1; i<=nCoeffs; i++) {
02842         level = block[i];
02843         if (level) {
02844             if (level < 0) {
02845                 level = level * qmul - qadd;
02846             } else {
02847                 level = level * qmul + qadd;
02848             }
02849             block[i] = level;
02850         }
02851     }
02852 }
02853 
02854 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
02855                                   DCTELEM *block, int n, int qscale)
02856 {
02857     int i, level, qmul, qadd;
02858     int nCoeffs;
02859 
02860     assert(s->block_last_index[n]>=0);
02861 
02862     qadd = (qscale - 1) | 1;
02863     qmul = qscale << 1;
02864 
02865     nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
02866 
02867     for(i=0; i<=nCoeffs; i++) {
02868         level = block[i];
02869         if (level) {
02870             if (level < 0) {
02871                 level = level * qmul - qadd;
02872             } else {
02873                 level = level * qmul + qadd;
02874             }
02875             block[i] = level;
02876         }
02877     }
02878 }
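As a concrete example of the formula above: with qscale = 4 the loop uses qmul = 8 and qadd = (4 - 1) | 1 = 3, so a coded level of 5 is reconstructed as 5 * 8 + 3 = 43 and a level of -5 as -5 * 8 - 3 = -43. The intra variant starts at coefficient 1 and, unless advanced intra coding (h263_aic) is in use, applies the DC scale tables to the DC term; with h263_aic it also sets qadd to 0.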
02879 
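02880 /**
02881  * set qscale and update qscale dependent variables.
02882  */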
02883 void ff_set_qscale(MpegEncContext * s, int qscale)
02884 {
02885     if (qscale < 1)
02886         qscale = 1;
02887     else if (qscale > 31)
02888         qscale = 31;
02889 
02890     s->qscale = qscale;
02891     s->chroma_qscale= s->chroma_qscale_table[qscale];
02892 
02893     s->y_dc_scale= s->y_dc_scale_table[ qscale ];
02894     s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
02895 }
02896 
02897 void MPV_report_decode_progress(MpegEncContext *s)
02898 {
02899     if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->error_occurred)
02900         ff_thread_report_progress((AVFrame*)s->current_picture_ptr, s->mb_y, 0);
02901 }
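Under frame-based multithreading this marks rows up to mb_y of the current picture as decoded, so threads working on later frames may use them as reference data. The consumer side blocks on the matching await call before reading those rows; a rough sketch (mb_row is a hypothetical variable, and the exact call sites vary between decoders):

    /* wait until the reference picture has been decoded at least down to
     * mb_row before using it for motion compensation */
    ff_thread_await_progress((AVFrame*)s->last_picture_ptr, mb_row, 0);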