mpeg4videodec.c

/*
 * MPEG-4 decoder
 * Copyright (c) 2000,2001 Fabrice Bellard
 * Copyright (c) 2002-2010 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#define UNCHECKED_BITSTREAM_READER 1

#include "config_components.h"

#include "libavutil/avassert.h"
#include "libavutil/internal.h"
#include "libavutil/opt.h"
#include "libavutil/thread.h"
#include "codec_internal.h"
#include "error_resilience.h"
#include "hwconfig.h"
#include "idctdsp.h"
#include "mpegutils.h"
#include "mpegvideo.h"
#include "mpegvideodata.h"
#include "mpegvideodec.h"
#include "mpegvideo_unquantize.h"
#include "mpeg4video.h"
#include "mpeg4videodata.h"
#include "mpeg4videodec.h"
#include "mpeg4videodefs.h"
#include "h263.h"
#include "h263data.h"
#include "h263dec.h"
#include "internal.h"
#include "profiles.h"
#include "qpeldsp.h"
#include "threadprogress.h"
#include "unary.h"

#if 0 //3IV1 is quite rare and it slows things down a tiny bit
#define IS_3IV1 (s->codec_tag == AV_RL32("3IV1"))
#else
#define IS_3IV1 0
#endif

/* The defines below define the number of bits that are read at once for
 * reading vlc values. Changing these may improve speed and data cache needs;
 * be aware, though, that decreasing them may require the number of stages
 * passed to get_vlc* to be increased. */
#define SPRITE_TRAJ_VLC_BITS 6
#define DC_VLC_BITS 9
#define MB_TYPE_B_VLC_BITS 4
#define STUDIO_INTRA_BITS 9
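
/* Illustrative note (an addition for exposition, not from the original file):
 * the trade-off described above is table size versus lookup depth. get_vlc2()
 * resolves at most bits * max_depth bits, so with SPRITE_TRAJ_VLC_BITS = 6 a
 * call such as get_vlc2(gb, sprite_trajectory, SPRITE_TRAJ_VLC_BITS, 2) can
 * decode codes of up to 12 bits in two table stages; shrinking the define
 * would require raising that depth. */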

static VLCElem dc_lum[512], dc_chrom[512];
static VLCElem sprite_trajectory[128];
static VLCElem mb_type_b_vlc[16];
static const VLCElem *studio_intra_tab[12];
static VLCElem studio_luma_dc[528];
static VLCElem studio_chroma_dc[528];

static const uint8_t mpeg4_block_count[4] = { 0, 6, 8, 12 };

static const int16_t mb_type_b_map[4] = {
    MB_TYPE_DIRECT2 | MB_TYPE_BIDIR_MV,
    MB_TYPE_BIDIR_MV | MB_TYPE_16x16,
    MB_TYPE_BACKWARD_MV | MB_TYPE_16x16,
    MB_TYPE_FORWARD_MV | MB_TYPE_16x16,
};

static inline Mpeg4DecContext *h263_to_mpeg4(H263DecContext *h)
{
    av_assert2(h->c.codec_id == AV_CODEC_ID_MPEG4 && h->c.avctx->priv_data == h);
    return (Mpeg4DecContext*)h;
}

static void gmc1_motion(MpegEncContext *s, const Mpeg4DecContext *ctx,
                        uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                        uint8_t *const *ref_picture)
{
    const uint8_t *ptr;
    int src_x, src_y, motion_x, motion_y;
    ptrdiff_t offset, linesize, uvlinesize;
    int emu = 0;

    motion_x = ctx->sprite_offset[0][0];
    motion_y = ctx->sprite_offset[0][1];
    src_x = s->mb_x * 16 + (motion_x >> (ctx->sprite_warping_accuracy + 1));
    src_y = s->mb_y * 16 + (motion_y >> (ctx->sprite_warping_accuracy + 1));
    motion_x *= 1 << (3 - ctx->sprite_warping_accuracy);
    motion_y *= 1 << (3 - ctx->sprite_warping_accuracy);
    src_x = av_clip(src_x, -16, s->width);
    if (src_x == s->width)
        motion_x = 0;
    src_y = av_clip(src_y, -16, s->height);
    if (src_y == s->height)
        motion_y = 0;

    linesize   = s->linesize;
    uvlinesize = s->uvlinesize;

    ptr = ref_picture[0] + src_y * linesize + src_x;

    if ((unsigned)src_x >= FFMAX(s->h_edge_pos - 17, 0) ||
        (unsigned)src_y >= FFMAX(s->v_edge_pos - 17, 0)) {
        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
                                 linesize, linesize,
                                 17, 17,
                                 src_x, src_y,
                                 s->h_edge_pos, s->v_edge_pos);
        ptr = s->sc.edge_emu_buffer;
    }

    if ((motion_x | motion_y) & 7) {
        ctx->mdsp.gmc1(dest_y, ptr, linesize, 16,
                       motion_x & 15, motion_y & 15, 128 - s->no_rounding);
        ctx->mdsp.gmc1(dest_y + 8, ptr + 8, linesize, 16,
                       motion_x & 15, motion_y & 15, 128 - s->no_rounding);
    } else {
        int dxy;

        dxy = ((motion_x >> 3) & 1) | ((motion_y >> 2) & 2);
        if (s->no_rounding) {
            s->hdsp.put_no_rnd_pixels_tab[0][dxy](dest_y, ptr, linesize, 16);
        } else {
            s->hdsp.put_pixels_tab[0][dxy](dest_y, ptr, linesize, 16);
        }
    }

    if (CONFIG_GRAY && s->avctx->flags & AV_CODEC_FLAG_GRAY)
        return;

    motion_x = ctx->sprite_offset[1][0];
    motion_y = ctx->sprite_offset[1][1];
    src_x = s->mb_x * 8 + (motion_x >> (ctx->sprite_warping_accuracy + 1));
    src_y = s->mb_y * 8 + (motion_y >> (ctx->sprite_warping_accuracy + 1));
    motion_x *= 1 << (3 - ctx->sprite_warping_accuracy);
    motion_y *= 1 << (3 - ctx->sprite_warping_accuracy);
    src_x = av_clip(src_x, -8, s->width >> 1);
    if (src_x == s->width >> 1)
        motion_x = 0;
    src_y = av_clip(src_y, -8, s->height >> 1);
    if (src_y == s->height >> 1)
        motion_y = 0;

    offset = (src_y * uvlinesize) + src_x;
    ptr = ref_picture[1] + offset;
    if ((unsigned)src_x >= FFMAX((s->h_edge_pos >> 1) - 9, 0) ||
        (unsigned)src_y >= FFMAX((s->v_edge_pos >> 1) - 9, 0)) {
        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
                                 uvlinesize, uvlinesize,
                                 9, 9,
                                 src_x, src_y,
                                 s->h_edge_pos >> 1, s->v_edge_pos >> 1);
        ptr = s->sc.edge_emu_buffer;
        emu = 1;
    }
    ctx->mdsp.gmc1(dest_cb, ptr, uvlinesize, 8,
                   motion_x & 15, motion_y & 15, 128 - s->no_rounding);

    ptr = ref_picture[2] + offset;
    if (emu) {
        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
                                 uvlinesize, uvlinesize,
                                 9, 9,
                                 src_x, src_y,
                                 s->h_edge_pos >> 1, s->v_edge_pos >> 1);
        ptr = s->sc.edge_emu_buffer;
    }
    ctx->mdsp.gmc1(dest_cr, ptr, uvlinesize, 8,
                   motion_x & 15, motion_y & 15, 128 - s->no_rounding);
}

static void gmc_motion(MpegEncContext *s, const Mpeg4DecContext *ctx,
                       uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                       uint8_t *const *ref_picture)
{
    const uint8_t *ptr;
    int linesize, uvlinesize;
    const int a = ctx->sprite_warping_accuracy;
    int ox, oy;

    linesize   = s->linesize;
    uvlinesize = s->uvlinesize;

    ptr = ref_picture[0];

    ox = ctx->sprite_offset[0][0] + ctx->sprite_delta[0][0] * s->mb_x * 16 +
         ctx->sprite_delta[0][1] * s->mb_y * 16;
    oy = ctx->sprite_offset[0][1] + ctx->sprite_delta[1][0] * s->mb_x * 16 +
         ctx->sprite_delta[1][1] * s->mb_y * 16;

    ctx->mdsp.gmc(dest_y, ptr, linesize, 16,
                  ox, oy,
                  ctx->sprite_delta[0][0], ctx->sprite_delta[0][1],
                  ctx->sprite_delta[1][0], ctx->sprite_delta[1][1],
                  a + 1, (1 << (2 * a + 1)) - s->no_rounding,
                  s->h_edge_pos, s->v_edge_pos);
    ctx->mdsp.gmc(dest_y + 8, ptr, linesize, 16,
                  ox + ctx->sprite_delta[0][0] * 8,
                  oy + ctx->sprite_delta[1][0] * 8,
                  ctx->sprite_delta[0][0], ctx->sprite_delta[0][1],
                  ctx->sprite_delta[1][0], ctx->sprite_delta[1][1],
                  a + 1, (1 << (2 * a + 1)) - s->no_rounding,
                  s->h_edge_pos, s->v_edge_pos);

    if (CONFIG_GRAY && s->avctx->flags & AV_CODEC_FLAG_GRAY)
        return;

    ox = ctx->sprite_offset[1][0] + ctx->sprite_delta[0][0] * s->mb_x * 8 +
         ctx->sprite_delta[0][1] * s->mb_y * 8;
    oy = ctx->sprite_offset[1][1] + ctx->sprite_delta[1][0] * s->mb_x * 8 +
         ctx->sprite_delta[1][1] * s->mb_y * 8;

    ptr = ref_picture[1];
    ctx->mdsp.gmc(dest_cb, ptr, uvlinesize, 8,
                  ox, oy,
                  ctx->sprite_delta[0][0], ctx->sprite_delta[0][1],
                  ctx->sprite_delta[1][0], ctx->sprite_delta[1][1],
                  a + 1, (1 << (2 * a + 1)) - s->no_rounding,
                  (s->h_edge_pos + 1) >> 1, (s->v_edge_pos + 1) >> 1);

    ptr = ref_picture[2];
    ctx->mdsp.gmc(dest_cr, ptr, uvlinesize, 8,
                  ox, oy,
                  ctx->sprite_delta[0][0], ctx->sprite_delta[0][1],
                  ctx->sprite_delta[1][0], ctx->sprite_delta[1][1],
                  a + 1, (1 << (2 * a + 1)) - s->no_rounding,
                  (s->h_edge_pos + 1) >> 1, (s->v_edge_pos + 1) >> 1);
}

void ff_mpeg4_mcsel_motion(MpegEncContext *s,
                           uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                           uint8_t *const *ref_picture)
{
    const Mpeg4DecContext *const ctx = (Mpeg4DecContext*)s;

    if (ctx->real_sprite_warping_points == 1) {
        gmc1_motion(s, ctx, dest_y, dest_cb, dest_cr,
                    ref_picture);
    } else {
        gmc_motion(s, ctx, dest_y, dest_cb, dest_cr,
                   ref_picture);
    }
}
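
/* Illustrative note (an addition for exposition, not from the original file):
 * for the single-warping-point path, sprite_offset[] is effectively a global
 * motion vector in units of 1/(2 << sprite_warping_accuracy) pel. With
 * sprite_warping_accuracy == 1 (quarter-pel) and sprite_offset[0][0] == 37,
 * gmc1_motion() computes
 *     src_x    = mb_x * 16 + (37 >> 2) = mb_x * 16 + 9
 *     motion_x = 37 << (3 - 1) = 148,  148 & 15 = 4
 * i.e. an integer source position plus a sixteenth-pel phase handed to
 * mdsp.gmc1(). The multi-point path instead evaluates the affine warp per
 * macroblock via sprite_delta[][]. */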

void ff_mpeg4_decode_studio(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb,
                            uint8_t *dest_cr, int block_size, int uvlinesize,
                            int dct_linesize, int dct_offset)
{
    Mpeg4DecContext *const ctx = (Mpeg4DecContext*)s;
    const int act_block_size = block_size * 2;

    if (ctx->dpcm_direction == 0) {
        s->idsp.idct_put(dest_y, dct_linesize, (int16_t*)ctx->block32[0]);
        s->idsp.idct_put(dest_y + act_block_size, dct_linesize, (int16_t*)ctx->block32[1]);
        s->idsp.idct_put(dest_y + dct_offset, dct_linesize, (int16_t*)ctx->block32[2]);
        s->idsp.idct_put(dest_y + dct_offset + act_block_size, dct_linesize, (int16_t*)ctx->block32[3]);

        dct_linesize = uvlinesize << s->interlaced_dct;
        dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize * block_size;

        s->idsp.idct_put(dest_cb, dct_linesize, (int16_t*)ctx->block32[4]);
        s->idsp.idct_put(dest_cr, dct_linesize, (int16_t*)ctx->block32[5]);
        s->idsp.idct_put(dest_cb + dct_offset, dct_linesize, (int16_t*)ctx->block32[6]);
        s->idsp.idct_put(dest_cr + dct_offset, dct_linesize, (int16_t*)ctx->block32[7]);

        if (!s->chroma_x_shift) { // Chroma444
            s->idsp.idct_put(dest_cb + act_block_size, dct_linesize, (int16_t*)ctx->block32[8]);
            s->idsp.idct_put(dest_cr + act_block_size, dct_linesize, (int16_t*)ctx->block32[9]);
            s->idsp.idct_put(dest_cb + act_block_size + dct_offset, dct_linesize, (int16_t*)ctx->block32[10]);
            s->idsp.idct_put(dest_cr + act_block_size + dct_offset, dct_linesize, (int16_t*)ctx->block32[11]);
        }
    } else if (ctx->dpcm_direction == 1) {
        uint16_t *dest_pcm[3] = {(uint16_t*)dest_y, (uint16_t*)dest_cb, (uint16_t*)dest_cr};
        int linesize[3] = {dct_linesize, uvlinesize, uvlinesize};
        for (int i = 0; i < 3; i++) {
            const uint16_t *src = ctx->dpcm_macroblock[i];
            int vsub = i ? s->chroma_y_shift : 0;
            int hsub = i ? s->chroma_x_shift : 0;
            int lowres = s->avctx->lowres;
            int step = 1 << lowres;
            for (int h = 0; h < (16 >> (vsub + lowres)); h++) {
                for (int w = 0, idx = 0; w < (16 >> (hsub + lowres)); w++, idx += step)
                    dest_pcm[i][w] = src[idx];
                dest_pcm[i] += linesize[i] / 2;
                src += (16 >> hsub) * step;
            }
        }
    } else {
        uint16_t *dest_pcm[3] = {(uint16_t*)dest_y, (uint16_t*)dest_cb, (uint16_t*)dest_cr};
        int linesize[3] = {dct_linesize, uvlinesize, uvlinesize};
        av_assert2(ctx->dpcm_direction == -1);
        for (int i = 0; i < 3; i++) {
            const uint16_t *src = ctx->dpcm_macroblock[i];
            int vsub = i ? s->chroma_y_shift : 0;
            int hsub = i ? s->chroma_x_shift : 0;
            int lowres = s->avctx->lowres;
            int step = 1 << lowres;
            dest_pcm[i] += (linesize[i] / 2) * ((16 >> vsub + lowres) - 1);
            for (int h = (16 >> (vsub + lowres)) - 1; h >= 0; h--) {
                for (int w = (16 >> (hsub + lowres)) - 1, idx = 0; w >= 0; w--, idx += step)
                    dest_pcm[i][w] = src[idx];
                src += step * (16 >> hsub);
                dest_pcm[i] -= linesize[i] / 2;
            }
        }
    }
}

/**
 * Predict the ac.
 * @param n block index (0-3 are luma, 4-5 are chroma)
 * @param dir the ac prediction direction
 */
void ff_mpeg4_pred_ac(H263DecContext *const h, int16_t *block, int n, int dir)
{
    int i;
    int16_t *ac_val, *ac_val1;
    int8_t *const qscale_table = h->c.cur_pic.qscale_table;

    /* find prediction */
    ac_val = &h->c.ac_val[0][0] + h->c.block_index[n] * 16;
    ac_val1 = ac_val;
    if (h->c.ac_pred) {
        if (dir == 0) {
            const int xy = h->c.mb_x - 1 + h->c.mb_y * h->c.mb_stride;
            /* left prediction */
            ac_val -= 16;

            if (h->c.mb_x == 0 || h->c.qscale == qscale_table[xy] ||
                n == 1 || n == 3) {
                /* same qscale */
                for (i = 1; i < 8; i++)
                    block[h->c.idsp.idct_permutation[i << 3]] += ac_val[i];
            } else {
                /* different qscale, we must rescale */
                for (i = 1; i < 8; i++)
                    block[h->c.idsp.idct_permutation[i << 3]] += ROUNDED_DIV(ac_val[i] * qscale_table[xy], h->c.qscale);
            }
        } else {
            const int xy = h->c.mb_x + h->c.mb_y * h->c.mb_stride - h->c.mb_stride;
            /* top prediction */
            ac_val -= 16 * h->c.block_wrap[n];

            if (h->c.mb_y == 0 || h->c.qscale == qscale_table[xy] ||
                n == 2 || n == 3) {
                /* same qscale */
                for (i = 1; i < 8; i++)
                    block[h->c.idsp.idct_permutation[i]] += ac_val[i + 8];
            } else {
                /* different qscale, we must rescale */
                for (i = 1; i < 8; i++)
                    block[h->c.idsp.idct_permutation[i]] += ROUNDED_DIV(ac_val[i + 8] * qscale_table[xy], h->c.qscale);
            }
        }
    }
    /* left copy */
    for (i = 1; i < 8; i++)
        ac_val1[i] = block[h->c.idsp.idct_permutation[i << 3]];

    /* top copy */
    for (i = 1; i < 8; i++)
        ac_val1[8 + i] = block[h->c.idsp.idct_permutation[i]];
}
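
/* Illustrative note (an addition for exposition, not from the original file):
 * the ROUNDED_DIV() rescale above maps a neighbour's stored AC level from the
 * neighbour's quantiser to the current one. For example, a left-neighbour
 * coefficient of 12 stored at qscale 4, predicted into a block coded at
 * qscale 6, contributes ROUNDED_DIV(12 * 4, 6) = 8 to the corresponding
 * first-column coefficient before dequantisation. */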

/**
 * check if the next stuff is a resync marker or the end.
 * @return 0 if not
 */
static inline int mpeg4_is_resync(Mpeg4DecContext *ctx)
{
    H263DecContext *const h = &ctx->h;
    int bits_count = get_bits_count(&h->gb);
    int v = show_bits(&h->gb, 16);

    if (h->c.workaround_bugs & FF_BUG_NO_PADDING && !ctx->resync_marker)
        return 0;

    while (v <= 0xFF) {
        if (h->c.pict_type == AV_PICTURE_TYPE_B ||
            (v >> (8 - h->c.pict_type) != 1) || h->partitioned_frame)
            break;
        skip_bits(&h->gb, 8 + h->c.pict_type);
        bits_count += 8 + h->c.pict_type;
        v = show_bits(&h->gb, 16);
    }

    if (bits_count + 8 >= h->gb.size_in_bits) {
        v >>= 8;
        v |= 0x7F >> (7 - (bits_count & 7));

        if (v == 0x7F)
            return h->c.mb_num;
    } else {
        static const uint16_t mpeg4_resync_prefix[8] = {
            0x7F00, 0x7E00, 0x7C00, 0x7800, 0x7000, 0x6000, 0x4000, 0x0000
        };

        if (v == mpeg4_resync_prefix[bits_count & 7]) {
            int len, mb_num;
            int mb_num_bits = av_log2(h->c.mb_num - 1) + 1;
            GetBitContext gb = h->gb;

            skip_bits(&h->gb, 1);
            align_get_bits(&h->gb);

            for (len = 0; len < 32; len++)
                if (get_bits1(&h->gb))
                    break;

            mb_num = get_bits(&h->gb, mb_num_bits);
            if (!mb_num || mb_num > h->c.mb_num || get_bits_count(&h->gb) + 6 > h->gb.size_in_bits)
                mb_num = -1;

            h->gb = gb;

            if (len >= ff_mpeg4_get_video_packet_prefix_length(h->c.pict_type, ctx->f_code, ctx->b_code))
                return mb_num;
        }
    }
    return 0;
}
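
/* Illustrative note (an addition for exposition, not from the original file):
 * mpeg4_resync_prefix[] encodes what show_bits(16) must look like when
 * zero-bit stuffing ('0' followed by '1's up to the byte boundary) is
 * followed by the leading zero byte of a resync marker. E.g. with
 * bits_count & 7 == 3 there are five stuffing bits left in the current byte,
 * so the expected pattern is 01111 followed by eight zeros:
 * 0111 1000 0000 0000 = 0x7800, which is exactly mpeg4_resync_prefix[3]. */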

static int mpeg4_decode_sprite_trajectory(Mpeg4DecContext *ctx, GetBitContext *gb)
{
    MpegEncContext *s = &ctx->h.c;
    int a = 2 << ctx->sprite_warping_accuracy;
    int rho = 3 - ctx->sprite_warping_accuracy;
    int r = 16 / a;
    int alpha = 1;
    int beta = 0;
    int w = s->width;
    int h = s->height;
    int min_ab, i, w2, h2, w3, h3;
    int sprite_ref[4][2];
    int virtual_ref[2][2];
    int64_t sprite_offset[2][2];
    int64_t sprite_delta[2][2];
    // only true for rectangle shapes
    const int vop_ref[4][2] = { { 0, 0 },         { s->width, 0 },
                                { 0, s->height }, { s->width, s->height } };
    int d[4][2] = { { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 } };

    if (w <= 0 || h <= 0)
        return AVERROR_INVALIDDATA;

    for (i = 0; i < ctx->num_sprite_warping_points; i++) {
        int length;
        int x = 0, y = 0;

        length = get_vlc2(gb, sprite_trajectory, SPRITE_TRAJ_VLC_BITS, 2);
        if (length > 0)
            x = get_xbits(gb, length);

        if (!(ctx->divx_version == 500 && ctx->divx_build == 413))
            check_marker(s->avctx, gb, "before sprite_trajectory");

        length = get_vlc2(gb, sprite_trajectory, SPRITE_TRAJ_VLC_BITS, 2);
        if (length > 0)
            y = get_xbits(gb, length);

        check_marker(s->avctx, gb, "after sprite_trajectory");
        ctx->sprite_traj[i][0] = d[i][0] = x;
        ctx->sprite_traj[i][1] = d[i][1] = y;
    }
    for (; i < 4; i++)
        ctx->sprite_traj[i][0] = ctx->sprite_traj[i][1] = 0;

    while ((1 << alpha) < w)
        alpha++;
    while ((1 << beta) < h)
        beta++; /* typo in the MPEG-4 std for the definition of w' and h' */
    w2 = 1 << alpha;
    h2 = 1 << beta;

    // Note, the 4th point isn't used for GMC
    if (ctx->divx_version == 500 && ctx->divx_build == 413) {
        sprite_ref[0][0] = a * vop_ref[0][0] + d[0][0];
        sprite_ref[0][1] = a * vop_ref[0][1] + d[0][1];
        sprite_ref[1][0] = a * vop_ref[1][0] + d[0][0] + d[1][0];
        sprite_ref[1][1] = a * vop_ref[1][1] + d[0][1] + d[1][1];
        sprite_ref[2][0] = a * vop_ref[2][0] + d[0][0] + d[2][0];
        sprite_ref[2][1] = a * vop_ref[2][1] + d[0][1] + d[2][1];
    } else {
        sprite_ref[0][0] = (a >> 1) * (2 * vop_ref[0][0] + d[0][0]);
        sprite_ref[0][1] = (a >> 1) * (2 * vop_ref[0][1] + d[0][1]);
        sprite_ref[1][0] = (a >> 1) * (2 * vop_ref[1][0] + d[0][0] + d[1][0]);
        sprite_ref[1][1] = (a >> 1) * (2 * vop_ref[1][1] + d[0][1] + d[1][1]);
        sprite_ref[2][0] = (a >> 1) * (2 * vop_ref[2][0] + d[0][0] + d[2][0]);
        sprite_ref[2][1] = (a >> 1) * (2 * vop_ref[2][1] + d[0][1] + d[2][1]);
    }
    /* sprite_ref[3][0] = (a >> 1) * (2 * vop_ref[3][0] + d[0][0] + d[1][0] + d[2][0] + d[3][0]);
     * sprite_ref[3][1] = (a >> 1) * (2 * vop_ref[3][1] + d[0][1] + d[1][1] + d[2][1] + d[3][1]); */

    /* This is mostly identical to the MPEG-4 std (and is totally unreadable
     * because of that...). Perhaps it should be reordered to be more readable.
     * The idea behind this virtual_ref mess is to be able to use shifts later
     * per pixel instead of divides so the distance between points is converted
     * from w&h based to w2&h2 based which are of the 2^x form. */
    virtual_ref[0][0] = 16 * (vop_ref[0][0] + w2) +
                        ROUNDED_DIV(((w - w2) *
                                     (r * sprite_ref[0][0] - 16LL * vop_ref[0][0]) +
                                     w2 * (r * sprite_ref[1][0] - 16LL * vop_ref[1][0])), w);
    virtual_ref[0][1] = 16 * vop_ref[0][1] +
                        ROUNDED_DIV(((w - w2) *
                                     (r * sprite_ref[0][1] - 16LL * vop_ref[0][1]) +
                                     w2 * (r * sprite_ref[1][1] - 16LL * vop_ref[1][1])), w);
    virtual_ref[1][0] = 16 * vop_ref[0][0] +
                        ROUNDED_DIV(((h - h2) * (r * sprite_ref[0][0] - 16LL * vop_ref[0][0]) +
                                     h2 * (r * sprite_ref[2][0] - 16LL * vop_ref[2][0])), h);
    virtual_ref[1][1] = 16 * (vop_ref[0][1] + h2) +
                        ROUNDED_DIV(((h - h2) * (r * sprite_ref[0][1] - 16LL * vop_ref[0][1]) +
                                     h2 * (r * sprite_ref[2][1] - 16LL * vop_ref[2][1])), h);

    switch (ctx->num_sprite_warping_points) {
    case 0:
        sprite_offset[0][0] =
        sprite_offset[0][1] =
        sprite_offset[1][0] =
        sprite_offset[1][1] = 0;
        sprite_delta[0][0] = a;
        sprite_delta[0][1] =
        sprite_delta[1][0] = 0;
        sprite_delta[1][1] = a;
        ctx->sprite_shift[0] =
        ctx->sprite_shift[1] = 0;
        break;
    case 1: // GMC only
        sprite_offset[0][0] = sprite_ref[0][0] - a * vop_ref[0][0];
        sprite_offset[0][1] = sprite_ref[0][1] - a * vop_ref[0][1];
        sprite_offset[1][0] = ((sprite_ref[0][0] >> 1) | (sprite_ref[0][0] & 1)) -
                              a * (vop_ref[0][0] / 2);
        sprite_offset[1][1] = ((sprite_ref[0][1] >> 1) | (sprite_ref[0][1] & 1)) -
                              a * (vop_ref[0][1] / 2);
        sprite_delta[0][0] = a;
        sprite_delta[0][1] =
        sprite_delta[1][0] = 0;
        sprite_delta[1][1] = a;
        ctx->sprite_shift[0] =
        ctx->sprite_shift[1] = 0;
        break;
    case 2:
        sprite_offset[0][0] = ((int64_t) sprite_ref[0][0] * (1 << alpha + rho)) +
                              ((int64_t) -r * sprite_ref[0][0] + virtual_ref[0][0]) *
                              ((int64_t) -vop_ref[0][0]) +
                              ((int64_t) r * sprite_ref[0][1] - virtual_ref[0][1]) *
                              ((int64_t) -vop_ref[0][1]) + (1 << (alpha + rho - 1));
        sprite_offset[0][1] = ((int64_t) sprite_ref[0][1] * (1 << alpha + rho)) +
                              ((int64_t) -r * sprite_ref[0][1] + virtual_ref[0][1]) *
                              ((int64_t) -vop_ref[0][0]) +
                              ((int64_t) -r * sprite_ref[0][0] + virtual_ref[0][0]) *
                              ((int64_t) -vop_ref[0][1]) + (1 << (alpha + rho - 1));
        sprite_offset[1][0] = (((int64_t)-r * sprite_ref[0][0] + virtual_ref[0][0]) *
                               ((int64_t)-2 * vop_ref[0][0] + 1) +
                               ((int64_t) r * sprite_ref[0][1] - virtual_ref[0][1]) *
                               ((int64_t)-2 * vop_ref[0][1] + 1) + 2 * w2 * r *
                               (int64_t) sprite_ref[0][0] - 16 * w2 + (1 << (alpha + rho + 1)));
        sprite_offset[1][1] = (((int64_t)-r * sprite_ref[0][1] + virtual_ref[0][1]) *
                               ((int64_t)-2 * vop_ref[0][0] + 1) +
                               ((int64_t)-r * sprite_ref[0][0] + virtual_ref[0][0]) *
                               ((int64_t)-2 * vop_ref[0][1] + 1) + 2 * w2 * r *
                               (int64_t) sprite_ref[0][1] - 16 * w2 + (1 << (alpha + rho + 1)));
        sprite_delta[0][0] = (-r * sprite_ref[0][0] + virtual_ref[0][0]);
        sprite_delta[0][1] = (+r * sprite_ref[0][1] - virtual_ref[0][1]);
        sprite_delta[1][0] = (-r * sprite_ref[0][1] + virtual_ref[0][1]);
        sprite_delta[1][1] = (-r * sprite_ref[0][0] + virtual_ref[0][0]);

        ctx->sprite_shift[0] = alpha + rho;
        ctx->sprite_shift[1] = alpha + rho + 2;
        break;
    case 3:
        min_ab = FFMIN(alpha, beta);
        w3 = w2 >> min_ab;
        h3 = h2 >> min_ab;
        sprite_offset[0][0] = ((int64_t)sprite_ref[0][0] * (1 << (alpha + beta + rho - min_ab))) +
                              ((int64_t)-r * sprite_ref[0][0] + virtual_ref[0][0]) * h3 * (-vop_ref[0][0]) +
                              ((int64_t)-r * sprite_ref[0][0] + virtual_ref[1][0]) * w3 * (-vop_ref[0][1]) +
                              ((int64_t)1 << (alpha + beta + rho - min_ab - 1));
        sprite_offset[0][1] = ((int64_t)sprite_ref[0][1] * (1 << (alpha + beta + rho - min_ab))) +
                              ((int64_t)-r * sprite_ref[0][1] + virtual_ref[0][1]) * h3 * (-vop_ref[0][0]) +
                              ((int64_t)-r * sprite_ref[0][1] + virtual_ref[1][1]) * w3 * (-vop_ref[0][1]) +
                              ((int64_t)1 << (alpha + beta + rho - min_ab - 1));
        sprite_offset[1][0] = ((int64_t)-r * sprite_ref[0][0] + virtual_ref[0][0]) * h3 * (-2 * vop_ref[0][0] + 1) +
                              ((int64_t)-r * sprite_ref[0][0] + virtual_ref[1][0]) * w3 * (-2 * vop_ref[0][1] + 1) +
                              (int64_t)2 * w2 * h3 * r * sprite_ref[0][0] - 16 * w2 * h3 +
                              ((int64_t)1 << (alpha + beta + rho - min_ab + 1));
        sprite_offset[1][1] = ((int64_t)-r * sprite_ref[0][1] + virtual_ref[0][1]) * h3 * (-2 * vop_ref[0][0] + 1) +
                              ((int64_t)-r * sprite_ref[0][1] + virtual_ref[1][1]) * w3 * (-2 * vop_ref[0][1] + 1) +
                              (int64_t)2 * w2 * h3 * r * sprite_ref[0][1] - 16 * w2 * h3 +
                              ((int64_t)1 << (alpha + beta + rho - min_ab + 1));
        sprite_delta[0][0] = (-r * (int64_t)sprite_ref[0][0] + virtual_ref[0][0]) * h3;
        sprite_delta[0][1] = (-r * (int64_t)sprite_ref[0][0] + virtual_ref[1][0]) * w3;
        sprite_delta[1][0] = (-r * (int64_t)sprite_ref[0][1] + virtual_ref[0][1]) * h3;
        sprite_delta[1][1] = (-r * (int64_t)sprite_ref[0][1] + virtual_ref[1][1]) * w3;

        ctx->sprite_shift[0] = alpha + beta + rho - min_ab;
        ctx->sprite_shift[1] = alpha + beta + rho - min_ab + 2;
        break;
    default:
        av_unreachable("num_sprite_warping_points outside of 0..3 results in an error "
                       "in which num_sprite_warping_points is reset to zero");
    }
    /* try to simplify the situation */
    if (sprite_delta[0][0] == a << ctx->sprite_shift[0] &&
        sprite_delta[0][1] == 0 &&
        sprite_delta[1][0] == 0 &&
        sprite_delta[1][1] == a << ctx->sprite_shift[0]) {
        sprite_offset[0][0] >>= ctx->sprite_shift[0];
        sprite_offset[0][1] >>= ctx->sprite_shift[0];
        sprite_offset[1][0] >>= ctx->sprite_shift[1];
        sprite_offset[1][1] >>= ctx->sprite_shift[1];
        sprite_delta[0][0] = a;
        sprite_delta[0][1] = 0;
        sprite_delta[1][0] = 0;
        sprite_delta[1][1] = a;
        ctx->sprite_shift[0] = 0;
        ctx->sprite_shift[1] = 0;
        ctx->real_sprite_warping_points = 1;
    } else {
        int shift_y = 16 - ctx->sprite_shift[0];
        int shift_c = 16 - ctx->sprite_shift[1];

        for (i = 0; i < 2; i++) {
            if (shift_c < 0 || shift_y < 0 ||
                FFABS(sprite_offset[0][i]) >= INT_MAX >> shift_y ||
                FFABS(sprite_offset[1][i]) >= INT_MAX >> shift_c ||
                FFABS(sprite_delta[0][i]) >= INT_MAX >> shift_y ||
                FFABS(sprite_delta[1][i]) >= INT_MAX >> shift_y
            ) {
                avpriv_request_sample(s->avctx, "Too large sprite shift, delta or offset");
                goto overflow;
            }
        }

        for (i = 0; i < 2; i++) {
            sprite_offset[0][i] *= 1 << shift_y;
            sprite_offset[1][i] *= 1 << shift_c;
            sprite_delta[0][i] *= 1 << shift_y;
            sprite_delta[1][i] *= 1 << shift_y;
            ctx->sprite_shift[i] = 16;
        }
        for (i = 0; i < 2; i++) {
            int64_t sd[2] = {
                sprite_delta[i][0] - a * (1LL << 16),
                sprite_delta[i][1] - a * (1LL << 16)
            };

            if (llabs(sprite_offset[0][i] + sprite_delta[i][0] * (w+16LL)) >= INT_MAX ||
                llabs(sprite_offset[0][i] + sprite_delta[i][1] * (h+16LL)) >= INT_MAX ||
                llabs(sprite_offset[0][i] + sprite_delta[i][0] * (w+16LL) + sprite_delta[i][1] * (h+16LL)) >= INT_MAX ||
                llabs(sprite_delta[i][0] * (w+16LL)) >= INT_MAX ||
                llabs(sprite_delta[i][1] * (h+16LL)) >= INT_MAX ||
                llabs(sd[0]) >= INT_MAX ||
                llabs(sd[1]) >= INT_MAX ||
                llabs(sprite_offset[0][i] + sd[0] * (w+16LL)) >= INT_MAX ||
                llabs(sprite_offset[0][i] + sd[1] * (h+16LL)) >= INT_MAX ||
                llabs(sprite_offset[0][i] + sd[0] * (w+16LL) + sd[1] * (h+16LL)) >= INT_MAX
            ) {
                avpriv_request_sample(s->avctx, "Overflow on sprite points");
                goto overflow;
            }
        }
        ctx->real_sprite_warping_points = ctx->num_sprite_warping_points;
    }

    for (i = 0; i < 4; i++) {
        ctx->sprite_offset[i&1][i>>1] = sprite_offset[i&1][i>>1];
        ctx->sprite_delta [i&1][i>>1] = sprite_delta [i&1][i>>1];
    }
    return 0;

overflow:
    memset(ctx->sprite_offset, 0, sizeof(ctx->sprite_offset));
    memset(ctx->sprite_delta, 0, sizeof(ctx->sprite_delta));
    return AVERROR_PATCHWELCOME;
}
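
/* Illustrative note (an addition for exposition, not from the original file):
 * the alpha/beta loops above pick the smallest power of two not below the VOP
 * dimensions so that the later per-pixel warp can divide by w2/h2 with a plain
 * shift (sprite_shift[]) instead of dividing by w/h. A standalone sketch of
 * that step, assuming nothing beyond the VOP width: */
#if 0
static int vop_dim_to_shift(int w)
{
    int alpha = 1;
    while ((1 << alpha) < w)
        alpha++;
    /* e.g. w = 720 -> alpha = 10, w2 = 1 << alpha = 1024 */
    return alpha;
}
#endif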

static int decode_new_pred(Mpeg4DecContext *ctx, GetBitContext *gb)
{
    int len = FFMIN(ctx->time_increment_bits + 3, 15);

    get_bits(gb, len);
    if (get_bits1(gb))
        get_bits(gb, len);
    check_marker(ctx->h.c.avctx, gb, "after new_pred");
    return 0;
}

/**
 * Decode the next video packet.
 * @return <0 if something went wrong
 */
int ff_mpeg4_decode_video_packet_header(H263DecContext *const h)
{
    Mpeg4DecContext *const ctx = h263_to_mpeg4(h);

    int mb_num_bits = av_log2(h->c.mb_num - 1) + 1;
    int header_extension = 0, mb_num, len;

    /* is there enough space left for a video packet + header */
    if (get_bits_count(&h->gb) > h->gb.size_in_bits - 20)
        return AVERROR_INVALIDDATA;

    for (len = 0; len < 32; len++)
        if (get_bits1(&h->gb))
            break;

    if (len != ff_mpeg4_get_video_packet_prefix_length(h->c.pict_type, ctx->f_code, ctx->b_code)) {
        av_log(h->c.avctx, AV_LOG_ERROR, "marker does not match f_code\n");
        return AVERROR_INVALIDDATA;
    }

    if (ctx->shape != RECT_SHAPE) {
        header_extension = get_bits1(&h->gb);
        // FIXME more stuff here
    }

    mb_num = get_bits(&h->gb, mb_num_bits);
    if (mb_num >= h->c.mb_num || !mb_num) {
        av_log(h->c.avctx, AV_LOG_ERROR,
               "illegal mb_num in video packet (%d %d) \n", mb_num, h->c.mb_num);
        return AVERROR_INVALIDDATA;
    }

    h->c.mb_x = mb_num % h->c.mb_width;
    h->c.mb_y = mb_num / h->c.mb_width;

    if (ctx->shape != BIN_ONLY_SHAPE) {
        int qscale = get_bits(&h->gb, ctx->quant_precision);
        if (qscale)
            h->c.chroma_qscale = h->c.qscale = qscale;
    }

    if (ctx->shape == RECT_SHAPE)
        header_extension = get_bits1(&h->gb);

    if (header_extension) {
        while (get_bits1(&h->gb) != 0)
            ;

        check_marker(h->c.avctx, &h->gb, "before time_increment in video packet header");
        skip_bits(&h->gb, ctx->time_increment_bits); /* time_increment */
        check_marker(h->c.avctx, &h->gb, "before vop_coding_type in video packet header");
        skip_bits(&h->gb, 2); /* vop coding type */
        // FIXME not rect stuff here

        if (ctx->shape != BIN_ONLY_SHAPE) {
            skip_bits(&h->gb, 3); /* intra dc vlc threshold */
            // FIXME don't just ignore everything
            if (h->c.pict_type == AV_PICTURE_TYPE_S &&
                ctx->vol_sprite_usage == GMC_SPRITE) {
                if (mpeg4_decode_sprite_trajectory(ctx, &h->gb) < 0)
                    return AVERROR_INVALIDDATA;
                av_log(h->c.avctx, AV_LOG_ERROR, "untested\n");
            }

            // FIXME reduced res stuff here

            if (h->c.pict_type != AV_PICTURE_TYPE_I) {
                int f_code = get_bits(&h->gb, 3); /* fcode_for */
                if (f_code == 0)
                    av_log(h->c.avctx, AV_LOG_ERROR,
                           "Error, video packet header damaged (f_code=0)\n");
            }
            if (h->c.pict_type == AV_PICTURE_TYPE_B) {
                int b_code = get_bits(&h->gb, 3);
                if (b_code == 0)
                    av_log(h->c.avctx, AV_LOG_ERROR,
                           "Error, video packet header damaged (b_code=0)\n");
            }
        }
    }
    if (ctx->new_pred)
        decode_new_pred(ctx, &h->gb);

    return 0;
}
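
/* Illustrative note (an addition for exposition, not from the original file):
 * mb_num_bits above is the minimum number of bits that can address every
 * macroblock of the VOP. For a CIF-sized VOP (22 x 18 = 396 macroblocks),
 * av_log2(395) = 8, so macroblock_number is read with 9 bits; the same
 * formula is used when probing for a resync marker in mpeg4_is_resync(). */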

static void reset_studio_dc_predictors(Mpeg4DecContext *const ctx)
{
    H263DecContext *const h = &ctx->h;

    /* Reset DC Predictors */
    h->last_dc[0] =
    h->last_dc[1] =
    h->last_dc[2] = 1 << (h->c.avctx->bits_per_raw_sample + ctx->dct_precision + h->c.intra_dc_precision - 1);
}

/**
 * Decode the next studio slice header.
 * @return <0 if something went wrong
 */
int ff_mpeg4_decode_studio_slice_header(H263DecContext *const h)
{
    Mpeg4DecContext *const ctx = h263_to_mpeg4(h);
    GetBitContext *gb = &h->gb;
    unsigned vlc_len;
    uint16_t mb_num;

    if (get_bits_left(gb) >= 32 && get_bits_long(gb, 32) == SLICE_STARTCODE) {
        vlc_len = av_log2(h->c.mb_width * h->c.mb_height) + 1;
        mb_num = get_bits(gb, vlc_len);

        if (mb_num >= h->c.mb_num)
            return AVERROR_INVALIDDATA;

        h->c.mb_x = mb_num % h->c.mb_width;
        h->c.mb_y = mb_num / h->c.mb_width;

        if (ctx->shape != BIN_ONLY_SHAPE)
            h->c.qscale = mpeg_get_qscale(&h->gb, h->c.q_scale_type);

        if (get_bits1(gb)) {      /* slice_extension_flag */
            skip_bits1(gb);       /* intra_slice */
            skip_bits1(gb);       /* slice_VOP_id_enable */
            skip_bits(gb, 6);     /* slice_VOP_id */
            while (get_bits1(gb)) /* extra_bit_slice */
                skip_bits(gb, 8); /* extra_information_slice */
        }

        reset_studio_dc_predictors(ctx);
    } else {
        return AVERROR_INVALIDDATA;
    }

    return 0;
}

/**
 * Get the average motion vector for a GMC MB.
 * @param n either 0 for the x component or 1 for y
 * @return the average MV for a GMC MB
 */
static inline int get_amv(Mpeg4DecContext *ctx, int n)
{
    MPVContext *const s = &ctx->h.c;
    int x, y, mb_v, sum, dx, dy, shift;
    int len = 1 << (ctx->f_code + 4);
    const int a = ctx->sprite_warping_accuracy;

    if (s->workaround_bugs & FF_BUG_AMV)
        len >>= s->quarter_sample;

    if (ctx->real_sprite_warping_points == 1) {
        if (ctx->divx_version == 500 && ctx->divx_build == 413 && a >= s->quarter_sample)
            sum = ctx->sprite_offset[0][n] / (1 << (a - s->quarter_sample));
        else
            sum = RSHIFT(ctx->sprite_offset[0][n] * (1 << s->quarter_sample), a);
    } else {
        dx = ctx->sprite_delta[n][0];
        dy = ctx->sprite_delta[n][1];
        shift = ctx->sprite_shift[0];
        if (n)
            dy -= 1 << (shift + a + 1);
        else
            dx -= 1 << (shift + a + 1);
        mb_v = ctx->sprite_offset[0][n] + dx * s->mb_x * 16U + dy * s->mb_y * 16U;

        sum = 0;
        for (y = 0; y < 16; y++) {
            int v;

            v = mb_v + (unsigned)dy * y;
            // FIXME optimize
            for (x = 0; x < 16; x++) {
                sum += v >> shift;
                v += dx;
            }
        }

        sum = RSHIFT(sum, a + 8 - s->quarter_sample);
    }

    if (sum < -len)
        sum = -len;
    else if (sum >= len)
        sum = len - 1;

    return sum;
}

/**
 * Predict the dc.
 * @param n block index (0-3 are luma, 4-5 are chroma)
 * @param dir_ptr pointer to an integer where the prediction direction will be stored
 */
static inline int mpeg4_pred_dc(MpegEncContext *s, int n, int *dir_ptr)
{
    const int16_t *const dc_val = s->dc_val + s->block_index[n];
    const int wrap = s->block_wrap[n];
    int pred;

    /* find prediction */
    /* B C
     * A X
     */
    int a = dc_val[-1];
    int b = dc_val[-1 - wrap];
    int c = dc_val[-wrap];

    /* outside slice handling (we can't do that by memset as we need the
     * dc for error resilience) */
    if (s->first_slice_line && n != 3) {
        if (n != 2)
            b = c = 1024;
        if (n != 1 && s->mb_x == s->resync_mb_x)
            b = a = 1024;
    }

    if (s->mb_x == s->resync_mb_x && s->mb_y == s->resync_mb_y + 1) {
        if (n == 0 || n == 4 || n == 5)
            b = 1024;
    }

    if (abs(a - b) < abs(b - c)) {
        pred = c;
        *dir_ptr = 1; /* top */
    } else {
        pred = a;
        *dir_ptr = 0; /* left */
    }
    return pred;
}
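
/* Illustrative note (an addition for exposition, not from the original file):
 * the gradient rule above picks the neighbour along the direction of least
 * change. With the left, top-left and top DC values a = 1024, b = 1024,
 * c = 512 (1024 being the reset value used for missing neighbours),
 * |a - b| = 0 < |b - c| = 512, so the predictor is the top neighbour c and
 * *dir_ptr is set to 1; the same direction is later reused for AC prediction
 * in ff_mpeg4_pred_ac(). */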

static inline int mpeg4_get_level_dc(MpegEncContext *s, int n, int pred, int level)
{
    int scale = n < 4 ? s->y_dc_scale : s->c_dc_scale;
    int ret;

    if (IS_3IV1)
        scale = 8;

    /* we assume pred is positive */
    pred = FASTDIV((pred + (scale >> 1)), scale);

    level += pred;
    ret = level;
    level *= scale;
    if (level & (~2047)) {
        if (s->avctx->err_recognition & (AV_EF_BITSTREAM | AV_EF_AGGRESSIVE)) {
            if (level < 0) {
                av_log(s->avctx, AV_LOG_ERROR,
                       "dc<0 at %dx%d\n", s->mb_x, s->mb_y);
                return AVERROR_INVALIDDATA;
            }
            if (level > 2048 + scale) {
                av_log(s->avctx, AV_LOG_ERROR,
                       "dc overflow at %dx%d\n", s->mb_x, s->mb_y);
                return AVERROR_INVALIDDATA;
            }
        }
        if (level < 0)
            level = 0;
        else if (!(s->workaround_bugs & FF_BUG_DC_CLIP))
            level = 2047;
    }
    s->dc_val[s->block_index[n]] = level;
    return ret;
}

/**
 * Decode the dc value.
 * @param n block index (0-3 are luma, 4-5 are chroma)
 * @param dir_ptr the prediction direction will be stored here
 * @return the quantized dc
 */
static inline int mpeg4_decode_dc(H263DecContext *const h, int n, int *dir_ptr)
{
    int level, code, pred;

    if (n < 4)
        code = get_vlc2(&h->gb, dc_lum, DC_VLC_BITS, 1);
    else
        code = get_vlc2(&h->gb, dc_chrom, DC_VLC_BITS, 1);

    if (code < 0) {
        av_log(h->c.avctx, AV_LOG_ERROR, "illegal dc vlc\n");
        return AVERROR_INVALIDDATA;
    }

    if (code == 0) {
        level = 0;
    } else {
        if (IS_3IV1) {
            if (code == 1)
                level = 2 * get_bits1(&h->gb) - 1;
            else {
                if (get_bits1(&h->gb))
                    level = get_bits(&h->gb, code - 1) + (1 << (code - 1));
                else
                    level = -get_bits(&h->gb, code - 1) - (1 << (code - 1));
            }
        } else {
            level = get_xbits(&h->gb, code);
        }

        if (code > 8) {
            if (get_bits1(&h->gb) == 0) { /* marker */
                if (h->c.avctx->err_recognition & (AV_EF_BITSTREAM|AV_EF_COMPLIANT)) {
                    av_log(h->c.avctx, AV_LOG_ERROR, "dc marker bit missing\n");
                    return AVERROR_INVALIDDATA;
                }
            }
        }
    }

    pred = mpeg4_pred_dc(&h->c, n, dir_ptr);
    return mpeg4_get_level_dc(&h->c, n, pred, level);
}
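
/* Illustrative note (an addition for exposition, not from the original file):
 * the VLC above yields the size class of the DC differential, and get_xbits()
 * then reads that many bits with an implicit sign: a leading 1 gives the
 * positive value directly, a leading 0 gives value - (2^code - 1). E.g. with
 * code == 5, the bits 11010 decode to +26 while 00101 decode to 5 - 31 = -26.
 * Sizes above 8 are followed by the marker bit checked just after. */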
  903. /**
  904. * Decode first partition.
  905. * @return number of MBs decoded or <0 if an error occurred
  906. */
  907. static int mpeg4_decode_partition_a(Mpeg4DecContext *ctx)
  908. {
  909. H263DecContext *const h = &ctx->h;
  910. int mb_num = 0;
  911. static const int8_t quant_tab[4] = { -1, -2, 1, 2 };
  912. /* decode first partition */
  913. h->c.first_slice_line = 1;
  914. for (; h->c.mb_y < h->c.mb_height; h->c.mb_y++) {
  915. ff_init_block_index(&h->c);
  916. for (; h->c.mb_x < h->c.mb_width; h->c.mb_x++) {
  917. const int xy = h->c.mb_x + h->c.mb_y * h->c.mb_stride;
  918. int cbpc;
  919. int dir = 0;
  920. mb_num++;
  921. ff_update_block_index(&h->c, 8, h->c.avctx->lowres, 1);
  922. if (h->c.mb_x == h->c.resync_mb_x && h->c.mb_y == h->c.resync_mb_y + 1)
  923. h->c.first_slice_line = 0;
  924. if (h->c.pict_type == AV_PICTURE_TYPE_I) {
  925. int i;
  926. do {
  927. if (show_bits(&h->gb, 19) == DC_MARKER)
  928. return mb_num - 1;
  929. cbpc = get_vlc2(&h->gb, ff_h263_intra_MCBPC_vlc, INTRA_MCBPC_VLC_BITS, 2);
  930. if (cbpc < 0) {
  931. av_log(h->c.avctx, AV_LOG_ERROR,
  932. "mcbpc corrupted at %d %d\n", h->c.mb_x, h->c.mb_y);
  933. return AVERROR_INVALIDDATA;
  934. }
  935. } while (cbpc == 8);
  936. h->c.cbp_table[xy] = cbpc & 3;
  937. h->c.cur_pic.mb_type[xy] = MB_TYPE_INTRA;
  938. h->c.mb_intra = 1;
  939. if (cbpc & 4)
  940. ff_set_qscale(&h->c, h->c.qscale + quant_tab[get_bits(&h->gb, 2)]);
  941. h->c.cur_pic.qscale_table[xy] = h->c.qscale;
  942. h->c.mbintra_table[xy] = 1;
  943. for (i = 0; i < 6; i++) {
  944. int dc_pred_dir;
  945. int dc = mpeg4_decode_dc(h, i, &dc_pred_dir);
  946. if (dc < 0) {
  947. av_log(h->c.avctx, AV_LOG_ERROR,
  948. "DC corrupted at %d %d\n", h->c.mb_x, h->c.mb_y);
  949. return dc;
  950. }
  951. dir <<= 1;
  952. if (dc_pred_dir)
  953. dir |= 1;
  954. }
  955. h->c.pred_dir_table[xy] = dir;
  956. } else { /* P/S_TYPE */
  957. int mx, my, pred_x, pred_y, bits;
  958. int16_t *const mot_val = h->c.cur_pic.motion_val[0][h->c.block_index[0]];
  959. const int stride = h->c.b8_stride * 2;
  960. try_again:
  961. bits = show_bits(&h->gb, 17);
  962. if (bits == MOTION_MARKER)
  963. return mb_num - 1;
  964. skip_bits1(&h->gb);
  965. if (bits & 0x10000) {
  966. /* skip mb */
  967. if (h->c.pict_type == AV_PICTURE_TYPE_S &&
  968. ctx->vol_sprite_usage == GMC_SPRITE) {
  969. h->c.cur_pic.mb_type[xy] = MB_TYPE_SKIP |
  970. MB_TYPE_16x16 |
  971. MB_TYPE_GMC |
  972. MB_TYPE_FORWARD_MV;
  973. mx = get_amv(ctx, 0);
  974. my = get_amv(ctx, 1);
  975. } else {
  976. h->c.cur_pic.mb_type[xy] = MB_TYPE_SKIP |
  977. MB_TYPE_16x16 |
  978. MB_TYPE_FORWARD_MV;
  979. mx = my = 0;
  980. }
  981. mot_val[0] =
  982. mot_val[2] =
  983. mot_val[0 + stride] =
  984. mot_val[2 + stride] = mx;
  985. mot_val[1] =
  986. mot_val[3] =
  987. mot_val[1 + stride] =
  988. mot_val[3 + stride] = my;
  989. ff_h263_clean_intra_table_entries(&h->c, xy);
  990. continue;
  991. }
  992. cbpc = get_vlc2(&h->gb, ff_h263_inter_MCBPC_vlc, INTER_MCBPC_VLC_BITS, 2);
  993. if (cbpc < 0) {
  994. av_log(h->c.avctx, AV_LOG_ERROR,
  995. "mcbpc corrupted at %d %d\n", h->c.mb_x, h->c.mb_y);
  996. return AVERROR_INVALIDDATA;
  997. }
  998. if (cbpc == 20)
  999. goto try_again;
  1000. h->c.cbp_table[xy] = cbpc & (8 + 3); // 8 is dquant
  1001. h->c.mb_intra = ((cbpc & 4) != 0);
  1002. if (h->c.mb_intra) {
  1003. h->c.cur_pic.mb_type[xy] = MB_TYPE_INTRA;
  1004. h->c.mbintra_table[xy] = 1;
  1005. mot_val[0] =
  1006. mot_val[2] =
  1007. mot_val[0 + stride] =
  1008. mot_val[2 + stride] = 0;
  1009. mot_val[1] =
  1010. mot_val[3] =
  1011. mot_val[1 + stride] =
  1012. mot_val[3 + stride] = 0;
  1013. } else {
  1014. ff_h263_clean_intra_table_entries(&h->c, xy);
  1015. if (h->c.pict_type == AV_PICTURE_TYPE_S &&
  1016. ctx->vol_sprite_usage == GMC_SPRITE &&
  1017. (cbpc & 16) == 0)
  1018. h->c.mcsel = get_bits1(&h->gb);
  1019. else
  1020. h->c.mcsel = 0;
  1021. if ((cbpc & 16) == 0) {
  1022. /* 16x16 motion prediction */
  1023. ff_h263_pred_motion(&h->c, 0, 0, &pred_x, &pred_y);
  1024. if (!h->c.mcsel) {
  1025. mx = ff_h263_decode_motion(h, pred_x, ctx->f_code);
  1026. if (mx >= 0xffff)
  1027. return AVERROR_INVALIDDATA;
  1028. my = ff_h263_decode_motion(h, pred_y, ctx->f_code);
  1029. if (my >= 0xffff)
  1030. return AVERROR_INVALIDDATA;
  1031. h->c.cur_pic.mb_type[xy] = MB_TYPE_16x16 |
  1032. MB_TYPE_FORWARD_MV;
  1033. } else {
  1034. mx = get_amv(ctx, 0);
  1035. my = get_amv(ctx, 1);
  1036. h->c.cur_pic.mb_type[xy] = MB_TYPE_16x16 |
  1037. MB_TYPE_GMC |
  1038. MB_TYPE_FORWARD_MV;
  1039. }
  1040. mot_val[0] =
  1041. mot_val[2] =
  1042. mot_val[0 + stride] =
  1043. mot_val[2 + stride] = mx;
  1044. mot_val[1] =
  1045. mot_val[3] =
  1046. mot_val[1 + stride] =
  1047. mot_val[3 + stride] = my;
  1048. } else {
  1049. int i;
  1050. h->c.cur_pic.mb_type[xy] = MB_TYPE_8x8 |
  1051. MB_TYPE_FORWARD_MV;
  1052. for (i = 0; i < 4; i++) {
  1053. int16_t *mot_val = ff_h263_pred_motion(&h->c, i, 0, &pred_x, &pred_y);
  1054. mx = ff_h263_decode_motion(h, pred_x, ctx->f_code);
  1055. if (mx >= 0xffff)
  1056. return AVERROR_INVALIDDATA;
  1057. my = ff_h263_decode_motion(h, pred_y, ctx->f_code);
  1058. if (my >= 0xffff)
  1059. return AVERROR_INVALIDDATA;
  1060. mot_val[0] = mx;
  1061. mot_val[1] = my;
  1062. }
  1063. }
  1064. }
  1065. }
  1066. }
  1067. h->c.mb_x = 0;
  1068. }
  1069. return mb_num;
  1070. }
  1071. /**
1072. * Decode the second partition.

  1073. * @return <0 if an error occurred
  1074. */
  1075. static int mpeg4_decode_partition_b(H263DecContext *const h, int mb_count)
  1076. {
  1077. int mb_num = 0;
  1078. static const int8_t quant_tab[4] = { -1, -2, 1, 2 };
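/* The second partition carries CBPY and ac_pred for I-VOPs; for P/S-VOPs it
 * carries CBPY and dquant, plus ac_pred, DC coefficients and prediction
 * directions for intra MBs. */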
  1079. h->c.mb_x = h->c.resync_mb_x;
  1080. h->c.first_slice_line = 1;
  1081. for (h->c.mb_y = h->c.resync_mb_y; mb_num < mb_count; h->c.mb_y++) {
  1082. ff_init_block_index(&h->c);
  1083. for (; mb_num < mb_count && h->c.mb_x < h->c.mb_width; h->c.mb_x++) {
  1084. const int xy = h->c.mb_x + h->c.mb_y * h->c.mb_stride;
  1085. mb_num++;
  1086. ff_update_block_index(&h->c, 8, h->c.avctx->lowres, 1);
  1087. if (h->c.mb_x == h->c.resync_mb_x && h->c.mb_y == h->c.resync_mb_y + 1)
  1088. h->c.first_slice_line = 0;
  1089. if (h->c.pict_type == AV_PICTURE_TYPE_I) {
  1090. int ac_pred = get_bits1(&h->gb);
  1091. int cbpy = get_vlc2(&h->gb, ff_h263_cbpy_vlc, CBPY_VLC_BITS, 1);
  1092. if (cbpy < 0) {
  1093. av_log(h->c.avctx, AV_LOG_ERROR,
  1094. "cbpy corrupted at %d %d\n", h->c.mb_x, h->c.mb_y);
  1095. return AVERROR_INVALIDDATA;
  1096. }
  1097. h->c.cbp_table[xy] |= cbpy << 2;
  1098. h->c.cur_pic.mb_type[xy] |= ac_pred * MB_TYPE_ACPRED;
  1099. } else { /* P || S_TYPE */
  1100. if (IS_INTRA(h->c.cur_pic.mb_type[xy])) {
  1101. int i;
  1102. int dir = 0;
  1103. int ac_pred = get_bits1(&h->gb);
  1104. int cbpy = get_vlc2(&h->gb, ff_h263_cbpy_vlc, CBPY_VLC_BITS, 1);
  1105. if (cbpy < 0) {
  1106. av_log(h->c.avctx, AV_LOG_ERROR,
  1107. "I cbpy corrupted at %d %d\n", h->c.mb_x, h->c.mb_y);
  1108. return AVERROR_INVALIDDATA;
  1109. }
  1110. if (h->c.cbp_table[xy] & 8)
  1111. ff_set_qscale(&h->c, h->c.qscale + quant_tab[get_bits(&h->gb, 2)]);
  1112. h->c.cur_pic.qscale_table[xy] = h->c.qscale;
  1113. for (i = 0; i < 6; i++) {
  1114. int dc_pred_dir;
  1115. int dc = mpeg4_decode_dc(h, i, &dc_pred_dir);
  1116. if (dc < 0) {
  1117. av_log(h->c.avctx, AV_LOG_ERROR,
  1118. "DC corrupted at %d %d\n", h->c.mb_x, h->c.mb_y);
  1119. return dc;
  1120. }
  1121. dir <<= 1;
  1122. if (dc_pred_dir)
  1123. dir |= 1;
  1124. }
  1125. h->c.cbp_table[xy] &= 3; // remove dquant
  1126. h->c.cbp_table[xy] |= cbpy << 2;
  1127. h->c.cur_pic.mb_type[xy] |= ac_pred * MB_TYPE_ACPRED;
  1128. h->c.pred_dir_table[xy] = dir;
  1129. } else if (IS_SKIP(h->c.cur_pic.mb_type[xy])) {
  1130. h->c.cur_pic.qscale_table[xy] = h->c.qscale;
  1131. h->c.cbp_table[xy] = 0;
  1132. } else {
  1133. int cbpy = get_vlc2(&h->gb, ff_h263_cbpy_vlc, CBPY_VLC_BITS, 1);
  1134. if (cbpy < 0) {
  1135. av_log(h->c.avctx, AV_LOG_ERROR,
  1136. "P cbpy corrupted at %d %d\n", h->c.mb_x, h->c.mb_y);
  1137. return AVERROR_INVALIDDATA;
  1138. }
  1139. if (h->c.cbp_table[xy] & 8)
  1140. ff_set_qscale(&h->c, h->c.qscale + quant_tab[get_bits(&h->gb, 2)]);
  1141. h->c.cur_pic.qscale_table[xy] = h->c.qscale;
  1142. h->c.cbp_table[xy] &= 3; // remove dquant
  1143. h->c.cbp_table[xy] |= (cbpy ^ 0xf) << 2;
  1144. }
  1145. }
  1146. }
  1147. if (mb_num >= mb_count)
  1148. return 0;
  1149. h->c.mb_x = 0;
  1150. }
  1151. return 0;
  1152. }
  1153. /**
  1154. * Decode the first and second partition.
  1155. * @return <0 if error (and sets error type in the error_status_table)
  1156. */
  1157. int ff_mpeg4_decode_partitions(H263DecContext *const h)
  1158. {
  1159. Mpeg4DecContext *const ctx = h263_to_mpeg4(h);
  1160. int mb_num;
  1161. int ret;
  1162. const int part_a_error = h->c.pict_type == AV_PICTURE_TYPE_I ? (ER_DC_ERROR | ER_MV_ERROR) : ER_MV_ERROR;
  1163. const int part_a_end = h->c.pict_type == AV_PICTURE_TYPE_I ? (ER_DC_END | ER_MV_END) : ER_MV_END;
  1164. mb_num = mpeg4_decode_partition_a(ctx);
  1165. if (mb_num <= 0) {
  1166. ff_er_add_slice(&h->c.er, h->c.resync_mb_x, h->c.resync_mb_y,
  1167. h->c.mb_x, h->c.mb_y, part_a_error);
  1168. return mb_num ? mb_num : AVERROR_INVALIDDATA;
  1169. }
  1170. if (h->c.resync_mb_x + h->c.resync_mb_y * h->c.mb_width + mb_num > h->c.mb_num) {
  1171. av_log(h->c.avctx, AV_LOG_ERROR, "slice below monitor ...\n");
  1172. ff_er_add_slice(&h->c.er, h->c.resync_mb_x, h->c.resync_mb_y,
  1173. h->c.mb_x, h->c.mb_y, part_a_error);
  1174. return AVERROR_INVALIDDATA;
  1175. }
  1176. h->mb_num_left = mb_num;
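/* The partitions are separated by the 19-bit dc_marker (I-VOPs) or the
 * 17-bit motion_marker (P/S-VOPs); stuffing in front of the marker is
 * skipped first. */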
  1177. if (h->c.pict_type == AV_PICTURE_TYPE_I) {
  1178. while (show_bits(&h->gb, 9) == 1)
  1179. skip_bits(&h->gb, 9);
  1180. if (get_bits(&h->gb, 19) != DC_MARKER) {
  1181. av_log(h->c.avctx, AV_LOG_ERROR,
  1182. "marker missing after first I partition at %d %d\n",
  1183. h->c.mb_x, h->c.mb_y);
  1184. return AVERROR_INVALIDDATA;
  1185. }
  1186. } else {
  1187. while (show_bits(&h->gb, 10) == 1)
  1188. skip_bits(&h->gb, 10);
  1189. if (get_bits(&h->gb, 17) != MOTION_MARKER) {
  1190. av_log(h->c.avctx, AV_LOG_ERROR,
  1191. "marker missing after first P partition at %d %d\n",
  1192. h->c.mb_x, h->c.mb_y);
  1193. return AVERROR_INVALIDDATA;
  1194. }
  1195. }
  1196. ff_er_add_slice(&h->c.er, h->c.resync_mb_x, h->c.resync_mb_y,
  1197. h->c.mb_x - 1, h->c.mb_y, part_a_end);
  1198. ret = mpeg4_decode_partition_b(h, mb_num);
  1199. if (ret < 0) {
  1200. if (h->c.pict_type == AV_PICTURE_TYPE_P)
  1201. ff_er_add_slice(&h->c.er, h->c.resync_mb_x, h->c.resync_mb_y,
  1202. h->c.mb_x, h->c.mb_y, ER_DC_ERROR);
  1203. return ret;
  1204. } else {
  1205. if (h->c.pict_type == AV_PICTURE_TYPE_P)
  1206. ff_er_add_slice(&h->c.er, h->c.resync_mb_x, h->c.resync_mb_y,
  1207. h->c.mb_x - 1, h->c.mb_y, ER_DC_END);
  1208. }
  1209. return 0;
  1210. }
  1211. /**
  1212. * Decode a block.
  1213. * @return <0 if an error occurred
  1214. */
  1215. static inline int mpeg4_decode_block(Mpeg4DecContext *ctx, int16_t *block,
  1216. int n, int coded, int intra,
  1217. int use_intra_dc_vlc, int rvlc)
  1218. {
  1219. H263DecContext *const h = &ctx->h;
  1220. int level, i, last, run, qmul, qadd, pred;
  1221. int av_uninit(dc_pred_dir);
  1222. const RLTable *rl;
  1223. const RL_VLC_ELEM *rl_vlc;
  1224. const uint8_t *scan_table;
  1225. // Note intra & rvlc should be optimized away if this is inlined
  1226. if (intra) {
  1227. // FIXME add short header support
  1228. if (use_intra_dc_vlc) {
  1229. /* DC coef */
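/* In data-partitioned frames the DC was already decoded in partition A and
 * stored in dc_val; recover it here by dividing by the DC scaler instead of
 * reading it from the bitstream. */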
  1230. if (h->partitioned_frame) {
  1231. level = h->c.dc_val[h->c.block_index[n]];
  1232. if (n < 4)
  1233. level = FASTDIV((level + (h->c.y_dc_scale >> 1)), h->c.y_dc_scale);
  1234. else
  1235. level = FASTDIV((level + (h->c.c_dc_scale >> 1)), h->c.c_dc_scale);
  1236. dc_pred_dir = (h->c.pred_dir_table[h->c.mb_x + h->c.mb_y * h->c.mb_stride] << n) & 32;
  1237. } else {
  1238. level = mpeg4_decode_dc(h, n, &dc_pred_dir);
  1239. if (level < 0)
  1240. return level;
  1241. }
  1242. block[0] = level;
  1243. i = 0;
  1244. } else {
  1245. i = -1;
  1246. pred = mpeg4_pred_dc(&h->c, n, &dc_pred_dir);
  1247. }
  1248. if (!coded)
  1249. goto not_coded;
  1250. if (rvlc) {
  1251. rl = &ff_rvlc_rl_intra;
  1252. rl_vlc = ff_rvlc_rl_intra.rl_vlc[0];
  1253. } else {
  1254. rl = &ff_mpeg4_rl_intra;
  1255. rl_vlc = ff_mpeg4_rl_intra.rl_vlc[0];
  1256. }
  1257. if (h->c.ac_pred) {
  1258. if (dc_pred_dir == 0)
  1259. scan_table = h->c.permutated_intra_v_scantable; /* left */
  1260. else
  1261. scan_table = h->c.permutated_intra_h_scantable; /* top */
  1262. } else {
  1263. scan_table = h->c.intra_scantable.permutated;
  1264. }
  1265. qmul = 1;
  1266. qadd = 0;
  1267. } else {
  1268. i = -1;
  1269. if (!coded) {
  1270. h->c.block_last_index[n] = i;
  1271. return 0;
  1272. }
  1273. if (rvlc)
  1274. rl = &ff_rvlc_rl_inter;
  1275. else
  1276. rl = &ff_h263_rl_inter;
  1277. scan_table = h->c.intra_scantable.permutated;
  1278. if (ctx->mpeg_quant) {
  1279. qmul = 1;
  1280. qadd = 0;
  1281. if (rvlc)
  1282. rl_vlc = ff_rvlc_rl_inter.rl_vlc[0];
  1283. else
  1284. rl_vlc = ff_h263_rl_inter.rl_vlc[0];
  1285. } else {
  1286. qmul = h->c.qscale << 1;
  1287. qadd = (h->c.qscale - 1) | 1;
  1288. if (rvlc)
  1289. rl_vlc = ff_rvlc_rl_inter.rl_vlc[h->c.qscale];
  1290. else
  1291. rl_vlc = ff_h263_rl_inter.rl_vlc[h->c.qscale];
  1292. }
  1293. }
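/* AC coefficients: regular RL-VLC codes plus three escape modes.
 * Escape 1 adds LMAX to the level, escape 2 adds RMAX + 1 to the run and
 * escape 3 codes last/run/level as fixed-length fields guarded by marker
 * bits. RVLC streams instead use a single fixed-length escape delimited by
 * marker bits. */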
  1294. {
  1295. OPEN_READER(re, &h->gb);
  1296. for (;;) {
  1297. UPDATE_CACHE(re, &h->gb);
  1298. GET_RL_VLC(level, run, re, &h->gb, rl_vlc, TEX_VLC_BITS, 2, 0);
  1299. if (level == 0) {
  1300. /* escape */
  1301. if (rvlc) {
  1302. if (SHOW_UBITS(re, &h->gb, 1) == 0) {
  1303. av_log(h->c.avctx, AV_LOG_ERROR,
  1304. "1. marker bit missing in rvlc esc\n");
  1305. return AVERROR_INVALIDDATA;
  1306. }
  1307. SKIP_CACHE(re, &h->gb, 1);
  1308. last = SHOW_UBITS(re, &h->gb, 1);
  1309. SKIP_CACHE(re, &h->gb, 1);
  1310. run = SHOW_UBITS(re, &h->gb, 6);
  1311. SKIP_COUNTER(re, &h->gb, 1 + 1 + 6);
  1312. UPDATE_CACHE(re, &h->gb);
  1313. if (SHOW_UBITS(re, &h->gb, 1) == 0) {
  1314. av_log(h->c.avctx, AV_LOG_ERROR,
  1315. "2. marker bit missing in rvlc esc\n");
  1316. return AVERROR_INVALIDDATA;
  1317. }
  1318. SKIP_CACHE(re, &h->gb, 1);
  1319. level = SHOW_UBITS(re, &h->gb, 11);
  1320. SKIP_CACHE(re, &h->gb, 11);
  1321. if (SHOW_UBITS(re, &h->gb, 5) != 0x10) {
  1322. av_log(h->c.avctx, AV_LOG_ERROR, "reverse esc missing\n");
  1323. return AVERROR_INVALIDDATA;
  1324. }
  1325. SKIP_CACHE(re, &h->gb, 5);
  1326. level = level * qmul + qadd;
  1327. level = (level ^ SHOW_SBITS(re, &h->gb, 1)) - SHOW_SBITS(re, &h->gb, 1);
  1328. SKIP_COUNTER(re, &h->gb, 1 + 11 + 5 + 1);
  1329. i += run + 1;
  1330. if (last)
  1331. i += 192;
  1332. } else {
  1333. int cache;
  1334. cache = GET_CACHE(re, &h->gb);
  1335. if (IS_3IV1)
  1336. cache ^= 0xC0000000;
  1337. if (cache & 0x80000000) {
  1338. if (cache & 0x40000000) {
  1339. /* third escape */
  1340. SKIP_CACHE(re, &h->gb, 2);
  1341. last = SHOW_UBITS(re, &h->gb, 1);
  1342. SKIP_CACHE(re, &h->gb, 1);
  1343. run = SHOW_UBITS(re, &h->gb, 6);
  1344. SKIP_COUNTER(re, &h->gb, 2 + 1 + 6);
  1345. UPDATE_CACHE(re, &h->gb);
  1346. if (IS_3IV1) {
  1347. level = SHOW_SBITS(re, &h->gb, 12);
  1348. LAST_SKIP_BITS(re, &h->gb, 12);
  1349. } else {
  1350. if (SHOW_UBITS(re, &h->gb, 1) == 0) {
  1351. av_log(h->c.avctx, AV_LOG_ERROR,
  1352. "1. marker bit missing in 3. esc\n");
  1353. if (!(h->c.avctx->err_recognition & AV_EF_IGNORE_ERR) || get_bits_left(&h->gb) <= 0)
  1354. return AVERROR_INVALIDDATA;
  1355. }
  1356. SKIP_CACHE(re, &h->gb, 1);
  1357. level = SHOW_SBITS(re, &h->gb, 12);
  1358. SKIP_CACHE(re, &h->gb, 12);
  1359. if (SHOW_UBITS(re, &h->gb, 1) == 0) {
  1360. av_log(h->c.avctx, AV_LOG_ERROR,
  1361. "2. marker bit missing in 3. esc\n");
  1362. if (!(h->c.avctx->err_recognition & AV_EF_IGNORE_ERR) || get_bits_left(&h->gb) <= 0)
  1363. return AVERROR_INVALIDDATA;
  1364. }
  1365. SKIP_COUNTER(re, &h->gb, 1 + 12 + 1);
  1366. }
  1367. #if 0
  1368. if (h->c.error_recognition >= FF_ER_COMPLIANT) {
  1369. const int abs_level= FFABS(level);
  1370. if (abs_level<=MAX_LEVEL && run<=MAX_RUN) {
  1371. const int run1= run - rl->max_run[last][abs_level] - 1;
  1372. if (abs_level <= rl->max_level[last][run]) {
  1373. av_log(h->c.avctx, AV_LOG_ERROR, "illegal 3. esc, vlc encoding possible\n");
  1374. return AVERROR_INVALIDDATA;
  1375. }
  1376. if (h->c.error_recognition > FF_ER_COMPLIANT) {
  1377. if (abs_level <= rl->max_level[last][run]*2) {
  1378. av_log(h->c.avctx, AV_LOG_ERROR, "illegal 3. esc, esc 1 encoding possible\n");
  1379. return AVERROR_INVALIDDATA;
  1380. }
  1381. if (run1 >= 0 && abs_level <= rl->max_level[last][run1]) {
  1382. av_log(h->c.avctx, AV_LOG_ERROR, "illegal 3. esc, esc 2 encoding possible\n");
  1383. return AVERROR_INVALIDDATA;
  1384. }
  1385. }
  1386. }
  1387. }
  1388. #endif
  1389. if (level > 0)
  1390. level = level * qmul + qadd;
  1391. else
  1392. level = level * qmul - qadd;
  1393. if ((unsigned)(level + 2048) > 4095) {
  1394. if (h->c.avctx->err_recognition & (AV_EF_BITSTREAM|AV_EF_AGGRESSIVE)) {
  1395. if (level > 2560 || level < -2560) {
  1396. av_log(h->c.avctx, AV_LOG_ERROR,
  1397. "|level| overflow in 3. esc, qp=%d\n",
  1398. h->c.qscale);
  1399. return AVERROR_INVALIDDATA;
  1400. }
  1401. }
  1402. level = level < 0 ? -2048 : 2047;
  1403. }
  1404. i += run + 1;
  1405. if (last)
  1406. i += 192;
  1407. } else {
  1408. /* second escape */
  1409. SKIP_BITS(re, &h->gb, 2);
  1410. GET_RL_VLC(level, run, re, &h->gb, rl_vlc, TEX_VLC_BITS, 2, 1);
  1411. i += run + rl->max_run[run >> 7][level / qmul] + 1; // FIXME opt indexing
  1412. level = (level ^ SHOW_SBITS(re, &h->gb, 1)) - SHOW_SBITS(re, &h->gb, 1);
  1413. LAST_SKIP_BITS(re, &h->gb, 1);
  1414. }
  1415. } else {
  1416. /* first escape */
  1417. SKIP_BITS(re, &h->gb, 1);
  1418. GET_RL_VLC(level, run, re, &h->gb, rl_vlc, TEX_VLC_BITS, 2, 1);
  1419. i += run;
  1420. level = level + rl->max_level[run >> 7][(run - 1) & 63] * qmul; // FIXME opt indexing
  1421. level = (level ^ SHOW_SBITS(re, &h->gb, 1)) - SHOW_SBITS(re, &h->gb, 1);
  1422. LAST_SKIP_BITS(re, &h->gb, 1);
  1423. }
  1424. }
  1425. } else {
  1426. i += run;
  1427. level = (level ^ SHOW_SBITS(re, &h->gb, 1)) - SHOW_SBITS(re, &h->gb, 1);
  1428. LAST_SKIP_BITS(re, &h->gb, 1);
  1429. }
  1430. ff_tlog(h->c.avctx, "dct[%d][%d] = %- 4d end?:%d\n", scan_table[i&63]&7, scan_table[i&63] >> 3, level, i>62);
  1431. if (i > 62) {
  1432. i -= 192;
  1433. if (i & (~63)) {
  1434. av_log(h->c.avctx, AV_LOG_ERROR,
  1435. "ac-tex damaged at %d %d\n", h->c.mb_x, h->c.mb_y);
  1436. return AVERROR_INVALIDDATA;
  1437. }
  1438. block[scan_table[i]] = level;
  1439. break;
  1440. }
  1441. block[scan_table[i]] = level;
  1442. }
  1443. CLOSE_READER(re, &h->gb);
  1444. }
  1445. not_coded:
  1446. if (intra) {
  1447. if (!use_intra_dc_vlc) {
  1448. block[0] = mpeg4_get_level_dc(&h->c, n, pred, block[0]);
  1449. i -= i >> 31; // if (i == -1) i = 0;
  1450. }
  1451. ff_mpeg4_pred_ac(h, block, n, dc_pred_dir);
  1452. if (h->c.ac_pred)
  1453. i = 63; // FIXME not optimal
  1454. }
  1455. h->c.block_last_index[n] = i;
  1456. return 0;
  1457. }
  1458. /**
1459. * Decode partition C of one MB.
  1460. * @return <0 if an error occurred
  1461. */
  1462. static int mpeg4_decode_partitioned_mb(H263DecContext *const h)
  1463. {
  1464. Mpeg4DecContext *const ctx = h263_to_mpeg4(h);
  1465. const int xy = h->c.mb_x + h->c.mb_y * h->c.mb_stride;
  1466. const int mb_type = h->c.cur_pic.mb_type[xy];
  1467. int cbp = h->c.cbp_table[xy];
  1468. const int use_intra_dc_vlc = h->c.qscale < ctx->intra_dc_threshold;
  1469. if (h->c.cur_pic.qscale_table[xy] != h->c.qscale)
  1470. ff_set_qscale(&h->c, h->c.cur_pic.qscale_table[xy]);
  1471. if (h->c.pict_type == AV_PICTURE_TYPE_P ||
  1472. h->c.pict_type == AV_PICTURE_TYPE_S) {
  1473. int i;
  1474. for (i = 0; i < 4; i++) {
  1475. h->c.mv[0][i][0] = h->c.cur_pic.motion_val[0][h->c.block_index[i]][0];
  1476. h->c.mv[0][i][1] = h->c.cur_pic.motion_val[0][h->c.block_index[i]][1];
  1477. }
  1478. h->c.mb_intra = IS_INTRA(mb_type);
  1479. if (IS_SKIP(mb_type)) {
  1480. /* skip mb */
  1481. for (i = 0; i < 6; i++)
  1482. h->c.block_last_index[i] = -1;
  1483. h->c.mv_dir = MV_DIR_FORWARD;
  1484. h->c.mv_type = MV_TYPE_16X16;
  1485. if (h->c.pict_type == AV_PICTURE_TYPE_S
  1486. && ctx->vol_sprite_usage == GMC_SPRITE) {
  1487. h->c.mcsel = 1;
  1488. h->c.mb_skipped = 0;
  1489. h->c.cur_pic.mbskip_table[xy] = 0;
  1490. } else {
  1491. h->c.mcsel = 0;
  1492. h->c.mb_skipped = 1;
  1493. h->c.cur_pic.mbskip_table[xy] = 1;
  1494. }
  1495. } else if (h->c.mb_intra) {
  1496. h->c.ac_pred = IS_ACPRED(h->c.cur_pic.mb_type[xy]);
  1497. } else if (!h->c.mb_intra) {
  1498. // h->c.mcsel = 0; // FIXME do we need to init that?
  1499. h->c.mv_dir = MV_DIR_FORWARD;
  1500. if (IS_8X8(mb_type)) {
  1501. h->c.mv_type = MV_TYPE_8X8;
  1502. } else {
  1503. h->c.mv_type = MV_TYPE_16X16;
  1504. }
  1505. }
  1506. } else { /* I-Frame */
  1507. h->c.mb_intra = 1;
  1508. h->c.ac_pred = IS_ACPRED(h->c.cur_pic.mb_type[xy]);
  1509. }
  1510. if (!IS_SKIP(mb_type)) {
  1511. int i;
  1512. h->c.bdsp.clear_blocks(h->block[0]);
  1513. /* decode each block */
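/* cbp bit 5 is the coded flag of the current block (block 0 first);
 * cbp += cbp shifts the next block's flag into bit 5. */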
  1514. for (i = 0; i < 6; i++) {
  1515. if (mpeg4_decode_block(ctx, h->block[i], i, cbp & 32, h->c.mb_intra,
  1516. use_intra_dc_vlc, ctx->rvlc) < 0) {
  1517. av_log(h->c.avctx, AV_LOG_ERROR,
  1518. "texture corrupted at %d %d %d\n",
  1519. h->c.mb_x, h->c.mb_y, h->c.mb_intra);
  1520. return AVERROR_INVALIDDATA;
  1521. }
  1522. cbp += cbp;
  1523. }
  1524. }
  1525. /* per-MB end of slice check */
  1526. if (--h->mb_num_left <= 0) {
  1527. if (mpeg4_is_resync(ctx))
  1528. return SLICE_END;
  1529. else
  1530. return SLICE_NOEND;
  1531. } else {
  1532. if (mpeg4_is_resync(ctx)) {
  1533. const int delta = h->c.mb_x + 1 == h->c.mb_width ? 2 : 1;
  1534. if (h->c.cbp_table[xy + delta])
  1535. return SLICE_END;
  1536. }
  1537. return SLICE_OK;
  1538. }
  1539. }
  1540. static int mpeg4_decode_mb(H263DecContext *const h)
  1541. {
  1542. Mpeg4DecContext *const ctx = h263_to_mpeg4(h);
  1543. int cbpc, cbpy, i, cbp, pred_x, pred_y, mx, my, dquant;
  1544. static const int8_t quant_tab[4] = { -1, -2, 1, 2 };
  1545. const int xy = h->c.mb_x + h->c.mb_y * h->c.mb_stride;
  1546. int next;
  1547. av_assert2(h->c.h263_pred);
  1548. if (h->c.pict_type == AV_PICTURE_TYPE_P ||
  1549. h->c.pict_type == AV_PICTURE_TYPE_S) {
  1550. do {
  1551. if (get_bits1(&h->gb)) {
  1552. /* skip mb */
  1553. h->c.mb_intra = 0;
  1554. for (i = 0; i < 6; i++)
  1555. h->c.block_last_index[i] = -1;
  1556. h->c.mv_dir = MV_DIR_FORWARD;
  1557. h->c.mv_type = MV_TYPE_16X16;
  1558. if (h->c.pict_type == AV_PICTURE_TYPE_S &&
  1559. ctx->vol_sprite_usage == GMC_SPRITE) {
  1560. h->c.cur_pic.mb_type[xy] = MB_TYPE_SKIP |
  1561. MB_TYPE_GMC |
  1562. MB_TYPE_16x16 |
  1563. MB_TYPE_FORWARD_MV;
  1564. h->c.mcsel = 1;
  1565. h->c.mv[0][0][0] = get_amv(ctx, 0);
  1566. h->c.mv[0][0][1] = get_amv(ctx, 1);
  1567. h->c.cur_pic.mbskip_table[xy] = 0;
  1568. h->c.mb_skipped = 0;
  1569. } else {
  1570. h->c.cur_pic.mb_type[xy] = MB_TYPE_SKIP | MB_TYPE_16x16 |
  1571. MB_TYPE_FORWARD_MV;
  1572. h->c.mcsel = 0;
  1573. h->c.mv[0][0][0] = 0;
  1574. h->c.mv[0][0][1] = 0;
  1575. h->c.cur_pic.mbskip_table[xy] = 1;
  1576. h->c.mb_skipped = 1;
  1577. }
  1578. goto end;
  1579. }
  1580. cbpc = get_vlc2(&h->gb, ff_h263_inter_MCBPC_vlc, INTER_MCBPC_VLC_BITS, 2);
  1581. if (cbpc < 0) {
  1582. av_log(h->c.avctx, AV_LOG_ERROR,
  1583. "mcbpc damaged at %d %d\n", h->c.mb_x, h->c.mb_y);
  1584. return AVERROR_INVALIDDATA;
  1585. }
  1586. } while (cbpc == 20);
  1587. dquant = cbpc & 8;
  1588. h->c.mb_intra = ((cbpc & 4) != 0);
  1589. if (h->c.mb_intra)
  1590. goto intra;
  1591. h->c.bdsp.clear_blocks(h->block[0]);
  1592. if (h->c.pict_type == AV_PICTURE_TYPE_S &&
  1593. ctx->vol_sprite_usage == GMC_SPRITE && (cbpc & 16) == 0)
  1594. h->c.mcsel = get_bits1(&h->gb);
  1595. else
  1596. h->c.mcsel = 0;
  1597. cbpy = get_vlc2(&h->gb, ff_h263_cbpy_vlc, CBPY_VLC_BITS, 1) ^ 0x0F;
  1598. if (cbpy < 0) {
  1599. av_log(h->c.avctx, AV_LOG_ERROR,
  1600. "P cbpy damaged at %d %d\n", h->c.mb_x, h->c.mb_y);
  1601. return AVERROR_INVALIDDATA;
  1602. }
  1603. cbp = (cbpc & 3) | (cbpy << 2);
  1604. if (dquant)
  1605. ff_set_qscale(&h->c, h->c.qscale + quant_tab[get_bits(&h->gb, 2)]);
  1606. if ((!h->c.progressive_sequence) &&
  1607. (cbp || (h->c.workaround_bugs & FF_BUG_XVID_ILACE)))
  1608. h->c.interlaced_dct = get_bits1(&h->gb);
  1609. h->c.mv_dir = MV_DIR_FORWARD;
  1610. if ((cbpc & 16) == 0) {
  1611. if (h->c.mcsel) {
  1612. h->c.cur_pic.mb_type[xy] = MB_TYPE_GMC | MB_TYPE_16x16 |
  1613. MB_TYPE_FORWARD_MV;
  1614. /* 16x16 global motion prediction */
  1615. h->c.mv_type = MV_TYPE_16X16;
  1616. mx = get_amv(ctx, 0);
  1617. my = get_amv(ctx, 1);
  1618. h->c.mv[0][0][0] = mx;
  1619. h->c.mv[0][0][1] = my;
  1620. } else if ((!h->c.progressive_sequence) && get_bits1(&h->gb)) {
  1621. h->c.cur_pic.mb_type[xy] = MB_TYPE_16x8 | MB_TYPE_FORWARD_MV |
  1622. MB_TYPE_INTERLACED;
  1623. /* 16x8 field motion prediction */
  1624. h->c.mv_type = MV_TYPE_FIELD;
  1625. h->c.field_select[0][0] = get_bits1(&h->gb);
  1626. h->c.field_select[0][1] = get_bits1(&h->gb);
  1627. ff_h263_pred_motion(&h->c, 0, 0, &pred_x, &pred_y);
  1628. for (i = 0; i < 2; i++) {
  1629. mx = ff_h263_decode_motion(h, pred_x, ctx->f_code);
  1630. if (mx >= 0xffff)
  1631. return AVERROR_INVALIDDATA;
  1632. my = ff_h263_decode_motion(h, pred_y / 2, ctx->f_code);
  1633. if (my >= 0xffff)
  1634. return AVERROR_INVALIDDATA;
  1635. h->c.mv[0][i][0] = mx;
  1636. h->c.mv[0][i][1] = my;
  1637. }
  1638. } else {
  1639. h->c.cur_pic.mb_type[xy] = MB_TYPE_16x16 | MB_TYPE_FORWARD_MV;
  1640. /* 16x16 motion prediction */
  1641. h->c.mv_type = MV_TYPE_16X16;
  1642. ff_h263_pred_motion(&h->c, 0, 0, &pred_x, &pred_y);
  1643. mx = ff_h263_decode_motion(h, pred_x, ctx->f_code);
  1644. if (mx >= 0xffff)
  1645. return AVERROR_INVALIDDATA;
  1646. my = ff_h263_decode_motion(h, pred_y, ctx->f_code);
  1647. if (my >= 0xffff)
  1648. return AVERROR_INVALIDDATA;
  1649. h->c.mv[0][0][0] = mx;
  1650. h->c.mv[0][0][1] = my;
  1651. }
  1652. } else {
  1653. h->c.cur_pic.mb_type[xy] = MB_TYPE_8x8 | MB_TYPE_FORWARD_MV;
  1654. h->c.mv_type = MV_TYPE_8X8;
  1655. for (i = 0; i < 4; i++) {
  1656. int16_t *mot_val = ff_h263_pred_motion(&h->c, i, 0, &pred_x, &pred_y);
  1657. mx = ff_h263_decode_motion(h, pred_x, ctx->f_code);
  1658. if (mx >= 0xffff)
  1659. return AVERROR_INVALIDDATA;
  1660. my = ff_h263_decode_motion(h, pred_y, ctx->f_code);
  1661. if (my >= 0xffff)
  1662. return AVERROR_INVALIDDATA;
  1663. h->c.mv[0][i][0] = mx;
  1664. h->c.mv[0][i][1] = my;
  1665. mot_val[0] = mx;
  1666. mot_val[1] = my;
  1667. }
  1668. }
  1669. } else if (h->c.pict_type == AV_PICTURE_TYPE_B) {
  1670. int modb1; // first bit of modb
  1671. int modb2; // second bit of modb
  1672. int mb_type;
  1673. h->c.mb_intra = 0; // B-frames never contain intra blocks
1674. h->c.mcsel = 0; // ... nor true GMC blocks
  1675. if (h->c.mb_x == 0) {
  1676. for (i = 0; i < 2; i++) {
  1677. h->c.last_mv[i][0][0] =
  1678. h->c.last_mv[i][0][1] =
  1679. h->c.last_mv[i][1][0] =
  1680. h->c.last_mv[i][1][1] = 0;
  1681. }
  1682. ff_thread_progress_await(&h->c.next_pic.ptr->progress, h->c.mb_y);
  1683. }
1684. /* if we skipped it in the future P-frame then skip it now too */
  1685. h->c.mb_skipped = h->c.next_pic.mbskip_table[h->c.mb_y * h->c.mb_stride + h->c.mb_x]; // Note, skiptab=0 if last was GMC
  1686. if (h->c.mb_skipped) {
  1687. /* skip mb */
  1688. for (i = 0; i < 6; i++)
  1689. h->c.block_last_index[i] = -1;
  1690. h->c.mv_dir = MV_DIR_FORWARD;
  1691. h->c.mv_type = MV_TYPE_16X16;
  1692. h->c.mv[0][0][0] =
  1693. h->c.mv[0][0][1] =
  1694. h->c.mv[1][0][0] =
  1695. h->c.mv[1][0][1] = 0;
  1696. h->c.cur_pic.mb_type[xy] = MB_TYPE_SKIP |
  1697. MB_TYPE_16x16 |
  1698. MB_TYPE_FORWARD_MV;
  1699. goto end;
  1700. }
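/* modb is 1 or 2 bits: '1' means direct mode with no mb_type, no CBP and
 * no vectors; otherwise the second bit tells whether the 6-bit CBP is
 * absent. */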
  1701. modb1 = get_bits1(&h->gb);
  1702. if (modb1) {
  1703. // like MB_TYPE_B_DIRECT but no vectors coded
  1704. mb_type = MB_TYPE_DIRECT2 | MB_TYPE_SKIP | MB_TYPE_BIDIR_MV;
  1705. cbp = 0;
  1706. } else {
  1707. modb2 = get_bits1(&h->gb);
  1708. mb_type = get_vlc2(&h->gb, mb_type_b_vlc, MB_TYPE_B_VLC_BITS, 1);
  1709. if (mb_type < 0) {
  1710. av_log(h->c.avctx, AV_LOG_ERROR, "illegal MB_type\n");
  1711. return AVERROR_INVALIDDATA;
  1712. }
  1713. if (modb2) {
  1714. cbp = 0;
  1715. } else {
  1716. h->c.bdsp.clear_blocks(h->block[0]);
  1717. cbp = get_bits(&h->gb, 6);
  1718. }
  1719. if ((!IS_DIRECT(mb_type)) && cbp) {
  1720. if (get_bits1(&h->gb))
  1721. ff_set_qscale(&h->c, h->c.qscale + get_bits1(&h->gb) * 4 - 2);
  1722. }
  1723. if (!h->c.progressive_sequence) {
  1724. if (cbp)
  1725. h->c.interlaced_dct = get_bits1(&h->gb);
  1726. if (!IS_DIRECT(mb_type) && get_bits1(&h->gb)) {
  1727. mb_type |= MB_TYPE_16x8 | MB_TYPE_INTERLACED;
  1728. mb_type &= ~MB_TYPE_16x16;
  1729. if (HAS_FORWARD_MV(mb_type)) {
  1730. h->c.field_select[0][0] = get_bits1(&h->gb);
  1731. h->c.field_select[0][1] = get_bits1(&h->gb);
  1732. }
  1733. if (HAS_BACKWARD_MV(mb_type)) {
  1734. h->c.field_select[1][0] = get_bits1(&h->gb);
  1735. h->c.field_select[1][1] = get_bits1(&h->gb);
  1736. }
  1737. }
  1738. }
  1739. h->c.mv_dir = 0;
  1740. if ((mb_type & (MB_TYPE_DIRECT2 | MB_TYPE_INTERLACED)) == 0) {
  1741. h->c.mv_type = MV_TYPE_16X16;
  1742. if (HAS_FORWARD_MV(mb_type)) {
  1743. h->c.mv_dir = MV_DIR_FORWARD;
  1744. mx = ff_h263_decode_motion(h, h->c.last_mv[0][0][0], ctx->f_code);
  1745. my = ff_h263_decode_motion(h, h->c.last_mv[0][0][1], ctx->f_code);
  1746. h->c.last_mv[0][1][0] =
  1747. h->c.last_mv[0][0][0] =
  1748. h->c.mv[0][0][0] = mx;
  1749. h->c.last_mv[0][1][1] =
  1750. h->c.last_mv[0][0][1] =
  1751. h->c.mv[0][0][1] = my;
  1752. }
  1753. if (HAS_BACKWARD_MV(mb_type)) {
  1754. h->c.mv_dir |= MV_DIR_BACKWARD;
  1755. mx = ff_h263_decode_motion(h, h->c.last_mv[1][0][0], ctx->b_code);
  1756. my = ff_h263_decode_motion(h, h->c.last_mv[1][0][1], ctx->b_code);
  1757. h->c.last_mv[1][1][0] =
  1758. h->c.last_mv[1][0][0] =
  1759. h->c.mv[1][0][0] = mx;
  1760. h->c.last_mv[1][1][1] =
  1761. h->c.last_mv[1][0][1] =
  1762. h->c.mv[1][0][1] = my;
  1763. }
  1764. } else if (!IS_DIRECT(mb_type)) {
  1765. h->c.mv_type = MV_TYPE_FIELD;
  1766. if (HAS_FORWARD_MV(mb_type)) {
  1767. h->c.mv_dir = MV_DIR_FORWARD;
  1768. for (i = 0; i < 2; i++) {
  1769. mx = ff_h263_decode_motion(h, h->c.last_mv[0][i][0], ctx->f_code);
  1770. my = ff_h263_decode_motion(h, h->c.last_mv[0][i][1] / 2, ctx->f_code);
  1771. h->c.last_mv[0][i][0] =
  1772. h->c.mv[0][i][0] = mx;
  1773. h->c.last_mv[0][i][1] = (h->c.mv[0][i][1] = my) * 2;
  1774. }
  1775. }
  1776. if (HAS_BACKWARD_MV(mb_type)) {
  1777. h->c.mv_dir |= MV_DIR_BACKWARD;
  1778. for (i = 0; i < 2; i++) {
  1779. mx = ff_h263_decode_motion(h, h->c.last_mv[1][i][0], ctx->b_code);
  1780. my = ff_h263_decode_motion(h, h->c.last_mv[1][i][1] / 2, ctx->b_code);
  1781. h->c.last_mv[1][i][0] =
  1782. h->c.mv[1][i][0] = mx;
  1783. h->c.last_mv[1][i][1] = (h->c.mv[1][i][1] = my) * 2;
  1784. }
  1785. }
  1786. }
  1787. }
  1788. if (IS_DIRECT(mb_type)) {
  1789. if (IS_SKIP(mb_type)) {
  1790. mx =
  1791. my = 0;
  1792. } else {
  1793. mx = ff_h263_decode_motion(h, 0, 1);
  1794. my = ff_h263_decode_motion(h, 0, 1);
  1795. }
  1796. h->c.mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
  1797. mb_type |= ff_mpeg4_set_direct_mv(&h->c, mx, my);
  1798. }
  1799. h->c.cur_pic.mb_type[xy] = mb_type;
  1800. } else { /* I-Frame */
  1801. int use_intra_dc_vlc;
  1802. do {
  1803. cbpc = get_vlc2(&h->gb, ff_h263_intra_MCBPC_vlc, INTRA_MCBPC_VLC_BITS, 2);
  1804. if (cbpc < 0) {
  1805. av_log(h->c.avctx, AV_LOG_ERROR,
  1806. "I cbpc damaged at %d %d\n", h->c.mb_x, h->c.mb_y);
  1807. return AVERROR_INVALIDDATA;
  1808. }
  1809. } while (cbpc == 8);
  1810. dquant = cbpc & 4;
  1811. h->c.mb_intra = 1;
  1812. intra:
  1813. h->c.ac_pred = get_bits1(&h->gb);
  1814. if (h->c.ac_pred)
  1815. h->c.cur_pic.mb_type[xy] = MB_TYPE_INTRA | MB_TYPE_ACPRED;
  1816. else
  1817. h->c.cur_pic.mb_type[xy] = MB_TYPE_INTRA;
  1818. cbpy = get_vlc2(&h->gb, ff_h263_cbpy_vlc, CBPY_VLC_BITS, 1);
  1819. if (cbpy < 0) {
  1820. av_log(h->c.avctx, AV_LOG_ERROR,
  1821. "I cbpy damaged at %d %d\n", h->c.mb_x, h->c.mb_y);
  1822. return AVERROR_INVALIDDATA;
  1823. }
  1824. cbp = (cbpc & 3) | (cbpy << 2);
  1825. use_intra_dc_vlc = h->c.qscale < ctx->intra_dc_threshold;
  1826. if (dquant)
  1827. ff_set_qscale(&h->c, h->c.qscale + quant_tab[get_bits(&h->gb, 2)]);
  1828. if (!h->c.progressive_sequence)
  1829. h->c.interlaced_dct = get_bits1(&h->gb);
  1830. h->c.bdsp.clear_blocks(h->block[0]);
  1831. /* decode each block */
  1832. for (i = 0; i < 6; i++) {
  1833. if (mpeg4_decode_block(ctx, h->block[i], i, cbp & 32,
  1834. 1, use_intra_dc_vlc, 0) < 0)
  1835. return AVERROR_INVALIDDATA;
  1836. cbp += cbp;
  1837. }
  1838. goto end;
  1839. }
  1840. /* decode each block */
  1841. for (i = 0; i < 6; i++) {
  1842. if (mpeg4_decode_block(ctx, h->block[i], i, cbp & 32, 0, 0, 0) < 0)
  1843. return AVERROR_INVALIDDATA;
  1844. cbp += cbp;
  1845. }
  1846. end:
  1847. /* per-MB end of slice check */
  1848. next = mpeg4_is_resync(ctx);
  1849. if (next) {
  1850. if (h->c.mb_x + h->c.mb_y*h->c.mb_width + 1 > next && (h->c.avctx->err_recognition & AV_EF_AGGRESSIVE)) {
  1851. return AVERROR_INVALIDDATA;
  1852. } else if (h->c.mb_x + h->c.mb_y*h->c.mb_width + 1 >= next)
  1853. return SLICE_END;
  1854. if (h->c.pict_type == AV_PICTURE_TYPE_B) {
  1855. const int delta = h->c.mb_x + 1 == h->c.mb_width ? 2 : 1;
  1856. ff_thread_progress_await(&h->c.next_pic.ptr->progress,
  1857. (h->c.mb_x + delta >= h->c.mb_width)
  1858. ? FFMIN(h->c.mb_y + 1, h->c.mb_height - 1)
  1859. : h->c.mb_y);
  1860. if (h->c.next_pic.mbskip_table[xy + delta])
  1861. return SLICE_OK;
  1862. }
  1863. return SLICE_END;
  1864. }
  1865. return SLICE_OK;
  1866. }
1867. /* As per the spec, the studio start code search is not the same as the regular start code search */
  1868. static void next_start_code_studio(GetBitContext *gb)
  1869. {
  1870. align_get_bits(gb);
  1871. while (get_bits_left(gb) >= 24 && show_bits(gb, 24) != 0x1) {
  1872. get_bits(gb, 8);
  1873. }
  1874. }
  1875. /* additional_code, vlc index */
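/* Maps each decoded coefficient group to the number of additional FLC bits
 * to read and to the VLC table used for the next symbol (state machine for
 * Tables B.47-B.49). */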
  1876. static const uint8_t ac_state_tab[22][2] =
  1877. {
  1878. {0, 0},
  1879. {0, 1},
  1880. {1, 1},
  1881. {2, 1},
  1882. {3, 1},
  1883. {4, 1},
  1884. {5, 1},
  1885. {1, 2},
  1886. {2, 2},
  1887. {3, 2},
  1888. {4, 2},
  1889. {5, 2},
  1890. {6, 2},
  1891. {1, 3},
  1892. {2, 4},
  1893. {3, 5},
  1894. {4, 6},
  1895. {5, 7},
  1896. {6, 8},
  1897. {7, 9},
  1898. {8, 10},
  1899. {0, 11}
  1900. };
  1901. static int mpeg4_decode_studio_block(Mpeg4DecContext *const ctx, int32_t block[64], int n)
  1902. {
  1903. H263DecContext *const h = &ctx->h;
  1904. int cc, dct_dc_size, dct_diff, code, j, idx = 1, group = 0, run = 0,
  1905. additional_code_len, sign, mismatch;
  1906. const VLCElem *cur_vlc = studio_intra_tab[0];
  1907. const uint8_t *const scantable = h->c.intra_scantable.permutated;
  1908. const uint16_t *quant_matrix;
  1909. uint32_t flc;
  1910. const int min = -1 * (1 << (h->c.avctx->bits_per_raw_sample + 6));
  1911. const int max = ((1 << (h->c.avctx->bits_per_raw_sample + 6)) - 1);
  1912. int shift = 3 - ctx->dct_precision;
  1913. mismatch = 1;
  1914. memset(block, 0, 64 * sizeof(int32_t));
  1915. if (n < 4) {
  1916. cc = 0;
  1917. dct_dc_size = get_vlc2(&h->gb, studio_luma_dc, STUDIO_INTRA_BITS, 2);
  1918. quant_matrix = h->c.intra_matrix;
  1919. } else {
  1920. cc = (n & 1) + 1;
  1921. if (ctx->rgb)
  1922. dct_dc_size = get_vlc2(&h->gb, studio_luma_dc, STUDIO_INTRA_BITS, 2);
  1923. else
  1924. dct_dc_size = get_vlc2(&h->gb, studio_chroma_dc, STUDIO_INTRA_BITS, 2);
  1925. quant_matrix = h->c.chroma_intra_matrix;
  1926. }
  1927. if (dct_dc_size == 0) {
  1928. dct_diff = 0;
  1929. } else {
  1930. dct_diff = get_xbits(&h->gb, dct_dc_size);
  1931. if (dct_dc_size > 8) {
1932. if (!check_marker(h->c.avctx, &h->gb, "dct_dc_size > 8"))
  1933. return AVERROR_INVALIDDATA;
  1934. }
  1935. }
  1936. h->last_dc[cc] += dct_diff;
  1937. if (ctx->mpeg_quant)
  1938. block[0] = h->last_dc[cc] * (8 >> h->c.intra_dc_precision);
  1939. else
  1940. block[0] = h->last_dc[cc] * (8 >> h->c.intra_dc_precision) * (8 >> ctx->dct_precision);
  1941. /* TODO: support mpeg_quant for AC coefficients */
  1942. block[0] = av_clip(block[0], min, max);
  1943. mismatch ^= block[0];
  1944. /* AC Coefficients */
  1945. while (1) {
  1946. group = get_vlc2(&h->gb, cur_vlc, STUDIO_INTRA_BITS, 2);
  1947. if (group < 0) {
  1948. av_log(h->c.avctx, AV_LOG_ERROR, "illegal ac coefficient group vlc\n");
  1949. return AVERROR_INVALIDDATA;
  1950. }
  1951. additional_code_len = ac_state_tab[group][0];
  1952. cur_vlc = studio_intra_tab[ac_state_tab[group][1]];
  1953. if (group == 0) {
  1954. /* End of Block */
  1955. break;
  1956. } else if (group >= 1 && group <= 6) {
  1957. /* Zero run length (Table B.47) */
  1958. run = 1 << additional_code_len;
  1959. if (additional_code_len)
  1960. run += get_bits(&h->gb, additional_code_len);
  1961. idx += run;
  1962. continue;
  1963. } else if (group >= 7 && group <= 12) {
  1964. /* Zero run length and +/-1 level (Table B.48) */
  1965. code = get_bits(&h->gb, additional_code_len);
  1966. sign = code & 1;
  1967. code >>= 1;
  1968. run = (1 << (additional_code_len - 1)) + code;
  1969. idx += run;
  1970. if (idx > 63)
  1971. return AVERROR_INVALIDDATA;
  1972. j = scantable[idx++];
  1973. block[j] = sign ? 1 : -1;
  1974. } else if (group >= 13 && group <= 20) {
  1975. /* Level value (Table B.49) */
  1976. if (idx > 63)
  1977. return AVERROR_INVALIDDATA;
  1978. j = scantable[idx++];
  1979. block[j] = get_xbits(&h->gb, additional_code_len);
  1980. } else if (group == 21) {
  1981. /* Escape */
  1982. if (idx > 63)
  1983. return AVERROR_INVALIDDATA;
  1984. j = scantable[idx++];
  1985. additional_code_len = h->c.avctx->bits_per_raw_sample + ctx->dct_precision + 4;
  1986. flc = get_bits(&h->gb, additional_code_len);
  1987. if (flc >> (additional_code_len-1))
  1988. block[j] = -1 * (( flc ^ ((1 << additional_code_len) -1)) + 1);
  1989. else
  1990. block[j] = flc;
  1991. }
  1992. block[j] = ((block[j] * quant_matrix[j] * h->c.qscale) * (1 << shift)) / 16;
  1993. block[j] = av_clip(block[j], min, max);
  1994. mismatch ^= block[j];
  1995. }
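/* MPEG-2 style mismatch control: toggle the LSB of the last coefficient so
 * that the sum of all coefficients is odd. */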
  1996. block[63] ^= mismatch & 1;
  1997. return 0;
  1998. }
  1999. static int mpeg4_decode_dpcm_macroblock(Mpeg4DecContext *const ctx,
  2000. int16_t macroblock[256], int n)
  2001. {
  2002. H263DecContext *const h = &ctx->h;
  2003. int j, w, height, idx = 0;
  2004. int block_mean, rice_parameter, rice_prefix_code, rice_suffix_code,
  2005. dpcm_residual, left, top, topleft, min_left_top, max_left_top, p, p2, output;
  2006. height = 16 >> (n ? h->c.chroma_y_shift : 0);
  2007. w = 16 >> (n ? h->c.chroma_x_shift : 0);
  2008. block_mean = get_bits(&h->gb, h->c.avctx->bits_per_raw_sample);
2009. if (block_mean == 0) {
  2010. av_log(h->c.avctx, AV_LOG_ERROR, "Forbidden block_mean\n");
  2011. return AVERROR_INVALIDDATA;
  2012. }
  2013. h->last_dc[n] = block_mean * (1 << (ctx->dct_precision + h->c.intra_dc_precision));
  2014. rice_parameter = get_bits(&h->gb, 4);
  2015. if (rice_parameter == 0) {
  2016. av_log(h->c.avctx, AV_LOG_ERROR, "Forbidden rice_parameter\n");
  2017. return AVERROR_INVALIDDATA;
  2018. }
  2019. if (rice_parameter == 15)
  2020. rice_parameter = 0;
  2021. if (rice_parameter > 11) {
  2022. av_log(h->c.avctx, AV_LOG_ERROR, "Forbidden rice_parameter\n");
  2023. return AVERROR_INVALIDDATA;
  2024. }
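/* Rice parameter: a coded value of 15 selects k = 0; 0 and 12..14 are
 * forbidden. */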
  2025. for (int i = 0; i < height; i++) {
  2026. output = 1 << (h->c.avctx->bits_per_raw_sample - 1);
  2027. top = 1 << (h->c.avctx->bits_per_raw_sample - 1);
  2028. for (j = 0; j < w; j++) {
  2029. left = output;
  2030. topleft = top;
  2031. rice_prefix_code = get_unary(&h->gb, 1, 12);
  2032. /* Escape */
  2033. if (rice_prefix_code == 11)
  2034. dpcm_residual = get_bits(&h->gb, h->c.avctx->bits_per_raw_sample);
  2035. else {
  2036. if (rice_prefix_code == 12) {
  2037. av_log(h->c.avctx, AV_LOG_ERROR, "Forbidden rice_prefix_code\n");
  2038. return AVERROR_INVALIDDATA;
  2039. }
  2040. rice_suffix_code = get_bitsz(&h->gb, rice_parameter);
  2041. dpcm_residual = (rice_prefix_code << rice_parameter) + rice_suffix_code;
  2042. }
  2043. /* Map to a signed residual */
  2044. if (dpcm_residual & 1)
  2045. dpcm_residual = (-1 * dpcm_residual) >> 1;
  2046. else
  2047. dpcm_residual = (dpcm_residual >> 1);
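/* Clamped-gradient (MED-like) prediction: p = left + top - topleft, clipped
 * to [min(left, top), max(left, top)]; the residual's sign is flipped when
 * the secondary predictor p2 exceeds p. */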
  2048. if (i != 0)
  2049. top = macroblock[idx-w];
  2050. p = left + top - topleft;
  2051. min_left_top = FFMIN(left, top);
  2052. if (p < min_left_top)
  2053. p = min_left_top;
  2054. max_left_top = FFMAX(left, top);
  2055. if (p > max_left_top)
  2056. p = max_left_top;
  2057. p2 = (FFMIN(min_left_top, topleft) + FFMAX(max_left_top, topleft)) >> 1;
  2058. if (p2 == p)
  2059. p2 = block_mean;
  2060. if (p2 > p)
  2061. dpcm_residual *= -1;
  2062. macroblock[idx++] = output = (dpcm_residual + p) & ((1 << h->c.avctx->bits_per_raw_sample) - 1);
  2063. }
  2064. }
  2065. return 0;
  2066. }
  2067. static int mpeg4_decode_studio_mb(H263DecContext *const h)
  2068. {
  2069. Mpeg4DecContext *const ctx = h263_to_mpeg4(h);
  2070. int i;
  2071. ctx->dpcm_direction = 0;
  2072. /* StudioMacroblock */
  2073. /* Assumes I-VOP */
  2074. h->c.mb_intra = 1;
  2075. if (get_bits1(&h->gb)) { /* compression_mode */
  2076. /* DCT */
  2077. /* macroblock_type, 1 or 2-bit VLC */
  2078. if (!get_bits1(&h->gb)) {
  2079. skip_bits1(&h->gb);
  2080. h->c.qscale = mpeg_get_qscale(&h->gb, h->c.q_scale_type);
  2081. }
  2082. for (i = 0; i < mpeg4_block_count[h->c.chroma_format]; i++) {
  2083. if (mpeg4_decode_studio_block(ctx, ctx->block32[i], i) < 0)
  2084. return AVERROR_INVALIDDATA;
  2085. }
  2086. } else {
  2087. /* DPCM */
  2088. check_marker(h->c.avctx, &h->gb, "DPCM block start");
  2089. ctx->dpcm_direction = get_bits1(&h->gb) ? -1 : 1;
  2090. for (i = 0; i < 3; i++) {
  2091. if (mpeg4_decode_dpcm_macroblock(ctx, ctx->dpcm_macroblock[i], i) < 0)
  2092. return AVERROR_INVALIDDATA;
  2093. }
  2094. }
  2095. if (get_bits_left(&h->gb) >= 24 && show_bits(&h->gb, 23) == 0) {
  2096. next_start_code_studio(&h->gb);
  2097. return SLICE_END;
  2098. }
  2099. //vcon-stp9L1.bits (first frame)
  2100. if (get_bits_left(&h->gb) == 0)
  2101. return SLICE_END;
  2102. //vcon-stp2L1.bits, vcon-stp3L1.bits, vcon-stp6L1.bits, vcon-stp7L1.bits, vcon-stp8L1.bits, vcon-stp10L1.bits (first frame)
  2103. if (get_bits_left(&h->gb) < 8U && show_bits(&h->gb, get_bits_left(&h->gb)) == 0)
  2104. return SLICE_END;
  2105. return SLICE_OK;
  2106. }
  2107. static int mpeg4_decode_gop_header(MpegEncContext *s, GetBitContext *gb)
  2108. {
  2109. int hours, minutes, seconds;
  2110. if (!show_bits(gb, 23)) {
  2111. av_log(s->avctx, AV_LOG_WARNING, "GOP header invalid\n");
  2112. return AVERROR_INVALIDDATA;
  2113. }
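/* time_code: 5-bit hours, 6-bit minutes, marker, 6-bit seconds; the two
 * bits skipped afterwards are closed_gov and broken_link. */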
  2114. hours = get_bits(gb, 5);
  2115. minutes = get_bits(gb, 6);
  2116. check_marker(s->avctx, gb, "in gop_header");
  2117. seconds = get_bits(gb, 6);
  2118. s->time_base = seconds + 60*(minutes + 60*hours);
  2119. skip_bits1(gb);
  2120. skip_bits1(gb);
  2121. return 0;
  2122. }
  2123. static int mpeg4_decode_profile_level(MpegEncContext *s, GetBitContext *gb, int *profile, int *level)
  2124. {
  2125. *profile = get_bits(gb, 4);
  2126. *level = get_bits(gb, 4);
  2127. // for Simple profile, level 0
  2128. if (*profile == 0 && *level == 8) {
  2129. *level = 0;
  2130. }
  2131. return 0;
  2132. }
  2133. static int mpeg4_decode_visual_object(MpegEncContext *s, GetBitContext *gb)
  2134. {
  2135. int visual_object_type;
  2136. int is_visual_object_identifier = get_bits1(gb);
  2137. if (is_visual_object_identifier) {
  2138. skip_bits(gb, 4+3);
  2139. }
  2140. visual_object_type = get_bits(gb, 4);
  2141. if (visual_object_type == VOT_VIDEO_ID ||
  2142. visual_object_type == VOT_STILL_TEXTURE_ID) {
  2143. int video_signal_type = get_bits1(gb);
  2144. if (video_signal_type) {
  2145. int video_range, color_description;
  2146. skip_bits(gb, 3); // video_format
  2147. video_range = get_bits1(gb);
  2148. color_description = get_bits1(gb);
  2149. s->avctx->color_range = video_range ? AVCOL_RANGE_JPEG : AVCOL_RANGE_MPEG;
  2150. if (color_description) {
  2151. s->avctx->color_primaries = get_bits(gb, 8);
  2152. s->avctx->color_trc = get_bits(gb, 8);
  2153. s->avctx->colorspace = get_bits(gb, 8);
  2154. }
  2155. }
  2156. }
  2157. return 0;
  2158. }
  2159. static void mpeg4_load_default_matrices(MpegEncContext *s)
  2160. {
  2161. int i, v;
  2162. /* load default matrices */
  2163. for (i = 0; i < 64; i++) {
  2164. int j = s->idsp.idct_permutation[i];
  2165. v = ff_mpeg4_default_intra_matrix[i];
  2166. s->intra_matrix[j] = v;
  2167. s->chroma_intra_matrix[j] = v;
  2168. v = ff_mpeg4_default_non_intra_matrix[i];
  2169. s->inter_matrix[j] = v;
  2170. s->chroma_inter_matrix[j] = v;
  2171. }
  2172. }
  2173. static int read_quant_matrix_ext(MpegEncContext *s, GetBitContext *gb)
  2174. {
  2175. int i, j, v;
  2176. if (get_bits1(gb)) {
  2177. if (get_bits_left(gb) < 64*8)
  2178. return AVERROR_INVALIDDATA;
  2179. /* intra_quantiser_matrix */
  2180. for (i = 0; i < 64; i++) {
  2181. v = get_bits(gb, 8);
  2182. j = s->idsp.idct_permutation[ff_zigzag_direct[i]];
  2183. s->intra_matrix[j] = v;
  2184. s->chroma_intra_matrix[j] = v;
  2185. }
  2186. }
  2187. if (get_bits1(gb)) {
  2188. if (get_bits_left(gb) < 64*8)
  2189. return AVERROR_INVALIDDATA;
  2190. /* non_intra_quantiser_matrix */
  2191. for (i = 0; i < 64; i++) {
  2192. get_bits(gb, 8);
  2193. }
  2194. }
  2195. if (get_bits1(gb)) {
  2196. if (get_bits_left(gb) < 64*8)
  2197. return AVERROR_INVALIDDATA;
  2198. /* chroma_intra_quantiser_matrix */
  2199. for (i = 0; i < 64; i++) {
  2200. v = get_bits(gb, 8);
  2201. j = s->idsp.idct_permutation[ff_zigzag_direct[i]];
  2202. s->chroma_intra_matrix[j] = v;
  2203. }
  2204. }
  2205. if (get_bits1(gb)) {
  2206. if (get_bits_left(gb) < 64*8)
  2207. return AVERROR_INVALIDDATA;
  2208. /* chroma_non_intra_quantiser_matrix */
  2209. for (i = 0; i < 64; i++) {
  2210. get_bits(gb, 8);
  2211. }
  2212. }
  2213. next_start_code_studio(gb);
  2214. return 0;
  2215. }
  2216. static void extension_and_user_data(MpegEncContext *s, GetBitContext *gb, int id)
  2217. {
  2218. uint32_t startcode;
  2219. uint8_t extension_type;
  2220. startcode = show_bits_long(gb, 32);
  2221. if (startcode == USER_DATA_STARTCODE || startcode == EXT_STARTCODE) {
  2222. if ((id == 2 || id == 4) && startcode == EXT_STARTCODE) {
  2223. skip_bits_long(gb, 32);
  2224. extension_type = get_bits(gb, 4);
  2225. if (extension_type == QUANT_MATRIX_EXT_ID)
  2226. read_quant_matrix_ext(s, gb);
  2227. }
  2228. }
  2229. }
  2230. static int decode_studio_vol_header(Mpeg4DecContext *ctx, GetBitContext *gb)
  2231. {
  2232. MPVContext *const s = &ctx->h.c;
  2233. int width, height, aspect_ratio_info;
  2234. int bits_per_raw_sample;
  2235. int rgb, chroma_format;
  2236. // random_accessible_vol and video_object_type_indication have already
  2237. // been read by the caller decode_vol_header()
  2238. skip_bits(gb, 4); /* video_object_layer_verid */
  2239. ctx->shape = get_bits(gb, 2); /* video_object_layer_shape */
  2240. skip_bits(gb, 4); /* video_object_layer_shape_extension */
  2241. skip_bits1(gb); /* progressive_sequence */
  2242. if (ctx->shape != RECT_SHAPE) {
  2243. avpriv_request_sample(s->avctx, "MPEG-4 Studio profile non rectangular shape");
  2244. return AVERROR_PATCHWELCOME;
  2245. }
  2246. if (ctx->shape != BIN_ONLY_SHAPE) {
  2247. rgb = get_bits1(gb); /* rgb_components */
  2248. chroma_format = get_bits(gb, 2); /* chroma_format */
  2249. if (!chroma_format || chroma_format == CHROMA_420 || (rgb && chroma_format == CHROMA_422)) {
  2250. av_log(s->avctx, AV_LOG_ERROR, "illegal chroma format\n");
  2251. return AVERROR_INVALIDDATA;
  2252. }
  2253. bits_per_raw_sample = get_bits(gb, 4); /* bit_depth */
  2254. if (bits_per_raw_sample == 10) {
  2255. if (rgb) {
  2256. s->avctx->pix_fmt = AV_PIX_FMT_GBRP10;
  2257. } else {
  2258. s->avctx->pix_fmt = chroma_format == CHROMA_422 ? AV_PIX_FMT_YUV422P10 : AV_PIX_FMT_YUV444P10;
  2259. }
  2260. } else {
  2261. avpriv_request_sample(s->avctx, "MPEG-4 Studio profile bit-depth %u", bits_per_raw_sample);
  2262. return AVERROR_PATCHWELCOME;
  2263. }
  2264. if (rgb != ctx->rgb || s->chroma_format != chroma_format)
  2265. s->context_reinit = 1;
  2266. s->avctx->bits_per_raw_sample = bits_per_raw_sample;
  2267. ctx->rgb = rgb;
  2268. s->chroma_format = chroma_format;
  2269. }
  2270. if (ctx->shape == RECT_SHAPE) {
  2271. check_marker(s->avctx, gb, "before video_object_layer_width");
  2272. width = get_bits(gb, 14); /* video_object_layer_width */
  2273. check_marker(s->avctx, gb, "before video_object_layer_height");
  2274. height = get_bits(gb, 14); /* video_object_layer_height */
  2275. check_marker(s->avctx, gb, "after video_object_layer_height");
  2276. /* Do the same check as non-studio profile */
  2277. if (width && height) {
  2278. if (s->width && s->height &&
  2279. (s->width != width || s->height != height))
  2280. s->context_reinit = 1;
  2281. s->width = width;
  2282. s->height = height;
  2283. }
  2284. }
  2285. aspect_ratio_info = get_bits(gb, 4);
  2286. if (aspect_ratio_info == FF_ASPECT_EXTENDED) {
  2287. s->avctx->sample_aspect_ratio.num = get_bits(gb, 8); // par_width
  2288. s->avctx->sample_aspect_ratio.den = get_bits(gb, 8); // par_height
  2289. } else {
  2290. s->avctx->sample_aspect_ratio = ff_h263_pixel_aspect[aspect_ratio_info];
  2291. }
  2292. skip_bits(gb, 4); /* frame_rate_code */
  2293. skip_bits(gb, 15); /* first_half_bit_rate */
  2294. check_marker(s->avctx, gb, "after first_half_bit_rate");
  2295. skip_bits(gb, 15); /* latter_half_bit_rate */
  2296. check_marker(s->avctx, gb, "after latter_half_bit_rate");
  2297. skip_bits(gb, 15); /* first_half_vbv_buffer_size */
  2298. check_marker(s->avctx, gb, "after first_half_vbv_buffer_size");
  2299. skip_bits(gb, 3); /* latter_half_vbv_buffer_size */
2300. skip_bits(gb, 11); /* first_half_vbv_occupancy */
2301. check_marker(s->avctx, gb, "after first_half_vbv_occupancy");
  2302. skip_bits(gb, 15); /* latter_half_vbv_occupancy */
  2303. check_marker(s->avctx, gb, "after latter_half_vbv_occupancy");
  2304. s->low_delay = get_bits1(gb);
  2305. ctx->mpeg_quant = get_bits1(gb); /* mpeg2_stream */
  2306. next_start_code_studio(gb);
  2307. extension_and_user_data(s, gb, 2);
  2308. return 0;
  2309. }
  2310. static int decode_vol_header(Mpeg4DecContext *ctx, GetBitContext *gb)
  2311. {
  2312. H263DecContext *const h = &ctx->h;
  2313. int width, height, vo_ver_id, aspect_ratio_info;
  2314. /* vol header */
  2315. skip_bits(gb, 1); /* random access */
  2316. ctx->vo_type = get_bits(gb, 8);
2317. /* If we are in studio profile (per vo_type), check that everything is consistent,
2318. * and if so pass control to decode_studio_vol_header().
2319. * If something is inconsistent, error out;
2320. * otherwise continue with (non-studio) VOL header decoding.
2321. */
  2322. if (ctx->vo_type == CORE_STUDIO_VO_TYPE ||
  2323. ctx->vo_type == SIMPLE_STUDIO_VO_TYPE) {
  2324. if (h->c.avctx->profile != AV_PROFILE_UNKNOWN && h->c.avctx->profile != AV_PROFILE_MPEG4_SIMPLE_STUDIO)
  2325. return AVERROR_INVALIDDATA;
  2326. h->c.studio_profile = 1;
  2327. h->c.avctx->profile = AV_PROFILE_MPEG4_SIMPLE_STUDIO;
  2328. return decode_studio_vol_header(ctx, gb);
  2329. } else if (h->c.studio_profile) {
  2330. return AVERROR_PATCHWELCOME;
  2331. }
  2332. if (get_bits1(gb) != 0) { /* is_ol_id */
  2333. vo_ver_id = get_bits(gb, 4); /* vo_ver_id */
  2334. skip_bits(gb, 3); /* vo_priority */
  2335. } else {
  2336. vo_ver_id = 1;
  2337. }
  2338. aspect_ratio_info = get_bits(gb, 4);
  2339. if (aspect_ratio_info == FF_ASPECT_EXTENDED) {
  2340. h->c.avctx->sample_aspect_ratio.num = get_bits(gb, 8); // par_width
  2341. h->c.avctx->sample_aspect_ratio.den = get_bits(gb, 8); // par_height
  2342. } else {
  2343. h->c.avctx->sample_aspect_ratio = ff_h263_pixel_aspect[aspect_ratio_info];
  2344. }
  2345. if ((ctx->vol_control_parameters = get_bits1(gb))) { /* vol control parameter */
  2346. int chroma_format = get_bits(gb, 2);
  2347. if (chroma_format != CHROMA_420)
  2348. av_log(h->c.avctx, AV_LOG_ERROR, "illegal chroma format\n");
  2349. h->c.low_delay = get_bits1(gb);
  2350. if (get_bits1(gb)) { /* vbv parameters */
  2351. get_bits(gb, 15); /* first_half_bitrate */
  2352. check_marker(h->c.avctx, gb, "after first_half_bitrate");
  2353. get_bits(gb, 15); /* latter_half_bitrate */
  2354. check_marker(h->c.avctx, gb, "after latter_half_bitrate");
  2355. get_bits(gb, 15); /* first_half_vbv_buffer_size */
  2356. check_marker(h->c.avctx, gb, "after first_half_vbv_buffer_size");
  2357. get_bits(gb, 3); /* latter_half_vbv_buffer_size */
  2358. get_bits(gb, 11); /* first_half_vbv_occupancy */
  2359. check_marker(h->c.avctx, gb, "after first_half_vbv_occupancy");
  2360. get_bits(gb, 15); /* latter_half_vbv_occupancy */
  2361. check_marker(h->c.avctx, gb, "after latter_half_vbv_occupancy");
  2362. }
  2363. } else {
  2364. /* is setting low delay flag only once the smartest thing to do?
  2365. * low delay detection will not be overridden. */
  2366. if (h->picture_number == 0) {
  2367. switch (ctx->vo_type) {
  2368. case SIMPLE_VO_TYPE:
  2369. case ADV_SIMPLE_VO_TYPE:
  2370. h->c.low_delay = 1;
  2371. break;
  2372. default:
  2373. h->c.low_delay = 0;
  2374. }
  2375. }
  2376. }
  2377. ctx->shape = get_bits(gb, 2); /* vol shape */
  2378. if (ctx->shape != RECT_SHAPE)
  2379. av_log(h->c.avctx, AV_LOG_ERROR, "only rectangular vol supported\n");
  2380. if (ctx->shape == GRAY_SHAPE && vo_ver_id != 1) {
  2381. av_log(h->c.avctx, AV_LOG_ERROR, "Gray shape not supported\n");
  2382. skip_bits(gb, 4); /* video_object_layer_shape_extension */
  2383. }
  2384. check_marker(h->c.avctx, gb, "before time_increment_resolution");
  2385. h->c.avctx->framerate.num = get_bits(gb, 16);
  2386. if (!h->c.avctx->framerate.num) {
  2387. av_log(h->c.avctx, AV_LOG_ERROR, "framerate==0\n");
  2388. return AVERROR_INVALIDDATA;
  2389. }
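/* vop_time_increment_resolution is kept in framerate.num; vop_time_increment
 * is coded with just enough bits to represent resolution - 1 (at least 1). */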
  2390. ctx->time_increment_bits = av_log2(h->c.avctx->framerate.num - 1) + 1;
  2391. if (ctx->time_increment_bits < 1)
  2392. ctx->time_increment_bits = 1;
  2393. check_marker(h->c.avctx, gb, "before fixed_vop_rate");
  2394. if (get_bits1(gb) != 0) /* fixed_vop_rate */
  2395. h->c.avctx->framerate.den = get_bits(gb, ctx->time_increment_bits);
  2396. else
  2397. h->c.avctx->framerate.den = 1;
  2398. ctx->t_frame = 0;
  2399. if (ctx->shape != BIN_ONLY_SHAPE) {
  2400. if (ctx->shape == RECT_SHAPE) {
  2401. check_marker(h->c.avctx, gb, "before width");
  2402. width = get_bits(gb, 13);
  2403. check_marker(h->c.avctx, gb, "before height");
  2404. height = get_bits(gb, 13);
  2405. check_marker(h->c.avctx, gb, "after height");
  2406. if (width && height && /* they should be non zero but who knows */
  2407. !(h->c.width && h->c.codec_tag == AV_RL32("MP4S"))) {
  2408. if (h->c.width && h->c.height &&
  2409. (h->c.width != width || h->c.height != height))
  2410. h->c.context_reinit = 1;
  2411. h->c.width = width;
  2412. h->c.height = height;
  2413. }
  2414. }
  2415. h->c.progressive_sequence =
  2416. h->c.progressive_frame = get_bits1(gb) ^ 1;
  2417. h->c.interlaced_dct = 0;
  2418. if (!get_bits1(gb) && (h->c.avctx->debug & FF_DEBUG_PICT_INFO))
  2419. av_log(h->c.avctx, AV_LOG_INFO, /* OBMC Disable */
  2420. "MPEG-4 OBMC not supported (very likely buggy encoder)\n");
  2421. if (vo_ver_id == 1)
  2422. ctx->vol_sprite_usage = get_bits1(gb); /* vol_sprite_usage */
  2423. else
  2424. ctx->vol_sprite_usage = get_bits(gb, 2); /* vol_sprite_usage */
  2425. if (ctx->vol_sprite_usage == STATIC_SPRITE)
  2426. av_log(h->c.avctx, AV_LOG_ERROR, "Static Sprites not supported\n");
  2427. if (ctx->vol_sprite_usage == STATIC_SPRITE ||
  2428. ctx->vol_sprite_usage == GMC_SPRITE) {
  2429. if (ctx->vol_sprite_usage == STATIC_SPRITE) {
  2430. skip_bits(gb, 13); // sprite_width
  2431. check_marker(h->c.avctx, gb, "after sprite_width");
  2432. skip_bits(gb, 13); // sprite_height
  2433. check_marker(h->c.avctx, gb, "after sprite_height");
  2434. skip_bits(gb, 13); // sprite_left
  2435. check_marker(h->c.avctx, gb, "after sprite_left");
  2436. skip_bits(gb, 13); // sprite_top
  2437. check_marker(h->c.avctx, gb, "after sprite_top");
  2438. }
  2439. ctx->num_sprite_warping_points = get_bits(gb, 6);
  2440. if (ctx->num_sprite_warping_points > 3) {
  2441. av_log(h->c.avctx, AV_LOG_ERROR,
  2442. "%d sprite_warping_points\n",
  2443. ctx->num_sprite_warping_points);
  2444. ctx->num_sprite_warping_points = 0;
  2445. return AVERROR_INVALIDDATA;
  2446. }
  2447. ctx->sprite_warping_accuracy = get_bits(gb, 2);
  2448. ctx->sprite_brightness_change = get_bits1(gb);
  2449. if (ctx->vol_sprite_usage == STATIC_SPRITE)
  2450. skip_bits1(gb); // low_latency_sprite
  2451. }
  2452. // FIXME sadct disable bit if verid!=1 && shape not rect
  2453. if (get_bits1(gb) == 1) { /* not_8_bit */
  2454. ctx->quant_precision = get_bits(gb, 4); /* quant_precision */
  2455. if (get_bits(gb, 4) != 8) /* bits_per_pixel */
  2456. av_log(h->c.avctx, AV_LOG_ERROR, "N-bit not supported\n");
  2457. if (ctx->quant_precision != 5)
  2458. av_log(h->c.avctx, AV_LOG_ERROR,
  2459. "quant precision %d\n", ctx->quant_precision);
  2460. if (ctx->quant_precision < 3 || ctx->quant_precision > 9)
  2461. ctx->quant_precision = 5;
  2462. } else {
  2463. ctx->quant_precision = 5;
  2464. }
  2465. // FIXME a bunch of grayscale shape things
  2466. if ((ctx->mpeg_quant = get_bits1(gb))) { /* vol_quant_type */
  2467. int i, v;
  2468. mpeg4_load_default_matrices(&h->c);
  2469. /* load custom intra matrix */
  2470. if (get_bits1(gb)) {
  2471. int last = 0;
  2472. for (i = 0; i < 64; i++) {
  2473. int j;
  2474. if (get_bits_left(gb) < 8) {
  2475. av_log(h->c.avctx, AV_LOG_ERROR, "insufficient data for custom matrix\n");
  2476. return AVERROR_INVALIDDATA;
  2477. }
  2478. v = get_bits(gb, 8);
  2479. if (v == 0)
  2480. break;
  2481. last = v;
  2482. j = h->c.idsp.idct_permutation[ff_zigzag_direct[i]];
  2483. h->c.intra_matrix[j] = last;
  2484. }
  2485. /* replicate last value */
  2486. for (; i < 64; i++) {
  2487. int j = h->c.idsp.idct_permutation[ff_zigzag_direct[i]];
  2488. h->c.intra_matrix[j] = last;
  2489. }
  2490. }
  2491. /* load custom non intra matrix */
  2492. if (get_bits1(gb)) {
  2493. int last = 0;
  2494. for (i = 0; i < 64; i++) {
  2495. int j;
  2496. if (get_bits_left(gb) < 8) {
  2497. av_log(h->c.avctx, AV_LOG_ERROR, "insufficient data for custom matrix\n");
  2498. return AVERROR_INVALIDDATA;
  2499. }
  2500. v = get_bits(gb, 8);
  2501. if (v == 0)
  2502. break;
  2503. last = v;
  2504. j = h->c.idsp.idct_permutation[ff_zigzag_direct[i]];
  2505. h->c.inter_matrix[j] = v;
  2506. }
  2507. /* replicate last value */
  2508. for (; i < 64; i++) {
  2509. int j = h->c.idsp.idct_permutation[ff_zigzag_direct[i]];
  2510. h->c.inter_matrix[j] = last;
  2511. }
  2512. }
  2513. // FIXME a bunch of grayscale shape things
  2514. }
  2515. if (vo_ver_id != 1)
  2516. h->c.quarter_sample = get_bits1(gb);
  2517. else
  2518. h->c.quarter_sample = 0;
  2519. if (get_bits_left(gb) < 4) {
  2520. av_log(h->c.avctx, AV_LOG_ERROR, "VOL Header truncated\n");
  2521. return AVERROR_INVALIDDATA;
  2522. }
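/* Complexity-estimation header: this decoder does not use the statistics, it only
 * records how many bits each VOP type will carry (cplx_estimation_trash_*) so that
 * the corresponding fields can be skipped in every VOP header later on. */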
  2523. if (!get_bits1(gb)) {
  2524. int pos = get_bits_count(gb);
  2525. int estimation_method = get_bits(gb, 2);
  2526. if (estimation_method < 2) {
  2527. if (!get_bits1(gb)) {
  2528. ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* opaque */
  2529. ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* transparent */
  2530. ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* intra_cae */
  2531. ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* inter_cae */
  2532. ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* no_update */
  2533. ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* upsampling */
  2534. }
  2535. if (!get_bits1(gb)) {
  2536. ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* intra_blocks */
  2537. ctx->cplx_estimation_trash_p += 8 * get_bits1(gb); /* inter_blocks */
  2538. ctx->cplx_estimation_trash_p += 8 * get_bits1(gb); /* inter4v_blocks */
  2539. ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* not coded blocks */
  2540. }
  2541. if (!check_marker(h->c.avctx, gb, "in complexity estimation part 1")) {
  2542. skip_bits_long(gb, pos - get_bits_count(gb));
  2543. goto no_cplx_est;
  2544. }
  2545. if (!get_bits1(gb)) {
  2546. ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* dct_coeffs */
  2547. ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* dct_lines */
  2548. ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* vlc_syms */
  2549. ctx->cplx_estimation_trash_i += 4 * get_bits1(gb); /* vlc_bits */
  2550. }
  2551. if (!get_bits1(gb)) {
  2552. ctx->cplx_estimation_trash_p += 8 * get_bits1(gb); /* apm */
  2553. ctx->cplx_estimation_trash_p += 8 * get_bits1(gb); /* npm */
  2554. ctx->cplx_estimation_trash_b += 8 * get_bits1(gb); /* interpolate_mc_q */
  2555. ctx->cplx_estimation_trash_p += 8 * get_bits1(gb); /* forwback_mc_q */
  2556. ctx->cplx_estimation_trash_p += 8 * get_bits1(gb); /* halfpel2 */
  2557. ctx->cplx_estimation_trash_p += 8 * get_bits1(gb); /* halfpel4 */
  2558. }
  2559. if (!check_marker(h->c.avctx, gb, "in complexity estimation part 2")) {
  2560. skip_bits_long(gb, pos - get_bits_count(gb));
  2561. goto no_cplx_est;
  2562. }
  2563. if (estimation_method == 1) {
  2564. ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* sadct */
  2565. ctx->cplx_estimation_trash_p += 8 * get_bits1(gb); /* qpel */
  2566. }
  2567. } else
  2568. av_log(h->c.avctx, AV_LOG_ERROR,
  2569. "Invalid Complexity estimation method %d\n",
  2570. estimation_method);
  2571. } else {
  2572. no_cplx_est:
  2573. ctx->cplx_estimation_trash_i =
  2574. ctx->cplx_estimation_trash_p =
  2575. ctx->cplx_estimation_trash_b = 0;
  2576. }
  2577. ctx->resync_marker = !get_bits1(gb); /* resync_marker_disabled */
  2578. h->data_partitioning = get_bits1(gb);
  2579. if (h->data_partitioning)
  2580. ctx->rvlc = get_bits1(gb);
  2581. if (vo_ver_id != 1) {
  2582. ctx->new_pred = get_bits1(gb);
  2583. if (ctx->new_pred) {
  2584. av_log(h->c.avctx, AV_LOG_ERROR, "new pred not supported\n");
  2585. skip_bits(gb, 2); /* requested upstream message type */
  2586. skip_bits1(gb); /* newpred segment type */
  2587. }
  2588. if (get_bits1(gb)) // reduced_res_vop
  2589. av_log(h->c.avctx, AV_LOG_ERROR,
  2590. "reduced resolution VOP not supported\n");
  2591. } else {
  2592. ctx->new_pred = 0;
  2593. }
  2594. ctx->scalability = get_bits1(gb);
  2595. if (ctx->scalability) {
  2596. GetBitContext bak = *gb;
  2597. int h_sampling_factor_n;
  2598. int h_sampling_factor_m;
  2599. int v_sampling_factor_n;
  2600. int v_sampling_factor_m;
  2601. skip_bits1(gb); // hierarchy_type
  2602. skip_bits(gb, 4); /* ref_layer_id */
  2603. skip_bits1(gb); /* ref_layer_sampling_dir */
  2604. h_sampling_factor_n = get_bits(gb, 5);
  2605. h_sampling_factor_m = get_bits(gb, 5);
  2606. v_sampling_factor_n = get_bits(gb, 5);
  2607. v_sampling_factor_m = get_bits(gb, 5);
  2608. ctx->enhancement_type = get_bits1(gb);
  2609. if (h_sampling_factor_n == 0 || h_sampling_factor_m == 0 ||
  2610. v_sampling_factor_n == 0 || v_sampling_factor_m == 0) {
2611. /* illegal scalability header (VERY broken encoder),
2612. * trying to work around it */
  2613. ctx->scalability = 0;
  2614. *gb = bak;
  2615. } else
  2616. av_log(h->c.avctx, AV_LOG_ERROR, "scalability not supported\n");
  2617. // bin shape stuff FIXME
  2618. }
  2619. }
  2620. if (h->c.avctx->debug&FF_DEBUG_PICT_INFO) {
  2621. av_log(h->c.avctx, AV_LOG_DEBUG, "tb %d/%d, tincrbits:%d, qp_prec:%d, ps:%d, low_delay:%d %s%s%s%s\n",
  2622. h->c.avctx->framerate.den, h->c.avctx->framerate.num,
  2623. ctx->time_increment_bits,
  2624. ctx->quant_precision,
  2625. h->c.progressive_sequence,
  2626. h->c.low_delay,
  2627. ctx->scalability ? "scalability " :"" ,
  2628. h->c.quarter_sample ? "qpel " : "",
  2629. h->data_partitioning ? "partition " : "",
  2630. ctx->rvlc ? "rvlc " : ""
  2631. );
  2632. }
  2633. return 0;
  2634. }
2635. /**
2636. * Decode the user data in the header.
2637. * Also initializes the divx/xvid/lavc version and build numbers.
2638. */
  2639. static int decode_user_data(Mpeg4DecContext *ctx, GetBitContext *gb)
  2640. {
  2641. H263DecContext *const h = &ctx->h;
  2642. char buf[256];
  2643. int i;
  2644. int e;
  2645. int ver = 0, build = 0, ver2 = 0, ver3 = 0;
  2646. char last;
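/* Copy the user-data string (at most 255 bytes); a run of 23 zero bits indicates
 * the next startcode and terminates the string. */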
  2647. for (i = 0; i < 255 && get_bits_count(gb) < gb->size_in_bits; i++) {
  2648. if (show_bits(gb, 23) == 0)
  2649. break;
  2650. buf[i] = get_bits(gb, 8);
  2651. }
  2652. buf[i] = 0;
  2653. /* divx detection */
  2654. e = sscanf(buf, "DivX%dBuild%d%c", &ver, &build, &last);
  2655. if (e < 2)
  2656. e = sscanf(buf, "DivX%db%d%c", &ver, &build, &last);
  2657. if (e >= 2) {
  2658. ctx->divx_version = ver;
  2659. ctx->divx_build = build;
  2660. h->divx_packed = e == 3 && last == 'p';
  2661. }
  2662. /* libavcodec detection */
  2663. e = sscanf(buf, "FFmpe%*[^b]b%d", &build) + 3;
  2664. if (e != 4)
  2665. e = sscanf(buf, "FFmpeg v%d.%d.%d / libavcodec build: %d", &ver, &ver2, &ver3, &build);
  2666. if (e != 4) {
  2667. e = sscanf(buf, "Lavc%d.%d.%d", &ver, &ver2, &ver3) + 1;
  2668. if (e > 1) {
  2669. if (ver > 0xFFU || ver2 > 0xFFU || ver3 > 0xFFU) {
  2670. av_log(h->c.avctx, AV_LOG_WARNING,
  2671. "Unknown Lavc version string encountered, %d.%d.%d; "
  2672. "clamping sub-version values to 8-bits.\n",
  2673. ver, ver2, ver3);
  2674. }
  2675. build = ((ver & 0xFF) << 16) + ((ver2 & 0xFF) << 8) + (ver3 & 0xFF);
  2676. }
  2677. }
  2678. if (e != 4) {
  2679. if (strcmp(buf, "ffmpeg") == 0)
  2680. ctx->lavc_build = 4600;
  2681. }
  2682. if (e == 4)
  2683. ctx->lavc_build = build;
  2684. /* Xvid detection */
  2685. e = sscanf(buf, "XviD%d", &build);
  2686. if (e == 1)
  2687. ctx->xvid_build = build;
  2688. return 0;
  2689. }
  2690. static int decode_vop_header(Mpeg4DecContext *ctx, GetBitContext *gb,
  2691. int parse_only)
  2692. {
  2693. H263DecContext *const h = &ctx->h;
  2694. int time_incr, time_increment;
  2695. int64_t pts;
  2696. h->c.mcsel = 0;
2697. h->c.pict_type = get_bits(gb, 2) + AV_PICTURE_TYPE_I; /* pict_type: I = 0, P = 1 */
  2698. if (h->c.pict_type == AV_PICTURE_TYPE_B && h->c.low_delay &&
  2699. ctx->vol_control_parameters == 0 && !(h->c.avctx->flags & AV_CODEC_FLAG_LOW_DELAY)) {
  2700. av_log(h->c.avctx, AV_LOG_ERROR, "low_delay flag set incorrectly, clearing it\n");
  2701. h->c.low_delay = 0;
  2702. }
  2703. h->partitioned_frame = h->data_partitioning && h->c.pict_type != AV_PICTURE_TYPE_B;
  2704. if (h->partitioned_frame)
  2705. h->decode_mb = mpeg4_decode_partitioned_mb;
  2706. else
  2707. h->decode_mb = mpeg4_decode_mb;
  2708. time_incr = 0;
  2709. while (get_bits1(gb) != 0)
  2710. time_incr++;
  2711. check_marker(h->c.avctx, gb, "before time_increment");
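/* If the VOL header was missing or inconsistent, recover time_increment_bits by
 * probing successively larger widths until the bits following the increment match
 * the expected marker/vop_coded pattern. */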
  2712. if (ctx->time_increment_bits == 0 ||
  2713. !(show_bits(gb, ctx->time_increment_bits + 1) & 1)) {
  2714. av_log(h->c.avctx, AV_LOG_WARNING,
  2715. "time_increment_bits %d is invalid in relation to the current bitstream, this is likely caused by a missing VOL header\n", ctx->time_increment_bits);
  2716. for (ctx->time_increment_bits = 1;
  2717. ctx->time_increment_bits < 16;
  2718. ctx->time_increment_bits++) {
  2719. if (h->c.pict_type == AV_PICTURE_TYPE_P ||
  2720. (h->c.pict_type == AV_PICTURE_TYPE_S &&
  2721. ctx->vol_sprite_usage == GMC_SPRITE)) {
  2722. if ((show_bits(gb, ctx->time_increment_bits + 6) & 0x37) == 0x30)
  2723. break;
  2724. } else if ((show_bits(gb, ctx->time_increment_bits + 5) & 0x1F) == 0x18)
  2725. break;
  2726. }
  2727. av_log(h->c.avctx, AV_LOG_WARNING,
  2728. "time_increment_bits set to %d bits, based on bitstream analysis\n", ctx->time_increment_bits);
  2729. }
  2730. if (IS_3IV1)
  2731. time_increment = get_bits1(gb); // FIXME investigate further
  2732. else
  2733. time_increment = get_bits(gb, ctx->time_increment_bits);
  2734. if (h->c.pict_type != AV_PICTURE_TYPE_B) {
  2735. h->c.last_time_base = h->c.time_base;
  2736. h->c.time_base += time_incr;
  2737. h->c.time = h->c.time_base * (int64_t)h->c.avctx->framerate.num + time_increment;
  2738. if (h->c.workaround_bugs & FF_BUG_UMP4) {
  2739. if (h->c.time < h->c.last_non_b_time) {
2740. /* header is not mpeg-4-compatible, broken encoder,
2741. * trying to work around it */
  2742. h->c.time_base++;
  2743. h->c.time += h->c.avctx->framerate.num;
  2744. }
  2745. }
  2746. h->c.pp_time = h->c.time - h->c.last_non_b_time;
  2747. h->c.last_non_b_time = h->c.time;
  2748. } else {
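/* B-frame timing: pp_time is the distance between the two surrounding reference
 * frames and pb_time the distance from the past reference to this B-frame; both
 * are later used to scale direct-mode motion vectors. */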
  2749. h->c.time = (h->c.last_time_base + time_incr) * (int64_t)h->c.avctx->framerate.num + time_increment;
  2750. h->c.pb_time = h->c.pp_time - (h->c.last_non_b_time - h->c.time);
  2751. if (h->c.pp_time <= h->c.pb_time ||
  2752. h->c.pp_time <= h->c.pp_time - h->c.pb_time ||
  2753. h->c.pp_time <= 0) {
  2754. /* messed up order, maybe after seeking? skipping current B-frame */
  2755. return FRAME_SKIPPED;
  2756. }
  2757. ff_mpeg4_init_direct_mv(&h->c);
  2758. if (ctx->t_frame == 0)
  2759. ctx->t_frame = h->c.pb_time;
  2760. if (ctx->t_frame == 0)
  2761. ctx->t_frame = 1; // 1/0 protection
  2762. h->c.pp_field_time = (ROUNDED_DIV(h->c.last_non_b_time, ctx->t_frame) -
  2763. ROUNDED_DIV(h->c.last_non_b_time - h->c.pp_time, ctx->t_frame)) * 2;
  2764. h->c.pb_field_time = (ROUNDED_DIV(h->c.time, ctx->t_frame) -
  2765. ROUNDED_DIV(h->c.last_non_b_time - h->c.pp_time, ctx->t_frame)) * 2;
  2766. if (h->c.pp_field_time <= h->c.pb_field_time || h->c.pb_field_time <= 1) {
  2767. h->c.pb_field_time = 2;
  2768. h->c.pp_field_time = 4;
  2769. if (!h->c.progressive_sequence)
  2770. return FRAME_SKIPPED;
  2771. }
  2772. }
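/* h->c.time is counted in ticks of 1/framerate.num seconds (the MPEG-4 time
 * increment resolution), so dividing by framerate.den yields a timestamp in
 * frame durations. */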
  2773. if (h->c.avctx->framerate.den)
  2774. pts = ROUNDED_DIV(h->c.time, h->c.avctx->framerate.den);
  2775. else
  2776. pts = AV_NOPTS_VALUE;
  2777. ff_dlog(h->c.avctx, "MPEG4 PTS: %"PRId64"\n", pts);
  2778. check_marker(h->c.avctx, gb, "before vop_coded");
  2779. /* vop coded */
  2780. if (get_bits1(gb) != 1) {
  2781. if (h->c.avctx->debug & FF_DEBUG_PICT_INFO)
  2782. av_log(h->c.avctx, AV_LOG_ERROR, "vop not coded\n");
  2783. h->skipped_last_frame = 1;
  2784. return FRAME_SKIPPED;
  2785. }
  2786. if (ctx->new_pred)
  2787. decode_new_pred(ctx, gb);
  2788. if (ctx->shape != BIN_ONLY_SHAPE &&
  2789. (h->c.pict_type == AV_PICTURE_TYPE_P ||
  2790. (h->c.pict_type == AV_PICTURE_TYPE_S &&
  2791. ctx->vol_sprite_usage == GMC_SPRITE))) {
  2792. /* rounding type for motion estimation */
  2793. h->c.no_rounding = get_bits1(gb);
  2794. } else {
  2795. h->c.no_rounding = 0;
  2796. }
  2797. // FIXME reduced res stuff
  2798. if (ctx->shape != RECT_SHAPE) {
  2799. if (ctx->vol_sprite_usage != 1 || h->c.pict_type != AV_PICTURE_TYPE_I) {
  2800. skip_bits(gb, 13); /* width */
  2801. check_marker(h->c.avctx, gb, "after width");
  2802. skip_bits(gb, 13); /* height */
  2803. check_marker(h->c.avctx, gb, "after height");
  2804. skip_bits(gb, 13); /* hor_spat_ref */
  2805. check_marker(h->c.avctx, gb, "after hor_spat_ref");
  2806. skip_bits(gb, 13); /* ver_spat_ref */
  2807. }
  2808. skip_bits1(gb); /* change_CR_disable */
  2809. if (get_bits1(gb) != 0)
  2810. skip_bits(gb, 8); /* constant_alpha_value */
  2811. }
  2812. // FIXME complexity estimation stuff
  2813. if (ctx->shape != BIN_ONLY_SHAPE) {
  2814. skip_bits_long(gb, ctx->cplx_estimation_trash_i);
  2815. if (h->c.pict_type != AV_PICTURE_TYPE_I)
  2816. skip_bits_long(gb, ctx->cplx_estimation_trash_p);
  2817. if (h->c.pict_type == AV_PICTURE_TYPE_B)
  2818. skip_bits_long(gb, ctx->cplx_estimation_trash_b);
  2819. if (get_bits_left(gb) < 3) {
  2820. av_log(h->c.avctx, AV_LOG_ERROR, "Header truncated\n");
  2821. return AVERROR_INVALIDDATA;
  2822. }
  2823. ctx->intra_dc_threshold = ff_mpeg4_dc_threshold[get_bits(gb, 3)];
  2824. if (!h->c.progressive_sequence) {
  2825. h->c.top_field_first = get_bits1(gb);
  2826. h->c.alternate_scan = get_bits1(gb);
  2827. } else
  2828. h->c.alternate_scan = 0;
  2829. }
  2830. /* Skip at this point when only parsing since the remaining
  2831. * data is not useful for a parser and requires the
  2832. * sprite_trajectory VLC to be initialized. */
  2833. if (parse_only)
  2834. goto end;
  2835. if (h->c.alternate_scan) {
  2836. ff_init_scantable(h->c.idsp.idct_permutation, &h->c.intra_scantable, ff_alternate_vertical_scan);
  2837. ff_permute_scantable(h->c.permutated_intra_h_scantable, ff_alternate_vertical_scan,
  2838. h->c.idsp.idct_permutation);
  2839. } else {
  2840. ff_init_scantable(h->c.idsp.idct_permutation, &h->c.intra_scantable, ff_zigzag_direct);
  2841. ff_permute_scantable(h->c.permutated_intra_h_scantable, ff_alternate_horizontal_scan,
  2842. h->c.idsp.idct_permutation);
  2843. }
  2844. ff_permute_scantable(h->c.permutated_intra_v_scantable, ff_alternate_vertical_scan,
  2845. h->c.idsp.idct_permutation);
  2846. if (h->c.pict_type == AV_PICTURE_TYPE_S) {
  2847. if((ctx->vol_sprite_usage == STATIC_SPRITE ||
  2848. ctx->vol_sprite_usage == GMC_SPRITE)) {
  2849. if (mpeg4_decode_sprite_trajectory(ctx, gb) < 0)
  2850. return AVERROR_INVALIDDATA;
  2851. if (ctx->sprite_brightness_change)
  2852. av_log(h->c.avctx, AV_LOG_ERROR,
  2853. "sprite_brightness_change not supported\n");
  2854. if (ctx->vol_sprite_usage == STATIC_SPRITE)
  2855. av_log(h->c.avctx, AV_LOG_ERROR, "static sprite not supported\n");
  2856. } else {
  2857. memset(ctx->sprite_offset, 0, sizeof(ctx->sprite_offset));
  2858. memset(ctx->sprite_delta, 0, sizeof(ctx->sprite_delta));
  2859. }
  2860. }
  2861. ctx->f_code = 1;
  2862. ctx->b_code = 1;
  2863. if (ctx->shape != BIN_ONLY_SHAPE) {
  2864. h->c.chroma_qscale = h->c.qscale = get_bits(gb, ctx->quant_precision);
  2865. if (h->c.qscale == 0) {
  2866. av_log(h->c.avctx, AV_LOG_ERROR,
  2867. "Error, header damaged or not MPEG-4 header (qscale=0)\n");
2868. return AVERROR_INVALIDDATA; // makes no sense to continue, as there is nothing left of the image
  2869. }
  2870. if (h->c.pict_type != AV_PICTURE_TYPE_I) {
  2871. ctx->f_code = get_bits(gb, 3); /* fcode_for */
  2872. if (ctx->f_code == 0) {
  2873. av_log(h->c.avctx, AV_LOG_ERROR,
  2874. "Error, header damaged or not MPEG-4 header (f_code=0)\n");
  2875. ctx->f_code = 1;
2876. return AVERROR_INVALIDDATA; // makes no sense to continue, as there is nothing left of the image
  2877. }
  2878. }
  2879. if (h->c.pict_type == AV_PICTURE_TYPE_B) {
  2880. ctx->b_code = get_bits(gb, 3);
  2881. if (ctx->b_code == 0) {
  2882. av_log(h->c.avctx, AV_LOG_ERROR,
  2883. "Error, header damaged or not MPEG4 header (b_code=0)\n");
2884. ctx->b_code = 1;
  2885. return AVERROR_INVALIDDATA; // makes no sense to continue, as the MV decoding will break very quickly
  2886. }
  2887. }
  2888. if (h->c.avctx->debug & FF_DEBUG_PICT_INFO) {
  2889. av_log(h->c.avctx, AV_LOG_DEBUG,
  2890. "qp:%d fc:%d,%d %c size:%d pro:%d alt:%d top:%d %cpel part:%d resync:%d w:%d a:%d rnd:%d vot:%d%s dc:%d ce:%d/%d/%d time:%"PRId64" tincr:%d\n",
  2891. h->c.qscale, ctx->f_code, ctx->b_code,
  2892. h->c.pict_type == AV_PICTURE_TYPE_I ? 'I' : (h->c.pict_type == AV_PICTURE_TYPE_P ? 'P' : (h->c.pict_type == AV_PICTURE_TYPE_B ? 'B' : 'S')),
  2893. gb->size_in_bits,h->c.progressive_sequence, h->c.alternate_scan,
  2894. h->c.top_field_first, h->c.quarter_sample ? 'q' : 'h',
  2895. h->data_partitioning, ctx->resync_marker,
  2896. ctx->num_sprite_warping_points, ctx->sprite_warping_accuracy,
  2897. 1 - h->c.no_rounding, ctx->vo_type,
  2898. ctx->vol_control_parameters ? " VOLC" : " ", ctx->intra_dc_threshold,
  2899. ctx->cplx_estimation_trash_i, ctx->cplx_estimation_trash_p,
  2900. ctx->cplx_estimation_trash_b,
  2901. h->c.time,
  2902. time_increment
  2903. );
  2904. }
  2905. if (!ctx->scalability) {
  2906. if (ctx->shape != RECT_SHAPE && h->c.pict_type != AV_PICTURE_TYPE_I)
  2907. skip_bits1(gb); // vop shape coding type
  2908. } else {
  2909. if (ctx->enhancement_type) {
  2910. int load_backward_shape = get_bits1(gb);
  2911. if (load_backward_shape)
  2912. av_log(h->c.avctx, AV_LOG_ERROR,
  2913. "load backward shape isn't supported\n");
  2914. }
  2915. skip_bits(gb, 2); // ref_select_code
  2916. }
  2917. }
  2918. h->c.dct_unquantize_intra = ctx->mpeg_quant ? ctx->dct_unquantize_mpeg2_intra
  2919. : ctx->dct_unquantize_h263_intra;
  2920. // The following tells ff_mpv_reconstruct_mb() to unquantize iff mpeg_quant
  2921. h->c.dct_unquantize_inter = ctx->mpeg_quant ? ctx->dct_unquantize_mpeg2_inter : NULL;
  2922. end:
2923. /* detect buggy encoders which don't set the low_delay flag
2924. * (divx4/xvid/opendivx). Note that we cannot easily detect divx5
2925. * without B-frames (although it is buggy too). */
  2926. if (ctx->vo_type == 0 && ctx->vol_control_parameters == 0 &&
  2927. ctx->divx_version == -1 && h->picture_number == 0) {
  2928. av_log(h->c.avctx, AV_LOG_WARNING,
  2929. "looks like this file was encoded with (divx4/(old)xvid/opendivx) -> forcing low_delay flag\n");
  2930. h->c.low_delay = 1;
  2931. }
  2932. h->picture_number++; // better than pic number==0 always ;)
  2933. if (h->c.workaround_bugs & FF_BUG_EDGE) {
  2934. h->c.h_edge_pos = h->c.width;
  2935. h->c.v_edge_pos = h->c.height;
  2936. }
  2937. return 0;
  2938. }
  2939. static void decode_smpte_tc(Mpeg4DecContext *ctx, GetBitContext *gb)
  2940. {
  2941. AVCodecContext *const avctx = ctx->h.c.avctx;
  2942. skip_bits(gb, 16); /* Time_code[63..48] */
  2943. check_marker(avctx, gb, "after Time_code[63..48]");
  2944. skip_bits(gb, 16); /* Time_code[47..32] */
  2945. check_marker(avctx, gb, "after Time_code[47..32]");
  2946. skip_bits(gb, 16); /* Time_code[31..16] */
  2947. check_marker(avctx, gb, "after Time_code[31..16]");
  2948. skip_bits(gb, 16); /* Time_code[15..0] */
  2949. check_marker(avctx, gb, "after Time_code[15..0]");
  2950. skip_bits(gb, 4); /* reserved_bits */
  2951. }
2952. /**
2953. * Decode the next studio vop header.
2954. * @return <0 if something went wrong
2955. */
  2956. static int decode_studio_vop_header(Mpeg4DecContext *ctx, GetBitContext *gb)
  2957. {
  2958. H263DecContext *const h = &ctx->h;
  2959. if (get_bits_left(gb) <= 32)
  2960. return 0;
  2961. h->partitioned_frame = 0;
  2962. h->c.interlaced_dct = 0;
  2963. h->decode_mb = mpeg4_decode_studio_mb;
  2964. decode_smpte_tc(ctx, gb);
  2965. skip_bits(gb, 10); /* temporal_reference */
  2966. skip_bits(gb, 2); /* vop_structure */
  2967. h->c.pict_type = get_bits(gb, 2) + AV_PICTURE_TYPE_I; /* vop_coding_type */
  2968. if (get_bits1(gb)) { /* vop_coded */
  2969. skip_bits1(gb); /* top_field_first */
  2970. skip_bits1(gb); /* repeat_first_field */
  2971. h->c.progressive_frame = get_bits1(gb) ^ 1; /* progressive_frame */
  2972. }
  2973. if (h->c.pict_type == AV_PICTURE_TYPE_I) {
  2974. if (get_bits1(gb))
  2975. reset_studio_dc_predictors(ctx);
  2976. }
  2977. if (ctx->shape != BIN_ONLY_SHAPE) {
  2978. h->c.alternate_scan = get_bits1(gb);
  2979. h->c.frame_pred_frame_dct = get_bits1(gb);
  2980. ctx->dct_precision = get_bits(gb, 2);
  2981. h->c.intra_dc_precision = get_bits(gb, 2);
  2982. h->c.q_scale_type = get_bits1(gb);
  2983. }
  2984. ff_init_scantable(h->c.idsp.idct_permutation, &h->c.intra_scantable,
  2985. h->c.alternate_scan ? ff_alternate_vertical_scan : ff_zigzag_direct);
  2986. mpeg4_load_default_matrices(&h->c);
  2987. next_start_code_studio(gb);
  2988. extension_and_user_data(&h->c, gb, 4);
  2989. return 0;
  2990. }
  2991. static int decode_studiovisualobject(Mpeg4DecContext *ctx, GetBitContext *gb)
  2992. {
  2993. int visual_object_type;
  2994. skip_bits(gb, 4); /* visual_object_verid */
  2995. visual_object_type = get_bits(gb, 4);
  2996. if (visual_object_type != VOT_VIDEO_ID) {
  2997. avpriv_request_sample(ctx->h.c.avctx, "VO type %u", visual_object_type);
  2998. return AVERROR_PATCHWELCOME;
  2999. }
  3000. next_start_code_studio(gb);
  3001. extension_and_user_data(&ctx->h.c, gb, 1);
  3002. return 0;
  3003. }
3004. /**
3005. * Decode MPEG-4 headers.
3006. *
3007. * @param header If set, the absence of a VOP is not treated as an error; otherwise it is.
3008. * @param parse_only If set, things only relevant to a decoder may be skipped;
3009. * furthermore, the VLC tables may be uninitialized.
3010. * @return <0 if an error occurred
3011. * FRAME_SKIPPED if a not coded VOP is found
3012. * 0 else
3013. */
  3014. int ff_mpeg4_parse_picture_header(Mpeg4DecContext *ctx, GetBitContext *gb,
  3015. int header, int parse_only)
  3016. {
  3017. MPVContext *const s = &ctx->h.c;
  3018. unsigned startcode, v;
  3019. int ret;
  3020. int vol = 0;
  3021. /* search next start code */
  3022. align_get_bits(gb);
3023. // If we have not switched to the studio profile, then we also did not switch bits_per_raw_sample;
3024. // that means something else (like a previous instance) set it from outside, which would be
3025. // inconsistent with the correct state, thus reset it
  3026. if (!s->studio_profile && s->avctx->bits_per_raw_sample != 8)
  3027. s->avctx->bits_per_raw_sample = 0;
  3028. if (s->codec_tag == AV_RL32("WV1F") && show_bits(gb, 24) == 0x575630) {
  3029. skip_bits(gb, 24);
  3030. if (get_bits(gb, 8) == 0xF0)
  3031. goto end;
  3032. }
  3033. startcode = 0xff;
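/* Byte-wise scan for the next startcode: shift each new byte into 'startcode'
 * and test for the 0x000001xx prefix. */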
  3034. for (;;) {
  3035. if (get_bits_count(gb) >= gb->size_in_bits) {
  3036. if (gb->size_in_bits == 8 &&
  3037. (ctx->divx_version >= 0 || ctx->xvid_build >= 0) || s->codec_tag == AV_RL32("QMP4")) {
  3038. av_log(s->avctx, AV_LOG_VERBOSE, "frame skip %d\n", gb->size_in_bits);
  3039. return FRAME_SKIPPED; // divx bug
  3040. } else if (header && get_bits_count(gb) == gb->size_in_bits) {
  3041. return 0; // ordinary return value for parsing of extradata
  3042. } else
  3043. return AVERROR_INVALIDDATA; // end of stream
  3044. }
  3045. /* use the bits after the test */
  3046. v = get_bits(gb, 8);
  3047. startcode = ((startcode << 8) | v) & 0xffffffff;
  3048. if ((startcode & 0xFFFFFF00) != 0x100)
  3049. continue; // no startcode
  3050. if (s->avctx->debug & FF_DEBUG_STARTCODE) {
  3051. const char *name;
  3052. if (startcode <= 0x11F)
  3053. name = "Video Object Start";
  3054. else if (startcode <= 0x12F)
  3055. name = "Video Object Layer Start";
  3056. else if (startcode <= 0x13F)
  3057. name = "Reserved";
  3058. else if (startcode <= 0x15F)
  3059. name = "FGS bp start";
  3060. else if (startcode <= 0x1AF)
  3061. name = "Reserved";
  3062. else if (startcode == 0x1B0)
  3063. name = "Visual Object Seq Start";
  3064. else if (startcode == 0x1B1)
  3065. name = "Visual Object Seq End";
  3066. else if (startcode == 0x1B2)
  3067. name = "User Data";
  3068. else if (startcode == 0x1B3)
  3069. name = "Group of VOP start";
  3070. else if (startcode == 0x1B4)
  3071. name = "Video Session Error";
  3072. else if (startcode == 0x1B5)
  3073. name = "Visual Object Start";
  3074. else if (startcode == 0x1B6)
  3075. name = "Video Object Plane start";
  3076. else if (startcode == 0x1B7)
  3077. name = "slice start";
  3078. else if (startcode == 0x1B8)
  3079. name = "extension start";
  3080. else if (startcode == 0x1B9)
  3081. name = "fgs start";
  3082. else if (startcode == 0x1BA)
  3083. name = "FBA Object start";
  3084. else if (startcode == 0x1BB)
  3085. name = "FBA Object Plane start";
  3086. else if (startcode == 0x1BC)
  3087. name = "Mesh Object start";
  3088. else if (startcode == 0x1BD)
  3089. name = "Mesh Object Plane start";
  3090. else if (startcode == 0x1BE)
  3091. name = "Still Texture Object start";
  3092. else if (startcode == 0x1BF)
  3093. name = "Texture Spatial Layer start";
  3094. else if (startcode == 0x1C0)
  3095. name = "Texture SNR Layer start";
  3096. else if (startcode == 0x1C1)
  3097. name = "Texture Tile start";
  3098. else if (startcode == 0x1C2)
  3099. name = "Texture Shape Layer start";
  3100. else if (startcode == 0x1C3)
  3101. name = "stuffing start";
  3102. else if (startcode <= 0x1C5)
  3103. name = "Reserved";
  3104. else if (startcode <= 0x1FF)
  3105. name = "System start";
  3106. else
  3107. av_unreachable("Unexpected startcode");
  3108. av_log(s->avctx, AV_LOG_DEBUG, "startcode: %3X %s at %d\n",
  3109. startcode, name, get_bits_count(gb));
  3110. }
  3111. if (startcode >= 0x120 && startcode <= 0x12F) {
  3112. if (vol) {
  3113. av_log(s->avctx, AV_LOG_WARNING, "Ignoring multiple VOL headers\n");
  3114. continue;
  3115. }
  3116. vol++;
  3117. if ((ret = decode_vol_header(ctx, gb)) < 0)
  3118. return ret;
  3119. } else if (startcode == USER_DATA_STARTCODE) {
  3120. decode_user_data(ctx, gb);
  3121. } else if (startcode == GOP_STARTCODE) {
  3122. mpeg4_decode_gop_header(s, gb);
  3123. } else if (startcode == VOS_STARTCODE) {
  3124. int profile, level;
  3125. mpeg4_decode_profile_level(s, gb, &profile, &level);
  3126. if (profile == AV_PROFILE_MPEG4_SIMPLE_STUDIO &&
  3127. (level > 0 && level < 9)) {
  3128. s->studio_profile = 1;
  3129. next_start_code_studio(gb);
  3130. extension_and_user_data(s, gb, 0);
  3131. } else if (s->studio_profile) {
  3132. avpriv_request_sample(s->avctx, "Mix of studio and non studio profile");
  3133. return AVERROR_PATCHWELCOME;
  3134. }
  3135. s->avctx->profile = profile;
  3136. s->avctx->level = level;
  3137. } else if (startcode == VISUAL_OBJ_STARTCODE) {
  3138. if (s->studio_profile) {
  3139. if ((ret = decode_studiovisualobject(ctx, gb)) < 0)
  3140. return ret;
  3141. } else
  3142. mpeg4_decode_visual_object(s, gb);
  3143. } else if (startcode == VOP_STARTCODE) {
  3144. break;
  3145. }
  3146. align_get_bits(gb);
  3147. startcode = 0xff;
  3148. }
  3149. end:
  3150. if (s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY)
  3151. s->low_delay = 1;
  3152. if (s->studio_profile) {
  3153. if (!s->avctx->bits_per_raw_sample) {
  3154. av_log(s->avctx, AV_LOG_ERROR, "Missing VOL header\n");
  3155. return AVERROR_INVALIDDATA;
  3156. }
  3157. return decode_studio_vop_header(ctx, gb);
  3158. } else
  3159. return decode_vop_header(ctx, gb, parse_only);
  3160. }
  3161. #if CONFIG_MPEG4_DECODER
  3162. static av_cold void permute_quant_matrix(uint16_t matrix[64],
  3163. const uint8_t new_perm[64],
  3164. const uint8_t old_perm[64])
  3165. {
  3166. uint16_t tmp[64];
  3167. memcpy(tmp, matrix, sizeof(tmp));
  3168. for (int i = 0; i < 64; ++i)
  3169. matrix[new_perm[i]] = tmp[old_perm[i]];
  3170. }
  3171. static av_cold void switch_to_xvid_idct(AVCodecContext *const avctx,
  3172. MpegEncContext *const s)
  3173. {
  3174. uint8_t old_permutation[64];
  3175. memcpy(old_permutation, s->idsp.idct_permutation, sizeof(old_permutation));
  3176. avctx->idct_algo = FF_IDCT_XVID;
  3177. ff_mpv_idct_init(s);
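/* Scan tables and quant matrices are stored in the IDCT's permutation order;
 * after switching to the Xvid IDCT they have to be rebuilt/re-permuted for the
 * new permutation. */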
  3178. ff_permute_scantable(s->permutated_intra_h_scantable,
  3179. s->alternate_scan ? ff_alternate_vertical_scan : ff_alternate_horizontal_scan,
  3180. s->idsp.idct_permutation);
  3181. // Normal (i.e. non-studio) MPEG-4 does not use the chroma matrices.
  3182. permute_quant_matrix(s->inter_matrix, s->idsp.idct_permutation, old_permutation);
  3183. permute_quant_matrix(s->intra_matrix, s->idsp.idct_permutation, old_permutation);
  3184. }
  3185. void ff_mpeg4_workaround_bugs(AVCodecContext *avctx)
  3186. {
  3187. Mpeg4DecContext *ctx = avctx->priv_data;
  3188. H263DecContext *const h = &ctx->h;
  3189. if (ctx->xvid_build == -1 && ctx->divx_version == -1 && ctx->lavc_build == -1) {
  3190. if (h->c.codec_tag == AV_RL32("XVID") ||
  3191. h->c.codec_tag == AV_RL32("XVIX") ||
  3192. h->c.codec_tag == AV_RL32("RMP4") ||
  3193. h->c.codec_tag == AV_RL32("ZMP4") ||
  3194. h->c.codec_tag == AV_RL32("SIPP"))
  3195. ctx->xvid_build = 0;
  3196. }
  3197. if (ctx->xvid_build == -1 && ctx->divx_version == -1 && ctx->lavc_build == -1)
  3198. if (h->c.codec_tag == AV_RL32("DIVX") && ctx->vo_type == 0 &&
  3199. ctx->vol_control_parameters == 0)
  3200. ctx->divx_version = 400; // divx 4
  3201. if (ctx->xvid_build >= 0 && ctx->divx_version >= 0) {
  3202. ctx->divx_version =
  3203. ctx->divx_build = -1;
  3204. }
  3205. if (h->c.workaround_bugs & FF_BUG_AUTODETECT) {
  3206. if (h->c.codec_tag == AV_RL32("XVIX"))
  3207. h->c.workaround_bugs |= FF_BUG_XVID_ILACE;
  3208. if (h->c.codec_tag == AV_RL32("UMP4"))
  3209. h->c.workaround_bugs |= FF_BUG_UMP4;
  3210. if (ctx->divx_version >= 500 && ctx->divx_build < 1814)
  3211. h->c.workaround_bugs |= FF_BUG_QPEL_CHROMA;
  3212. if (ctx->divx_version > 502 && ctx->divx_build < 1814)
  3213. h->c.workaround_bugs |= FF_BUG_QPEL_CHROMA2;
  3214. if (ctx->xvid_build <= 3U)
  3215. h->padding_bug_score = 256 * 256 * 256 * 64;
  3216. if (ctx->xvid_build <= 1U)
  3217. h->c.workaround_bugs |= FF_BUG_QPEL_CHROMA;
  3218. if (ctx->xvid_build <= 12U)
  3219. h->c.workaround_bugs |= FF_BUG_EDGE;
  3220. if (ctx->xvid_build <= 32U)
  3221. h->c.workaround_bugs |= FF_BUG_DC_CLIP;
  3222. #define SET_QPEL_FUNC(postfix1, postfix2) \
  3223. h->c.qdsp.put_ ## postfix1 = ff_put_ ## postfix2; \
  3224. h->c.qdsp.put_no_rnd_ ## postfix1 = ff_put_no_rnd_ ## postfix2; \
  3225. h->c.qdsp.avg_ ## postfix1 = ff_avg_ ## postfix2;
  3226. if (ctx->lavc_build < 4653U)
  3227. h->c.workaround_bugs |= FF_BUG_STD_QPEL;
  3228. if (ctx->lavc_build < 4655U)
  3229. h->c.workaround_bugs |= FF_BUG_DIRECT_BLOCKSIZE;
  3230. if (ctx->lavc_build < 4670U)
  3231. h->c.workaround_bugs |= FF_BUG_EDGE;
  3232. if (ctx->lavc_build <= 4712U)
  3233. h->c.workaround_bugs |= FF_BUG_DC_CLIP;
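/* Newer libavcodec builds are packed as (major<<16)|(minor<<8)|micro in
 * decode_user_data(); a micro value >= 100 identifies FFmpeg releases, to which
 * the IEDGE workaround range below applies. */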
  3234. if ((ctx->lavc_build&0xFF) >= 100) {
  3235. if (ctx->lavc_build > 3621476 && ctx->lavc_build < 3752552 &&
  3236. (ctx->lavc_build < 3752037 || ctx->lavc_build > 3752191) // 3.2.1+
  3237. )
  3238. h->c.workaround_bugs |= FF_BUG_IEDGE;
  3239. }
  3240. if (ctx->divx_version >= 0)
  3241. h->c.workaround_bugs |= FF_BUG_DIRECT_BLOCKSIZE;
  3242. if (ctx->divx_version == 501 && ctx->divx_build == 20020416)
  3243. h->padding_bug_score = 256 * 256 * 256 * 64;
  3244. if (ctx->divx_version < 500U)
  3245. h->c.workaround_bugs |= FF_BUG_EDGE;
  3246. if (ctx->divx_version >= 0)
  3247. h->c.workaround_bugs |= FF_BUG_HPEL_CHROMA;
  3248. }
  3249. if (h->c.workaround_bugs & FF_BUG_STD_QPEL) {
  3250. SET_QPEL_FUNC(qpel_pixels_tab[0][5], qpel16_mc11_old_c)
  3251. SET_QPEL_FUNC(qpel_pixels_tab[0][7], qpel16_mc31_old_c)
  3252. SET_QPEL_FUNC(qpel_pixels_tab[0][9], qpel16_mc12_old_c)
  3253. SET_QPEL_FUNC(qpel_pixels_tab[0][11], qpel16_mc32_old_c)
  3254. SET_QPEL_FUNC(qpel_pixels_tab[0][13], qpel16_mc13_old_c)
  3255. SET_QPEL_FUNC(qpel_pixels_tab[0][15], qpel16_mc33_old_c)
  3256. SET_QPEL_FUNC(qpel_pixels_tab[1][5], qpel8_mc11_old_c)
  3257. SET_QPEL_FUNC(qpel_pixels_tab[1][7], qpel8_mc31_old_c)
  3258. SET_QPEL_FUNC(qpel_pixels_tab[1][9], qpel8_mc12_old_c)
  3259. SET_QPEL_FUNC(qpel_pixels_tab[1][11], qpel8_mc32_old_c)
  3260. SET_QPEL_FUNC(qpel_pixels_tab[1][13], qpel8_mc13_old_c)
  3261. SET_QPEL_FUNC(qpel_pixels_tab[1][15], qpel8_mc33_old_c)
  3262. }
  3263. if (avctx->debug & FF_DEBUG_BUGS)
  3264. av_log(h->c.avctx, AV_LOG_DEBUG,
  3265. "bugs: %X lavc_build:%d xvid_build:%d divx_version:%d divx_build:%d %s\n",
  3266. h->c.workaround_bugs, ctx->lavc_build, ctx->xvid_build,
  3267. ctx->divx_version, ctx->divx_build, h->divx_packed ? "p" : "");
  3268. if (ctx->xvid_build >= 0 &&
  3269. avctx->idct_algo == FF_IDCT_AUTO && !h->c.studio_profile) {
  3270. switch_to_xvid_idct(avctx, &h->c);
  3271. }
  3272. }
  3273. static int mpeg4_decode_picture_header(H263DecContext *const h)
  3274. {
  3275. Mpeg4DecContext *const ctx = h263_to_mpeg4(h);
  3276. h->skipped_last_frame = 0;
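/* A packed divx/xvid packet may have carried a second VOP, which
 * ff_mpeg4_frame_end() stashed in bitstream_buffer; if present (and the new
 * packet is packed or merely a tiny N-VOP), decode that stashed VOP instead. */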
  3277. if (ctx->bitstream_buffer) {
  3278. int buf_size = get_bits_left(&h->gb) / 8U;
  3279. int bitstream_buffer_size = ctx->bitstream_buffer->size;
  3280. const uint8_t *buf = h->gb.buffer;
  3281. if (h->divx_packed) {
  3282. for (int i = 0; i < buf_size - 3; i++) {
  3283. if (buf[i] == 0 && buf[i+1] == 0 && buf[i+2] == 1) {
  3284. if (buf[i+3] == 0xB0) {
  3285. av_log(h->c.avctx, AV_LOG_WARNING, "Discarding excessive bitstream in packed xvid\n");
  3286. bitstream_buffer_size = 0;
  3287. }
  3288. break;
  3289. }
  3290. }
  3291. }
  3292. ctx->bitstream_buffer->size = 0;
  3293. if (bitstream_buffer_size && (h->divx_packed || buf_size <= MAX_NVOP_SIZE)) {// divx 5.01+/xvid frame reorder
  3294. int ret = init_get_bits8(&h->gb, ctx->bitstream_buffer->data,
  3295. bitstream_buffer_size);
  3296. if (ret < 0)
  3297. return ret;
  3298. } else
  3299. av_buffer_unref(&ctx->bitstream_buffer);
  3300. }
  3301. return ff_mpeg4_parse_picture_header(ctx, &h->gb, 0, 0);
  3302. }
  3303. int ff_mpeg4_frame_end(AVCodecContext *avctx, const AVPacket *pkt)
  3304. {
  3305. Mpeg4DecContext *ctx = avctx->priv_data;
  3306. H263DecContext *const h = &ctx->h;
  3307. int ret;
  3308. av_assert1(!ctx->bitstream_buffer || !ctx->bitstream_buffer->size);
  3309. /* divx 5.01+ bitstream reorder stuff */
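/* In 'packed B-frames' mode a packet can contain a second VOP after the one just
 * decoded; if a further VOP startcode with coding type I or B follows the current
 * position, keep the remainder of the packet in bitstream_buffer for the next call. */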
  3310. if (h->divx_packed) {
  3311. int current_pos = ctx->bitstream_buffer && h->gb.buffer == ctx->bitstream_buffer->data ? 0 : (get_bits_count(&h->gb) >> 3);
  3312. int startcode_found = 0;
  3313. uint8_t *buf = pkt->data;
  3314. int buf_size = pkt->size;
  3315. if (buf_size - current_pos > 7) {
  3316. int i;
  3317. for (i = current_pos; i < buf_size - 4; i++)
  3318. if (buf[i] == 0 &&
  3319. buf[i + 1] == 0 &&
  3320. buf[i + 2] == 1 &&
  3321. buf[i + 3] == 0xB6) {
  3322. startcode_found = !(buf[i + 4] & 0x40);
  3323. break;
  3324. }
  3325. }
  3326. if (startcode_found) {
  3327. if (!ctx->showed_packed_warning) {
  3328. av_log(h->c.avctx, AV_LOG_INFO, "Video uses a non-standard and "
  3329. "wasteful way to store B-frames ('packed B-frames'). "
  3330. "Consider using the mpeg4_unpack_bframes bitstream filter without encoding but stream copy to fix it.\n");
  3331. ctx->showed_packed_warning = 1;
  3332. }
  3333. ret = av_buffer_replace(&ctx->bitstream_buffer, pkt->buf);
  3334. if (ret < 0)
  3335. return ret;
  3336. ctx->bitstream_buffer->data = buf + current_pos;
  3337. ctx->bitstream_buffer->size = buf_size - current_pos;
  3338. }
  3339. }
  3340. return 0;
  3341. }
  3342. #if HAVE_THREADS
  3343. static av_cold void clear_context(MpegEncContext *s)
  3344. {
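/* Null out all pointers and buffers that were copied from the source context so
 * the new thread context does not alias the other thread's allocations. */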
  3345. memset(&s->buffer_pools, 0, sizeof(s->buffer_pools));
  3346. memset(&s->next_pic, 0, sizeof(s->next_pic));
  3347. memset(&s->last_pic, 0, sizeof(s->last_pic));
  3348. memset(&s->cur_pic, 0, sizeof(s->cur_pic));
  3349. memset(s->thread_context, 0, sizeof(s->thread_context));
  3350. s->ac_val_base = NULL;
  3351. s->ac_val = NULL;
  3352. memset(&s->sc, 0, sizeof(s->sc));
  3353. s->p_field_mv_table_base = NULL;
  3354. for (int i = 0; i < 2; i++)
  3355. for (int j = 0; j < 2; j++)
  3356. s->p_field_mv_table[i][j] = NULL;
  3357. s->dc_val_base = NULL;
  3358. s->coded_block_base = NULL;
  3359. s->mbintra_table = NULL;
  3360. s->cbp_table = NULL;
  3361. s->pred_dir_table = NULL;
  3362. s->mbskip_table = NULL;
  3363. s->er.error_status_table = NULL;
  3364. s->er.er_temp_buffer = NULL;
  3365. s->mb_index2xy = NULL;
  3366. s->context_initialized = 0;
  3367. s->context_reinit = 0;
  3368. }
  3369. static av_cold int update_mpvctx(MpegEncContext *s, const MpegEncContext *s1)
  3370. {
  3371. AVCodecContext *avctx = s->avctx;
  3372. // FIXME the following leads to a data race; instead copy only
  3373. // the necessary fields.
  3374. memcpy(s, s1, sizeof(*s));
  3375. clear_context(s);
  3376. s->avctx = avctx;
  3377. if (s1->context_initialized) {
  3378. int err = ff_mpv_common_init(s);
  3379. if (err < 0)
  3380. return err;
  3381. }
  3382. return 0;
  3383. }
  3384. static int mpeg4_update_thread_context(AVCodecContext *dst,
  3385. const AVCodecContext *src)
  3386. {
  3387. Mpeg4DecContext *s = dst->priv_data;
  3388. const Mpeg4DecContext *s1 = src->priv_data;
  3389. int init = s->h.c.context_initialized;
  3390. int ret;
  3391. if (!init) {
  3392. ret = update_mpvctx(&s->h.c, &s1->h.c);
  3393. if (ret < 0)
  3394. return ret;
  3395. }
  3396. ret = ff_mpeg_update_thread_context(dst, src);
  3397. if (ret < 0)
  3398. return ret;
  3399. // copy all the necessary fields explicitly
  3400. s->time_increment_bits = s1->time_increment_bits;
  3401. s->shape = s1->shape;
  3402. s->vol_sprite_usage = s1->vol_sprite_usage;
  3403. s->sprite_brightness_change = s1->sprite_brightness_change;
  3404. s->sprite_warping_accuracy = s1->sprite_warping_accuracy;
  3405. s->num_sprite_warping_points = s1->num_sprite_warping_points;
  3406. s->h.data_partitioning = s1->h.data_partitioning;
  3407. s->mpeg_quant = s1->mpeg_quant;
  3408. s->rvlc = s1->rvlc;
  3409. s->resync_marker = s1->resync_marker;
  3410. s->t_frame = s1->t_frame;
  3411. s->new_pred = s1->new_pred;
  3412. s->enhancement_type = s1->enhancement_type;
  3413. s->scalability = s1->scalability;
  3414. s->intra_dc_threshold = s1->intra_dc_threshold;
  3415. s->h.divx_packed = s1->h.divx_packed;
  3416. s->divx_version = s1->divx_version;
  3417. s->divx_build = s1->divx_build;
  3418. s->xvid_build = s1->xvid_build;
  3419. s->lavc_build = s1->lavc_build;
  3420. s->vo_type = s1->vo_type;
  3421. s->showed_packed_warning = s1->showed_packed_warning;
  3422. s->vol_control_parameters = s1->vol_control_parameters;
  3423. s->cplx_estimation_trash_i = s1->cplx_estimation_trash_i;
  3424. s->cplx_estimation_trash_p = s1->cplx_estimation_trash_p;
  3425. s->cplx_estimation_trash_b = s1->cplx_estimation_trash_b;
  3426. s->rgb = s1->rgb;
  3427. s->h.skipped_last_frame = s1->h.skipped_last_frame;
  3428. s->h.padding_bug_score = s1->h.padding_bug_score; // FIXME: racy
  3429. s->h.picture_number = s1->h.picture_number;
  3430. memcpy(s->sprite_shift, s1->sprite_shift, sizeof(s1->sprite_shift));
  3431. memcpy(s->sprite_traj, s1->sprite_traj, sizeof(s1->sprite_traj));
  3432. return av_buffer_replace(&s->bitstream_buffer, s1->bitstream_buffer);
  3433. }
  3434. static int mpeg4_update_thread_context_for_user(AVCodecContext *dst,
  3435. const AVCodecContext *src)
  3436. {
  3437. H263DecContext *const h = dst->priv_data;
  3438. const H263DecContext *const h1 = src->priv_data;
  3439. h->c.quarter_sample = h1->c.quarter_sample;
  3440. h->divx_packed = h1->divx_packed;
  3441. return 0;
  3442. }
  3443. #endif
  3444. static av_cold void mpeg4_init_static(void)
  3445. {
  3446. static VLCElem vlc_buf[6498];
  3447. VLCInitState state = VLC_INIT_STATE(vlc_buf);
  3448. VLC_INIT_STATIC_TABLE_FROM_LENGTHS(studio_luma_dc, STUDIO_INTRA_BITS, 19,
  3449. &ff_mpeg4_studio_dc_luma[0][1], 2,
  3450. &ff_mpeg4_studio_dc_luma[0][0], 2, 1,
  3451. 0, 0);
  3452. VLC_INIT_STATIC_TABLE_FROM_LENGTHS(studio_chroma_dc, STUDIO_INTRA_BITS, 19,
  3453. &ff_mpeg4_studio_dc_chroma[0][1], 2,
  3454. &ff_mpeg4_studio_dc_chroma[0][0], 2, 1,
  3455. 0, 0);
  3456. for (unsigned i = 0; i < 12; i++) {
  3457. studio_intra_tab[i] =
  3458. ff_vlc_init_tables_from_lengths(&state, STUDIO_INTRA_BITS, 24,
  3459. &ff_mpeg4_studio_intra[i][0][1], 2,
  3460. &ff_mpeg4_studio_intra[i][0][0], 2, 1,
  3461. 0, 0);
  3462. }
  3463. static uint8_t mpeg4_rl_intra_table[2][2 * MAX_RUN + MAX_LEVEL + 3];
  3464. ff_rl_init(&ff_mpeg4_rl_intra, mpeg4_rl_intra_table);
  3465. ff_h263_init_rl_inter();
  3466. INIT_FIRST_VLC_RL(ff_mpeg4_rl_intra, 554);
  3467. VLC_INIT_RL(ff_rvlc_rl_inter, 1072);
  3468. INIT_FIRST_VLC_RL(ff_rvlc_rl_intra, 1072);
  3469. VLC_INIT_STATIC_TABLE(dc_lum, DC_VLC_BITS, 10 /* 13 */,
  3470. &ff_mpeg4_DCtab_lum[0][1], 2, 1,
  3471. &ff_mpeg4_DCtab_lum[0][0], 2, 1, 0);
  3472. VLC_INIT_STATIC_TABLE(dc_chrom, DC_VLC_BITS, 10 /* 13 */,
  3473. &ff_mpeg4_DCtab_chrom[0][1], 2, 1,
  3474. &ff_mpeg4_DCtab_chrom[0][0], 2, 1, 0);
  3475. VLC_INIT_STATIC_TABLE_FROM_LENGTHS(sprite_trajectory, SPRITE_TRAJ_VLC_BITS, 15,
  3476. ff_sprite_trajectory_lens, 1,
  3477. NULL, 0, 0, 0, 0);
  3478. VLC_INIT_STATIC_SPARSE_TABLE(mb_type_b_vlc, MB_TYPE_B_VLC_BITS, 4,
  3479. &ff_mb_type_b_tab[0][1], 2, 1,
  3480. &ff_mb_type_b_tab[0][0], 2, 1,
  3481. mb_type_b_map, 2, 2, 0);
  3482. }
  3483. static av_cold int decode_init(AVCodecContext *avctx)
  3484. {
  3485. static AVOnce init_static_once = AV_ONCE_INIT;
  3486. Mpeg4DecContext *ctx = avctx->priv_data;
  3487. H263DecContext *const h = &ctx->h;
  3488. MPVUnquantDSPContext unquant_dsp_ctx;
  3489. int ret;
  3490. ctx->divx_version =
  3491. ctx->divx_build =
  3492. ctx->xvid_build =
  3493. ctx->lavc_build = -1;
  3494. if ((ret = ff_h263_decode_init(avctx)) < 0)
  3495. return ret;
  3496. ff_mpv_unquantize_init(&unquant_dsp_ctx,
  3497. avctx->flags & AV_CODEC_FLAG_BITEXACT, 0);
  3498. ctx->dct_unquantize_h263_intra = unquant_dsp_ctx.dct_unquantize_h263_intra;
  3499. ctx->dct_unquantize_mpeg2_intra = unquant_dsp_ctx.dct_unquantize_mpeg2_intra;
  3500. // dct_unquantize_inter is only used with MPEG-2 quantizers,
  3501. // so that is all we keep.
  3502. ctx->dct_unquantize_mpeg2_inter = unquant_dsp_ctx.dct_unquantize_mpeg2_inter;
  3503. h->c.y_dc_scale_table = ff_mpeg4_y_dc_scale_table;
  3504. h->c.c_dc_scale_table = ff_mpeg4_c_dc_scale_table;
  3505. h->c.h263_pred = 1;
  3506. h->c.low_delay = 0; /* default, might be overridden in the vol header during header parsing */
  3507. h->decode_header = mpeg4_decode_picture_header;
  3508. h->decode_mb = mpeg4_decode_mb;
  3509. ctx->time_increment_bits = 4; /* default value for broken headers */
  3510. ctx->quant_precision = 5;
  3511. avctx->chroma_sample_location = AVCHROMA_LOC_LEFT;
  3512. ff_qpeldsp_init(&h->c.qdsp);
  3513. ff_mpeg4videodsp_init(&ctx->mdsp);
  3514. ff_thread_once(&init_static_once, mpeg4_init_static);
  3515. /* Must be after initializing the MPEG-4 static tables */
  3516. if (avctx->extradata_size && !avctx->internal->is_copy) {
  3517. GetBitContext gb;
  3518. if (init_get_bits8(&gb, avctx->extradata, avctx->extradata_size) >= 0)
  3519. ff_mpeg4_parse_picture_header(ctx, &gb, 1, 0);
  3520. }
  3521. return 0;
  3522. }
  3523. static av_cold void mpeg4_flush(AVCodecContext *avctx)
  3524. {
  3525. Mpeg4DecContext *const ctx = avctx->priv_data;
  3526. av_buffer_unref(&ctx->bitstream_buffer);
  3527. ff_mpeg_flush(avctx);
  3528. }
  3529. static av_cold int mpeg4_close(AVCodecContext *avctx)
  3530. {
  3531. Mpeg4DecContext *const ctx = avctx->priv_data;
  3532. av_buffer_unref(&ctx->bitstream_buffer);
  3533. return ff_mpv_decode_close(avctx);
  3534. }
  3535. #define OFFSET(x) offsetof(H263DecContext, x)
  3536. #define FLAGS AV_OPT_FLAG_EXPORT | AV_OPT_FLAG_READONLY
  3537. static const AVOption mpeg4_options[] = {
  3538. {"quarter_sample", "1/4 subpel MC", OFFSET(c.quarter_sample), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, FLAGS},
  3539. {"divx_packed", "divx style packed b frames", OFFSET(divx_packed), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, FLAGS},
  3540. {NULL}
  3541. };
  3542. static const AVClass mpeg4_class = {
  3543. .class_name = "MPEG4 Video Decoder",
  3544. .item_name = av_default_item_name,
  3545. .option = mpeg4_options,
  3546. .version = LIBAVUTIL_VERSION_INT,
  3547. };
  3548. const FFCodec ff_mpeg4_decoder = {
  3549. .p.name = "mpeg4",
  3550. CODEC_LONG_NAME("MPEG-4 part 2"),
  3551. .p.type = AVMEDIA_TYPE_VIDEO,
  3552. .p.id = AV_CODEC_ID_MPEG4,
  3553. .priv_data_size = sizeof(Mpeg4DecContext),
  3554. .init = decode_init,
  3555. FF_CODEC_DECODE_CB(ff_h263_decode_frame),
  3556. .close = mpeg4_close,
  3557. .p.capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND | AV_CODEC_CAP_DR1 |
  3558. AV_CODEC_CAP_DELAY | AV_CODEC_CAP_FRAME_THREADS,
  3559. .caps_internal = FF_CODEC_CAP_INIT_CLEANUP |
  3560. FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM,
  3561. .flush = mpeg4_flush,
  3562. .p.max_lowres = 3,
  3563. .p.profiles = NULL_IF_CONFIG_SMALL(ff_mpeg4_video_profiles),
  3564. UPDATE_THREAD_CONTEXT(mpeg4_update_thread_context),
  3565. UPDATE_THREAD_CONTEXT_FOR_USER(mpeg4_update_thread_context_for_user),
  3566. .p.priv_class = &mpeg4_class,
  3567. .hw_configs = (const AVCodecHWConfigInternal *const []) {
  3568. #if CONFIG_MPEG4_NVDEC_HWACCEL
  3569. HWACCEL_NVDEC(mpeg4),
  3570. #endif
  3571. #if CONFIG_MPEG4_VAAPI_HWACCEL
  3572. HWACCEL_VAAPI(mpeg4),
  3573. #endif
  3574. #if CONFIG_MPEG4_VDPAU_HWACCEL
  3575. HWACCEL_VDPAU(mpeg4),
  3576. #endif
  3577. #if CONFIG_MPEG4_VIDEOTOOLBOX_HWACCEL
  3578. HWACCEL_VIDEOTOOLBOX(mpeg4),
  3579. #endif
  3580. NULL
  3581. },
  3582. };
  3583. #endif /* CONFIG_MPEG4_DECODER */