/*
 * Loongson SIMD optimized vp8dsp
 *
 * Copyright (c) 2016 Loongson Technology Corporation Limited
 * Copyright (c) 2016 Zhou Xiaoyong <zhouxiaoyong@loongson.cn>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "vp8dsp_mips.h"
#include "constants.h"
#include "libavutil/attributes.h"
#include "libavutil/intfloat.h"
#include "libavutil/mips/mmiutils.h"
#include "libavutil/mem_internal.h"

#define DECLARE_DOUBLE_1        double db_1
#define DECLARE_DOUBLE_2        double db_2
#define DECLARE_UINT32_T        uint32_t it_1
#define RESTRICT_ASM_DOUBLE_1   [db_1]"=&f"(db_1)
#define RESTRICT_ASM_DOUBLE_2   [db_2]"=&f"(db_2)
#define RESTRICT_ASM_UINT32_T   [it_1]"=&r"(it_1)
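
/*
 * db_1/db_2/it_1 are shared scratch operands for the asm helper macros
 * that follow: the DECLARE_* macros above create them as locals in each
 * function, and the RESTRICT_ASM_* macros list them as early-clobber
 * outputs so the compiler keeps them out of the other operands.
 */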
#define MMI_PCMPGTUB(dst, src1, src2) \
    "pcmpeqb %[db_1], "#src1", "#src2" \n\t" \
    "pmaxub %[db_2], "#src1", "#src2" \n\t" \
    "pcmpeqb %[db_2], %[db_2], "#src1" \n\t" \
    "pxor "#dst", %[db_2], %[db_1] \n\t"
#define MMI_BTOH(dst_l, dst_r, src) \
    "pxor %[db_1], %[db_1], %[db_1] \n\t" \
    "pcmpgtb %[db_2], %[db_1], "#src" \n\t" \
    "punpcklbh "#dst_r", "#src", %[db_2] \n\t" \
    "punpckhbh "#dst_l", "#src", %[db_2] \n\t"
#define MMI_VP8_LOOP_FILTER \
    /* Calculation of hev */ \
    "dmtc1 %[thresh], %[ftmp3] \n\t" \
    "punpcklbh %[ftmp3], %[ftmp3], %[ftmp3] \n\t" \
    "punpcklhw %[ftmp3], %[ftmp3], %[ftmp3] \n\t" \
    "punpcklwd %[ftmp3], %[ftmp3], %[ftmp3] \n\t" \
    "pasubub %[ftmp0], %[p1], %[p0] \n\t" \
    "pasubub %[ftmp1], %[q1], %[q0] \n\t" \
    "pmaxub %[ftmp0], %[ftmp0], %[ftmp1] \n\t" \
    MMI_PCMPGTUB(%[hev], %[ftmp0], %[ftmp3]) \
    /* Calculation of mask */ \
    "pasubub %[ftmp1], %[p0], %[q0] \n\t" \
    "paddusb %[ftmp1], %[ftmp1], %[ftmp1] \n\t" \
    "pasubub %[ftmp2], %[p1], %[q1] \n\t" \
    "li %[tmp0], 0x09 \n\t" \
    "dmtc1 %[tmp0], %[ftmp3] \n\t" \
    PSRLB_MMI(%[ftmp2], %[ftmp3], %[ftmp4], %[ftmp5], %[ftmp2]) \
    "paddusb %[ftmp1], %[ftmp1], %[ftmp2] \n\t" \
    "dmtc1 %[e], %[ftmp3] \n\t" \
    "punpcklbh %[ftmp3], %[ftmp3], %[ftmp3] \n\t" \
    "punpcklhw %[ftmp3], %[ftmp3], %[ftmp3] \n\t" \
    "punpcklwd %[ftmp3], %[ftmp3], %[ftmp3] \n\t" \
    MMI_PCMPGTUB(%[mask], %[ftmp1], %[ftmp3]) \
    "pmaxub %[mask], %[mask], %[ftmp0] \n\t" \
    "pasubub %[ftmp1], %[p3], %[p2] \n\t" \
    "pasubub %[ftmp2], %[p2], %[p1] \n\t" \
    "pmaxub %[ftmp1], %[ftmp1], %[ftmp2] \n\t" \
    "pmaxub %[mask], %[mask], %[ftmp1] \n\t" \
    "pasubub %[ftmp1], %[q3], %[q2] \n\t" \
    "pasubub %[ftmp2], %[q2], %[q1] \n\t" \
    "pmaxub %[ftmp1], %[ftmp1], %[ftmp2] \n\t" \
    "pmaxub %[mask], %[mask], %[ftmp1] \n\t" \
    "dmtc1 %[i], %[ftmp3] \n\t" \
    "punpcklbh %[ftmp3], %[ftmp3], %[ftmp3] \n\t" \
    "punpcklhw %[ftmp3], %[ftmp3], %[ftmp3] \n\t" \
    "punpcklwd %[ftmp3], %[ftmp3], %[ftmp3] \n\t" \
    MMI_PCMPGTUB(%[mask], %[mask], %[ftmp3]) \
    "pcmpeqw %[ftmp3], %[ftmp3], %[ftmp3] \n\t" \
    "pxor %[mask], %[mask], %[ftmp3] \n\t" \
    /* VP8_MBFILTER */ \
    "li %[tmp0], 0x80808080 \n\t" \
    "dmtc1 %[tmp0], %[ftmp7] \n\t" \
    "punpcklwd %[ftmp7], %[ftmp7], %[ftmp7] \n\t" \
    "pxor %[p2], %[p2], %[ftmp7] \n\t" \
    "pxor %[p1], %[p1], %[ftmp7] \n\t" \
    "pxor %[p0], %[p0], %[ftmp7] \n\t" \
    "pxor %[q0], %[q0], %[ftmp7] \n\t" \
    "pxor %[q1], %[q1], %[ftmp7] \n\t" \
    "pxor %[q2], %[q2], %[ftmp7] \n\t" \
    "psubsb %[ftmp4], %[p1], %[q1] \n\t" \
    "psubb %[ftmp5], %[q0], %[p0] \n\t" \
    MMI_BTOH(%[ftmp1], %[ftmp0], %[ftmp5]) \
    MMI_BTOH(%[ftmp3], %[ftmp2], %[ftmp4]) \
    /* Right part */ \
    "paddh %[ftmp5], %[ftmp0], %[ftmp0] \n\t" \
    "paddh %[ftmp0], %[ftmp0], %[ftmp5] \n\t" \
    "paddh %[ftmp0], %[ftmp2], %[ftmp0] \n\t" \
    /* Left part */ \
    "paddh %[ftmp5], %[ftmp1], %[ftmp1] \n\t" \
    "paddh %[ftmp1], %[ftmp1], %[ftmp5] \n\t" \
    "paddh %[ftmp1], %[ftmp3], %[ftmp1] \n\t" \
    /* Combine left and right part */ \
    "packsshb %[ftmp1], %[ftmp0], %[ftmp1] \n\t" \
    "pand %[ftmp1], %[ftmp1], %[mask] \n\t" \
    "pand %[ftmp2], %[ftmp1], %[hev] \n\t" \
    "li %[tmp0], 0x04040404 \n\t" \
    "dmtc1 %[tmp0], %[ftmp0] \n\t" \
    "punpcklwd %[ftmp0], %[ftmp0], %[ftmp0] \n\t" \
    "paddsb %[ftmp3], %[ftmp2], %[ftmp0] \n\t" \
    "li %[tmp0], 0x0B \n\t" \
    "dmtc1 %[tmp0], %[ftmp4] \n\t" \
    PSRAB_MMI(%[ftmp3], %[ftmp4], %[ftmp5], %[ftmp6], %[ftmp3]) \
    "li %[tmp0], 0x03030303 \n\t" \
    "dmtc1 %[tmp0], %[ftmp0] \n\t" \
    "punpcklwd %[ftmp0], %[ftmp0], %[ftmp0] \n\t" \
    "paddsb %[ftmp4], %[ftmp2], %[ftmp0] \n\t" \
    "li %[tmp0], 0x0B \n\t" \
    "dmtc1 %[tmp0], %[ftmp2] \n\t" \
    PSRAB_MMI(%[ftmp4], %[ftmp2], %[ftmp5], %[ftmp6], %[ftmp4]) \
    "psubsb %[q0], %[q0], %[ftmp3] \n\t" \
    "paddsb %[p0], %[p0], %[ftmp4] \n\t" \
    /* filt_val &= ~hev */ \
    "pcmpeqw %[ftmp0], %[ftmp0], %[ftmp0] \n\t" \
    "pxor %[hev], %[hev], %[ftmp0] \n\t" \
    "pand %[ftmp1], %[ftmp1], %[hev] \n\t" \
    MMI_BTOH(%[ftmp5], %[ftmp6], %[ftmp1]) \
    "li %[tmp0], 0x07 \n\t" \
    "dmtc1 %[tmp0], %[ftmp2] \n\t" \
    "li %[tmp0], 0x001b001b \n\t" \
    "dmtc1 %[tmp0], %[ftmp1] \n\t" \
    "punpcklwd %[ftmp1], %[ftmp1], %[ftmp1] \n\t" \
    "li %[tmp0], 0x003f003f \n\t" \
    "dmtc1 %[tmp0], %[ftmp0] \n\t" \
    "punpcklwd %[ftmp0], %[ftmp0], %[ftmp0] \n\t" \
    /* Right part */ \
    "pmullh %[ftmp3], %[ftmp6], %[ftmp1] \n\t" \
    "paddh %[ftmp3], %[ftmp3], %[ftmp0] \n\t" \
    "psrah %[ftmp3], %[ftmp3], %[ftmp2] \n\t" \
    /* Left part */ \
    "pmullh %[ftmp4], %[ftmp5], %[ftmp1] \n\t" \
    "paddh %[ftmp4], %[ftmp4], %[ftmp0] \n\t" \
    "psrah %[ftmp4], %[ftmp4], %[ftmp2] \n\t" \
    /* Combine left and right part */ \
    "packsshb %[ftmp4], %[ftmp3], %[ftmp4] \n\t" \
    "psubsb %[q0], %[q0], %[ftmp4] \n\t" \
    "pxor %[q0], %[q0], %[ftmp7] \n\t" \
    "paddsb %[p0], %[p0], %[ftmp4] \n\t" \
    "pxor %[p0], %[p0], %[ftmp7] \n\t" \
    "li %[tmp0], 0x00120012 \n\t" \
    "dmtc1 %[tmp0], %[ftmp1] \n\t" \
    "punpcklwd %[ftmp1], %[ftmp1], %[ftmp1] \n\t" \
    /* Right part */ \
    "pmullh %[ftmp3], %[ftmp6], %[ftmp1] \n\t" \
    "paddh %[ftmp3], %[ftmp3], %[ftmp0] \n\t" \
    "psrah %[ftmp3], %[ftmp3], %[ftmp2] \n\t" \
    /* Left part */ \
    "pmullh %[ftmp4], %[ftmp5], %[ftmp1] \n\t" \
    "paddh %[ftmp4], %[ftmp4], %[ftmp0] \n\t" \
    "psrah %[ftmp4], %[ftmp4], %[ftmp2] \n\t" \
    /* Combine left and right part */ \
    "packsshb %[ftmp4], %[ftmp3], %[ftmp4] \n\t" \
    "psubsb %[q1], %[q1], %[ftmp4] \n\t" \
    "pxor %[q1], %[q1], %[ftmp7] \n\t" \
    "paddsb %[p1], %[p1], %[ftmp4] \n\t" \
    "pxor %[p1], %[p1], %[ftmp7] \n\t" \
    "li %[tmp0], 0x03 \n\t" \
    "dmtc1 %[tmp0], %[ftmp1] \n\t" \
    /* Right part */ \
    "psllh %[ftmp3], %[ftmp6], %[ftmp1] \n\t" \
    "paddh %[ftmp3], %[ftmp3], %[ftmp6] \n\t" \
    "paddh %[ftmp3], %[ftmp3], %[ftmp0] \n\t" \
    "psrah %[ftmp3], %[ftmp3], %[ftmp2] \n\t" \
    /* Left part */ \
    "psllh %[ftmp4], %[ftmp5], %[ftmp1] \n\t" \
    "paddh %[ftmp4], %[ftmp4], %[ftmp5] \n\t" \
    "paddh %[ftmp4], %[ftmp4], %[ftmp0] \n\t" \
    "psrah %[ftmp4], %[ftmp4], %[ftmp2] \n\t" \
    /* Combine left and right part */ \
    "packsshb %[ftmp4], %[ftmp3], %[ftmp4] \n\t" \
    "psubsb %[q2], %[q2], %[ftmp4] \n\t" \
    "pxor %[q2], %[q2], %[ftmp7] \n\t" \
    "paddsb %[p2], %[p2], %[ftmp4] \n\t" \
    "pxor %[p2], %[p2], %[ftmp7] \n\t"
#define PUT_VP8_EPEL4_H6_MMI(src, dst) \
    MMI_ULWC1(%[ftmp1], src, 0x00) \
    "punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t" \
    "pmullh %[ftmp3], %[ftmp2], %[filter2] \n\t" \
    \
    MMI_ULWC1(%[ftmp1], src, -0x01) \
    "punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t" \
    "pmullh %[ftmp2], %[ftmp2], %[filter1] \n\t" \
    "psubsh %[ftmp3], %[ftmp3], %[ftmp2] \n\t" \
    \
    MMI_ULWC1(%[ftmp1], src, -0x02) \
    "punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t" \
    "pmullh %[ftmp2], %[ftmp2], %[filter0] \n\t" \
    "paddsh %[ftmp5], %[ftmp3], %[ftmp2] \n\t" \
    \
    MMI_ULWC1(%[ftmp1], src, 0x01) \
    "punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t" \
    "pmullh %[ftmp3], %[ftmp2], %[filter3] \n\t" \
    \
    MMI_ULWC1(%[ftmp1], src, 0x02) \
    "punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t" \
    "pmullh %[ftmp2], %[ftmp2], %[filter4] \n\t" \
    "psubsh %[ftmp3], %[ftmp3], %[ftmp2] \n\t" \
    \
    MMI_ULWC1(%[ftmp1], src, 0x03) \
    "punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t" \
    "pmullh %[ftmp2], %[ftmp2], %[filter5] \n\t" \
    "paddsh %[ftmp3], %[ftmp3], %[ftmp2] \n\t" \
    \
    "paddsh %[ftmp3], %[ftmp3], %[ftmp5] \n\t" \
    "paddsh %[ftmp3], %[ftmp3], %[ff_pw_64] \n\t" \
    "psrah %[ftmp3], %[ftmp3], %[ftmp4] \n\t" \
    "packushb %[ftmp1], %[ftmp3], %[ftmp0] \n\t" \
    \
    MMI_SWC1(%[ftmp1], dst, 0x00)
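
/*
 * PUT_VP8_EPEL4_H6_MMI computes, four pixels at a time, the 6-tap FIR
 * that FILTER_6TAP() below expresses in scalar form:
 *   dst[x] = clip_uint8((F[2]*src[x]   - F[1]*src[x-1] +
 *                        F[0]*src[x-2] + F[3]*src[x+1] -
 *                        F[4]*src[x+2] + F[5]*src[x+3] + 64) >> 7)
 * with each coefficient pre-broadcast into the filter0..filter5
 * operands; ff_pw_64 supplies the rounding term and the caller is
 * expected to have loaded the shift count of 7 into ftmp4.
 */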
#define PUT_VP8_EPEL4_H4_MMI(src, dst) \
    MMI_ULWC1(%[ftmp1], src, 0x00) \
    "punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t" \
    "pmullh %[ftmp3], %[ftmp2], %[filter2] \n\t" \
    \
    MMI_ULWC1(%[ftmp1], src, -0x01) \
    "punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t" \
    "pmullh %[ftmp2], %[ftmp2], %[filter1] \n\t" \
    "psubsh %[ftmp5], %[ftmp3], %[ftmp2] \n\t" \
    \
    MMI_ULWC1(%[ftmp1], src, 0x01) \
    "punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t" \
    "pmullh %[ftmp3], %[ftmp2], %[filter3] \n\t" \
    \
    MMI_ULWC1(%[ftmp1], src, 0x02) \
    "punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t" \
    "pmullh %[ftmp2], %[ftmp2], %[filter4] \n\t" \
    "psubh %[ftmp3], %[ftmp3], %[ftmp2] \n\t" \
    \
    "paddsh %[ftmp3], %[ftmp3], %[ftmp5] \n\t" \
    \
    "paddsh %[ftmp3], %[ftmp3], %[ff_pw_64] \n\t" \
    "psrah %[ftmp3], %[ftmp3], %[ftmp4] \n\t" \
    \
    "packushb %[ftmp1], %[ftmp3], %[ftmp0] \n\t" \
    MMI_SWC1(%[ftmp1], dst, 0x00)

#define PUT_VP8_EPEL4_V6_MMI(src, src1, dst, srcstride) \
    MMI_ULWC1(%[ftmp1], src, 0x00) \
    "punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t" \
    "pmullh %[ftmp3], %[ftmp2], %[filter2] \n\t" \
    \
    PTR_SUBU ""#src1", "#src", "#srcstride" \n\t" \
    MMI_ULWC1(%[ftmp1], src1, 0x00) \
    "punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t" \
    "pmullh %[ftmp2], %[ftmp2], %[filter1] \n\t" \
    "psubsh %[ftmp3], %[ftmp3], %[ftmp2] \n\t" \
    \
    PTR_SUBU ""#src1", "#src1", "#srcstride" \n\t" \
    MMI_ULWC1(%[ftmp1], src1, 0x00) \
    "punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t" \
    "pmullh %[ftmp2], %[ftmp2], %[filter0] \n\t" \
    "paddsh %[ftmp5], %[ftmp3], %[ftmp2] \n\t" \
    \
    PTR_ADDU ""#src1", "#src", "#srcstride" \n\t" \
    MMI_ULWC1(%[ftmp1], src1, 0x00) \
    "punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t" \
    "pmullh %[ftmp3], %[ftmp2], %[filter3] \n\t" \
    \
    PTR_ADDU ""#src1", "#src1", "#srcstride" \n\t" \
    MMI_ULWC1(%[ftmp1], src1, 0x00) \
    "punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t" \
    "pmullh %[ftmp2], %[ftmp2], %[filter4] \n\t" \
    "psubsh %[ftmp3], %[ftmp3], %[ftmp2] \n\t" \
    \
    PTR_ADDU ""#src1", "#src1", "#srcstride" \n\t" \
    MMI_ULWC1(%[ftmp1], src1, 0x00) \
    "punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t" \
    "pmullh %[ftmp2], %[ftmp2], %[filter5] \n\t" \
    "paddsh %[ftmp3], %[ftmp3], %[ftmp2] \n\t" \
    \
    "paddsh %[ftmp3], %[ftmp3], %[ftmp5] \n\t" \
    \
    "paddsh %[ftmp3], %[ftmp3], %[ff_pw_64] \n\t" \
    "psrah %[ftmp3], %[ftmp3], %[ftmp4] \n\t" \
    "packushb %[ftmp1], %[ftmp3], %[ftmp0] \n\t" \
    \
    MMI_SWC1(%[ftmp1], dst, 0x00)

#define PUT_VP8_EPEL4_V4_MMI(src, src1, dst, srcstride) \
    MMI_ULWC1(%[ftmp1], src, 0x00) \
    "punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t" \
    "pmullh %[ftmp3], %[ftmp2], %[filter2] \n\t" \
    \
    PTR_SUBU ""#src1", "#src", "#srcstride" \n\t" \
    MMI_ULWC1(%[ftmp1], src1, 0x00) \
    "punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t" \
    "pmullh %[ftmp2], %[ftmp2], %[filter1] \n\t" \
    "psubsh %[ftmp5], %[ftmp3], %[ftmp2] \n\t" \
    \
    PTR_ADDU ""#src1", "#src", "#srcstride" \n\t" \
    MMI_ULWC1(%[ftmp1], src1, 0x00) \
    "punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t" \
    "pmullh %[ftmp3], %[ftmp2], %[filter3] \n\t" \
    \
    PTR_ADDU ""#src1", "#src1", "#srcstride" \n\t" \
    MMI_ULWC1(%[ftmp1], src1, 0x00) \
    "punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t" \
    "pmullh %[ftmp2], %[ftmp2], %[filter4] \n\t" \
    "psubsh %[ftmp3], %[ftmp3], %[ftmp2] \n\t" \
    \
    "paddsh %[ftmp3], %[ftmp3], %[ftmp5] \n\t" \
    \
    "paddsh %[ftmp3], %[ftmp3], %[ff_pw_64] \n\t" \
    "psrah %[ftmp3], %[ftmp3], %[ftmp4] \n\t" \
    "packushb %[ftmp1], %[ftmp3], %[ftmp0] \n\t" \
    \
    MMI_SWC1(%[ftmp1], dst, 0x00)

#define PUT_VP8_EPEL8_H6_MMI(src, dst) \
    MMI_ULDC1(%[ftmp1], src, 0x00) \
    "punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t" \
    "punpckhbh %[ftmp3], %[ftmp1], %[ftmp0] \n\t" \
    "pmullh %[ftmp5], %[ftmp2], %[filter2] \n\t" \
    "pmullh %[ftmp6], %[ftmp3], %[filter2] \n\t" \
    \
    MMI_ULDC1(%[ftmp1], src, -0x01) \
    "punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t" \
    "punpckhbh %[ftmp3], %[ftmp1], %[ftmp0] \n\t" \
    "pmullh %[ftmp2], %[ftmp2], %[filter1] \n\t" \
    "pmullh %[ftmp3], %[ftmp3], %[filter1] \n\t" \
    "psubsh %[ftmp5], %[ftmp5], %[ftmp2] \n\t" \
    "psubsh %[ftmp6], %[ftmp6], %[ftmp3] \n\t" \
    \
    MMI_ULDC1(%[ftmp1], src, -0x02) \
    "punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t" \
    "punpckhbh %[ftmp3], %[ftmp1], %[ftmp0] \n\t" \
    "pmullh %[ftmp2], %[ftmp2], %[filter0] \n\t" \
    "pmullh %[ftmp3], %[ftmp3], %[filter0] \n\t" \
    "paddsh %[ftmp7], %[ftmp5], %[ftmp2] \n\t" \
    "paddsh %[ftmp8], %[ftmp6], %[ftmp3] \n\t" \
    \
    MMI_ULDC1(%[ftmp1], src, 0x01) \
    "punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t" \
    "punpckhbh %[ftmp3], %[ftmp1], %[ftmp0] \n\t" \
    "pmullh %[ftmp5], %[ftmp2], %[filter3] \n\t" \
    "pmullh %[ftmp6], %[ftmp3], %[filter3] \n\t" \
    \
    MMI_ULDC1(%[ftmp1], src, 0x02) \
    "punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t" \
    "punpckhbh %[ftmp3], %[ftmp1], %[ftmp0] \n\t" \
    "pmullh %[ftmp2], %[ftmp2], %[filter4] \n\t" \
    "pmullh %[ftmp3], %[ftmp3], %[filter4] \n\t" \
    "psubsh %[ftmp5], %[ftmp5], %[ftmp2] \n\t" \
    "psubsh %[ftmp6], %[ftmp6], %[ftmp3] \n\t" \
    \
    MMI_ULDC1(%[ftmp1], src, 0x03) \
    "punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t" \
    "punpckhbh %[ftmp3], %[ftmp1], %[ftmp0] \n\t" \
    "pmullh %[ftmp2], %[ftmp2], %[filter5] \n\t" \
    "pmullh %[ftmp3], %[ftmp3], %[filter5] \n\t" \
    "paddsh %[ftmp5], %[ftmp5], %[ftmp2] \n\t" \
    "paddsh %[ftmp6], %[ftmp6], %[ftmp3] \n\t" \
    \
    "paddsh %[ftmp5], %[ftmp5], %[ftmp7] \n\t" \
    "paddsh %[ftmp6], %[ftmp6], %[ftmp8] \n\t" \
    \
    "paddsh %[ftmp5], %[ftmp5], %[ff_pw_64] \n\t" \
    "paddsh %[ftmp6], %[ftmp6], %[ff_pw_64] \n\t" \
    "psrah %[ftmp5], %[ftmp5], %[ftmp4] \n\t" \
    "psrah %[ftmp6], %[ftmp6], %[ftmp4] \n\t" \
    "packushb %[ftmp1], %[ftmp5], %[ftmp6] \n\t" \
    \
    MMI_SDC1(%[ftmp1], dst, 0x00)
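
/*
 * PUT_VP8_EPEL8_H6_MMI above and the remaining EPEL8 variants below
 * process eight pixels per row: each 64-bit load is unpacked into a low
 * and a high group of four halfwords (punpcklbh/punpckhbh), the two
 * halves run the same tap sequence in parallel, and packushb recombines
 * them before the store.
 */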
#define PUT_VP8_EPEL8_H4_MMI(src, dst) \
    MMI_ULDC1(%[ftmp1], src, 0x00) \
    "punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t" \
    "punpckhbh %[ftmp3], %[ftmp1], %[ftmp0] \n\t" \
    "pmullh %[ftmp5], %[ftmp2], %[filter2] \n\t" \
    "pmullh %[ftmp6], %[ftmp3], %[filter2] \n\t" \
    \
    MMI_ULDC1(%[ftmp1], src, -0x01) \
    "punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t" \
    "punpckhbh %[ftmp3], %[ftmp1], %[ftmp0] \n\t" \
    "pmullh %[ftmp2], %[ftmp2], %[filter1] \n\t" \
    "pmullh %[ftmp3], %[ftmp3], %[filter1] \n\t" \
    "psubsh %[ftmp7], %[ftmp5], %[ftmp2] \n\t" \
    "psubsh %[ftmp8], %[ftmp6], %[ftmp3] \n\t" \
    \
    MMI_ULDC1(%[ftmp1], src, 0x01) \
    "punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t" \
    "punpckhbh %[ftmp3], %[ftmp1], %[ftmp0] \n\t" \
    "pmullh %[ftmp5], %[ftmp2], %[filter3] \n\t" \
    "pmullh %[ftmp6], %[ftmp3], %[filter3] \n\t" \
    \
    MMI_ULDC1(%[ftmp1], src, 0x02) \
    "punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t" \
    "punpckhbh %[ftmp3], %[ftmp1], %[ftmp0] \n\t" \
    "pmullh %[ftmp2], %[ftmp2], %[filter4] \n\t" \
    "pmullh %[ftmp3], %[ftmp3], %[filter4] \n\t" \
    "psubsh %[ftmp5], %[ftmp5], %[ftmp2] \n\t" \
    "psubsh %[ftmp6], %[ftmp6], %[ftmp3] \n\t" \
    \
    "paddsh %[ftmp5], %[ftmp5], %[ftmp7] \n\t" \
    "paddsh %[ftmp6], %[ftmp6], %[ftmp8] \n\t" \
    \
    "paddsh %[ftmp5], %[ftmp5], %[ff_pw_64] \n\t" \
    "paddsh %[ftmp6], %[ftmp6], %[ff_pw_64] \n\t" \
    "psrah %[ftmp5], %[ftmp5], %[ftmp4] \n\t" \
    "psrah %[ftmp6], %[ftmp6], %[ftmp4] \n\t" \
    \
    "packushb %[ftmp1], %[ftmp5], %[ftmp6] \n\t" \
    MMI_SDC1(%[ftmp1], dst, 0x00)

#define PUT_VP8_EPEL8_V6_MMI(src, src1, dst, srcstride) \
    MMI_ULDC1(%[ftmp1], src, 0x00) \
    "punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t" \
    "punpckhbh %[ftmp3], %[ftmp1], %[ftmp0] \n\t" \
    "pmullh %[ftmp5], %[ftmp2], %[filter2] \n\t" \
    "pmullh %[ftmp6], %[ftmp3], %[filter2] \n\t" \
    \
    PTR_SUBU ""#src1", "#src", "#srcstride" \n\t" \
    MMI_ULDC1(%[ftmp1], src1, 0x00) \
    "punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t" \
    "punpckhbh %[ftmp3], %[ftmp1], %[ftmp0] \n\t" \
    "pmullh %[ftmp2], %[ftmp2], %[filter1] \n\t" \
    "pmullh %[ftmp3], %[ftmp3], %[filter1] \n\t" \
    "psubsh %[ftmp5], %[ftmp5], %[ftmp2] \n\t" \
    "psubsh %[ftmp6], %[ftmp6], %[ftmp3] \n\t" \
    \
    PTR_SUBU ""#src1", "#src1", "#srcstride" \n\t" \
    MMI_ULDC1(%[ftmp1], src1, 0x00) \
    "punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t" \
    "punpckhbh %[ftmp3], %[ftmp1], %[ftmp0] \n\t" \
    "pmullh %[ftmp2], %[ftmp2], %[filter0] \n\t" \
    "pmullh %[ftmp3], %[ftmp3], %[filter0] \n\t" \
    "paddsh %[ftmp7], %[ftmp5], %[ftmp2] \n\t" \
    "paddsh %[ftmp8], %[ftmp6], %[ftmp3] \n\t" \
    \
    PTR_ADDU ""#src1", "#src", "#srcstride" \n\t" \
    MMI_ULDC1(%[ftmp1], src1, 0x00) \
    "punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t" \
    "punpckhbh %[ftmp3], %[ftmp1], %[ftmp0] \n\t" \
    "pmullh %[ftmp5], %[ftmp2], %[filter3] \n\t" \
    "pmullh %[ftmp6], %[ftmp3], %[filter3] \n\t" \
    \
    PTR_ADDU ""#src1", "#src1", "#srcstride" \n\t" \
    MMI_ULDC1(%[ftmp1], src1, 0x00) \
    "punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t" \
    "punpckhbh %[ftmp3], %[ftmp1], %[ftmp0] \n\t" \
    "pmullh %[ftmp2], %[ftmp2], %[filter4] \n\t" \
    "pmullh %[ftmp3], %[ftmp3], %[filter4] \n\t" \
    "psubsh %[ftmp5], %[ftmp5], %[ftmp2] \n\t" \
    "psubsh %[ftmp6], %[ftmp6], %[ftmp3] \n\t" \
    \
    PTR_ADDU ""#src1", "#src1", "#srcstride" \n\t" \
    MMI_ULDC1(%[ftmp1], src1, 0x00) \
    "punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t" \
    "punpckhbh %[ftmp3], %[ftmp1], %[ftmp0] \n\t" \
    "pmullh %[ftmp2], %[ftmp2], %[filter5] \n\t" \
    "pmullh %[ftmp3], %[ftmp3], %[filter5] \n\t" \
    "paddsh %[ftmp5], %[ftmp5], %[ftmp2] \n\t" \
    "paddsh %[ftmp6], %[ftmp6], %[ftmp3] \n\t" \
    \
    "paddsh %[ftmp5], %[ftmp5], %[ftmp7] \n\t" \
    "paddsh %[ftmp6], %[ftmp6], %[ftmp8] \n\t" \
    \
    "paddsh %[ftmp5], %[ftmp5], %[ff_pw_64] \n\t" \
    "paddsh %[ftmp6], %[ftmp6], %[ff_pw_64] \n\t" \
    "psrah %[ftmp5], %[ftmp5], %[ftmp4] \n\t" \
    "psrah %[ftmp6], %[ftmp6], %[ftmp4] \n\t" \
    "packushb %[ftmp1], %[ftmp5], %[ftmp6] \n\t" \
    \
    MMI_SDC1(%[ftmp1], dst, 0x00)

#define PUT_VP8_EPEL8_V4_MMI(src, src1, dst, srcstride) \
    MMI_ULDC1(%[ftmp1], src, 0x00) \
    "punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t" \
    "punpckhbh %[ftmp3], %[ftmp1], %[ftmp0] \n\t" \
    "pmullh %[ftmp5], %[ftmp2], %[filter2] \n\t" \
    "pmullh %[ftmp6], %[ftmp3], %[filter2] \n\t" \
    \
    PTR_SUBU ""#src1", "#src", "#srcstride" \n\t" \
    MMI_ULDC1(%[ftmp1], src1, 0x00) \
    "punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t" \
    "punpckhbh %[ftmp3], %[ftmp1], %[ftmp0] \n\t" \
    "pmullh %[ftmp2], %[ftmp2], %[filter1] \n\t" \
    "pmullh %[ftmp3], %[ftmp3], %[filter1] \n\t" \
    "psubsh %[ftmp7], %[ftmp5], %[ftmp2] \n\t" \
    "psubsh %[ftmp8], %[ftmp6], %[ftmp3] \n\t" \
    \
    PTR_ADDU ""#src1", "#src", "#srcstride" \n\t" \
    MMI_ULDC1(%[ftmp1], src1, 0x00) \
    "punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t" \
    "punpckhbh %[ftmp3], %[ftmp1], %[ftmp0] \n\t" \
    "pmullh %[ftmp5], %[ftmp2], %[filter3] \n\t" \
    "pmullh %[ftmp6], %[ftmp3], %[filter3] \n\t" \
    \
    PTR_ADDU ""#src1", "#src1", "#srcstride" \n\t" \
    MMI_ULDC1(%[ftmp1], src1, 0x00) \
    "punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t" \
    "punpckhbh %[ftmp3], %[ftmp1], %[ftmp0] \n\t" \
    "pmullh %[ftmp2], %[ftmp2], %[filter4] \n\t" \
    "pmullh %[ftmp3], %[ftmp3], %[filter4] \n\t" \
    "psubsh %[ftmp5], %[ftmp5], %[ftmp2] \n\t" \
    "psubsh %[ftmp6], %[ftmp6], %[ftmp3] \n\t" \
    \
    "paddsh %[ftmp5], %[ftmp5], %[ftmp7] \n\t" \
    "paddsh %[ftmp6], %[ftmp6], %[ftmp8] \n\t" \
    \
    "paddsh %[ftmp5], %[ftmp5], %[ff_pw_64] \n\t" \
    "paddsh %[ftmp6], %[ftmp6], %[ff_pw_64] \n\t" \
    "psrah %[ftmp5], %[ftmp5], %[ftmp4] \n\t" \
    "psrah %[ftmp6], %[ftmp6], %[ftmp4] \n\t" \
    "packushb %[ftmp1], %[ftmp5], %[ftmp6] \n\t" \
    \
    MMI_SDC1(%[ftmp1], dst, 0x00)

#define PUT_VP8_BILINEAR8_H_MMI(src, dst) \
    MMI_ULDC1(%[ftmp1], src, 0x00) \
    "punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t" \
    "punpckhbh %[ftmp3], %[ftmp1], %[ftmp0] \n\t" \
    "pmullh %[ftmp5], %[ftmp2], %[a] \n\t" \
    "pmullh %[ftmp6], %[ftmp3], %[a] \n\t" \
    \
    MMI_ULDC1(%[ftmp1], src, 0x01) \
    "punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t" \
    "punpckhbh %[ftmp3], %[ftmp1], %[ftmp0] \n\t" \
    "pmullh %[ftmp2], %[ftmp2], %[b] \n\t" \
    "pmullh %[ftmp3], %[ftmp3], %[b] \n\t" \
    "paddsh %[ftmp5], %[ftmp5], %[ftmp2] \n\t" \
    "paddsh %[ftmp6], %[ftmp6], %[ftmp3] \n\t" \
    \
    "paddsh %[ftmp5], %[ftmp5], %[ff_pw_4] \n\t" \
    "paddsh %[ftmp6], %[ftmp6], %[ff_pw_4] \n\t" \
    "psrah %[ftmp5], %[ftmp5], %[ftmp4] \n\t" \
    "psrah %[ftmp6], %[ftmp6], %[ftmp4] \n\t" \
    \
    "packushb %[ftmp1], %[ftmp5], %[ftmp6] \n\t" \
    MMI_SDC1(%[ftmp1], dst, 0x00)

#define PUT_VP8_BILINEAR4_H_MMI(src, dst) \
    MMI_ULWC1(%[ftmp1], src, 0x00) \
    "punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t" \
    "pmullh %[ftmp3], %[ftmp2], %[a] \n\t" \
    \
    MMI_ULWC1(%[ftmp1], src, 0x01) \
    "punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t" \
    "pmullh %[ftmp2], %[ftmp2], %[b] \n\t" \
    "paddsh %[ftmp3], %[ftmp3], %[ftmp2] \n\t" \
    \
    "paddsh %[ftmp3], %[ftmp3], %[ff_pw_4] \n\t" \
    "psrah %[ftmp3], %[ftmp3], %[ftmp4] \n\t" \
    \
    "packushb %[ftmp1], %[ftmp3], %[ftmp0] \n\t" \
    MMI_SWC1(%[ftmp1], dst, 0x00)

#define PUT_VP8_BILINEAR8_V_MMI(src, src1, dst, sstride) \
    MMI_ULDC1(%[ftmp1], src, 0x00) \
    "punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t" \
    "punpckhbh %[ftmp3], %[ftmp1], %[ftmp0] \n\t" \
    "pmullh %[ftmp5], %[ftmp2], %[c] \n\t" \
    "pmullh %[ftmp6], %[ftmp3], %[c] \n\t" \
    \
    PTR_ADDU ""#src1", "#src", "#sstride" \n\t" \
    MMI_ULDC1(%[ftmp1], src1, 0x00) \
    "punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t" \
    "punpckhbh %[ftmp3], %[ftmp1], %[ftmp0] \n\t" \
    "pmullh %[ftmp2], %[ftmp2], %[d] \n\t" \
    "pmullh %[ftmp3], %[ftmp3], %[d] \n\t" \
    "paddsh %[ftmp5], %[ftmp5], %[ftmp2] \n\t" \
    "paddsh %[ftmp6], %[ftmp6], %[ftmp3] \n\t" \
    \
    "paddsh %[ftmp5], %[ftmp5], %[ff_pw_4] \n\t" \
    "paddsh %[ftmp6], %[ftmp6], %[ff_pw_4] \n\t" \
    "psrah %[ftmp5], %[ftmp5], %[ftmp4] \n\t" \
    "psrah %[ftmp6], %[ftmp6], %[ftmp4] \n\t" \
    \
    "packushb %[ftmp1], %[ftmp5], %[ftmp6] \n\t" \
    MMI_SDC1(%[ftmp1], dst, 0x00)

#define PUT_VP8_BILINEAR4_V_MMI(src, src1, dst, sstride) \
    MMI_ULWC1(%[ftmp1], src, 0x00) \
    "punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t" \
    "pmullh %[ftmp3], %[ftmp2], %[c] \n\t" \
    \
    PTR_ADDU ""#src1", "#src", "#sstride" \n\t" \
    MMI_ULWC1(%[ftmp1], src1, 0x00) \
    "punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t" \
    "pmullh %[ftmp2], %[ftmp2], %[d] \n\t" \
    "paddsh %[ftmp3], %[ftmp3], %[ftmp2] \n\t" \
    \
    "paddsh %[ftmp3], %[ftmp3], %[ff_pw_4] \n\t" \
    "psrah %[ftmp3], %[ftmp3], %[ftmp4] \n\t" \
    \
    "packushb %[ftmp1], %[ftmp3], %[ftmp0] \n\t" \
    MMI_SWC1(%[ftmp1], dst, 0x00)
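
/*
 * The PUT_VP8_BILINEAR* macros above implement the VP8 bilinear MC
 * kernel:
 *   horizontal: dst[x] = (a * src[x] + b * src[x + 1]       + 4) >> 3
 *   vertical:   dst[x] = (c * src[x] + d * src[x + sstride] + 4) >> 3
 * where, per the VP8 spec, a = 8 - mx, b = mx, c = 8 - my, d = my are
 * broadcast into halfword lanes by the callers; ff_pw_4 is the rounding
 * term and ftmp4 is expected to hold the shift count of 3.
 */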
DECLARE_ALIGNED(8, static const uint64_t, fourtap_subpel_filters[7][6]) = {
    {0x0000000000000000, 0x0006000600060006, 0x007b007b007b007b,
     0x000c000c000c000c, 0x0001000100010001, 0x0000000000000000},
    {0x0002000200020002, 0x000b000b000b000b, 0x006c006c006c006c,
     0x0024002400240024, 0x0008000800080008, 0x0001000100010001},
    {0x0000000000000000, 0x0009000900090009, 0x005d005d005d005d,
     0x0032003200320032, 0x0006000600060006, 0x0000000000000000},
    {0x0003000300030003, 0x0010001000100010, 0x004d004d004d004d,
     0x004d004d004d004d, 0x0010001000100010, 0x0003000300030003},
    {0x0000000000000000, 0x0006000600060006, 0x0032003200320032,
     0x005d005d005d005d, 0x0009000900090009, 0x0000000000000000},
    {0x0001000100010001, 0x0008000800080008, 0x0024002400240024,
     0x006c006c006c006c, 0x000b000b000b000b, 0x0002000200020002},
    {0x0000000000000000, 0x0001000100010001, 0x000c000c000c000c,
     0x007b007b007b007b, 0x0006000600060006, 0x0000000000000000}
};
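
/*
 * Each 64-bit constant above is a single filter coefficient replicated
 * into four int16 lanes for pmullh: e.g. 0x007b007b007b007b is 123 in
 * every lane, matching row 0 of the scalar subpel_filters[] table kept
 * for reference below.
 */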
#if 0
#define FILTER_6TAP(src, F, stride) \
    cm[(F[2] * src[x + 0 * stride] - F[1] * src[x - 1 * stride] + \
        F[0] * src[x - 2 * stride] + F[3] * src[x + 1 * stride] - \
        F[4] * src[x + 2 * stride] + F[5] * src[x + 3 * stride] + 64) >> 7]

#define FILTER_4TAP(src, F, stride) \
    cm[(F[2] * src[x + 0 * stride] - F[1] * src[x - 1 * stride] + \
        F[3] * src[x + 1 * stride] - F[4] * src[x + 2 * stride] + 64) >> 7]

static const uint8_t subpel_filters[7][6] = {
    { 0,  6, 123,  12,  1, 0 },
    { 2, 11, 108,  36,  8, 1 },
    { 0,  9,  93,  50,  6, 0 },
    { 3, 16,  77,  77, 16, 3 },
    { 0,  6,  50,  93,  9, 0 },
    { 1,  8,  36, 108, 11, 2 },
    { 0,  1,  12, 123,  6, 0 },
};

#define MUL_20091(a) ((((a) * 20091) >> 16) + (a))
#define MUL_35468(a) (((a) * 35468) >> 16)
#endif
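
/*
 * 20091 and 35468 are the Q16 fixed-point forms of the VP8 IDCT
 * rotation constants. pmulhh sees signed 16-bit operands and 35468
 * does not fit, so the MMI IDCT below pre-shifts the input left by 2
 * and multiplies by 0x22a3 (8867): (a << 2) * 8867 >> 16 is exactly
 * (a * 35468) >> 16. 20091 fits as 0x4e7b, and the trailing "+ a" of
 * MUL_20091() becomes a paddh.
 */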
#define clip_int8(n) (cm[(n) + 0x80] - 0x80)

static av_always_inline void vp8_filter_common_is4tap(uint8_t *p,
                                                      ptrdiff_t stride)
{
    av_unused int p1 = p[-2 * stride];
    av_unused int p0 = p[-1 * stride];
    av_unused int q0 = p[ 0 * stride];
    av_unused int q1 = p[ 1 * stride];
    int a, f1, f2;
    const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP;

    a = 3 * (q0 - p0);
    a += clip_int8(p1 - q1);
    a = clip_int8(a);

    // We deviate from the spec here with c(a+3) >> 3
    // since that's what libvpx does.
    f1 = FFMIN(a + 4, 127) >> 3;
    f2 = FFMIN(a + 3, 127) >> 3;

    // Despite what the spec says, we do need to clamp here to
    // be bitexact with libvpx.
    p[-1 * stride] = cm[p0 + f2];
    p[ 0 * stride] = cm[q0 - f1];
}

static av_always_inline void vp8_filter_common_isnot4tap(uint8_t *p,
                                                         ptrdiff_t stride)
{
    av_unused int p1 = p[-2 * stride];
    av_unused int p0 = p[-1 * stride];
    av_unused int q0 = p[ 0 * stride];
    av_unused int q1 = p[ 1 * stride];
    int a, f1, f2;
    const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP;

    a = 3 * (q0 - p0);
    a = clip_int8(a);

    // We deviate from the spec here with c(a+3) >> 3
    // since that's what libvpx does.
    f1 = FFMIN(a + 4, 127) >> 3;
    f2 = FFMIN(a + 3, 127) >> 3;

    // Despite what the spec says, we do need to clamp here to
    // be bitexact with libvpx.
    p[-1 * stride] = cm[p0 + f2];
    p[ 0 * stride] = cm[q0 - f1];

    a = (f1 + 1) >> 1;
    p[-2 * stride] = cm[p1 + a];
    p[ 1 * stride] = cm[q1 - a];
}
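
/*
 * Compared with the is4tap variant: the clip_int8(p1 - q1) term is
 * dropped from a, and the outer pixels p1/q1 are additionally nudged by
 * (f1 + 1) >> 1. The inner-loop filter wrappers below pick between the
 * two based on the per-pixel hev() result.
 */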
static av_always_inline int vp8_simple_limit(uint8_t *p, ptrdiff_t stride,
                                             int flim)
{
    av_unused int p1 = p[-2 * stride];
    av_unused int p0 = p[-1 * stride];
    av_unused int q0 = p[ 0 * stride];
    av_unused int q1 = p[ 1 * stride];

    return 2 * FFABS(p0 - q0) + (FFABS(p1 - q1) >> 1) <= flim;
}

static av_always_inline int hev(uint8_t *p, ptrdiff_t stride, int thresh)
{
    av_unused int p1 = p[-2 * stride];
    av_unused int p0 = p[-1 * stride];
    av_unused int q0 = p[ 0 * stride];
    av_unused int q1 = p[ 1 * stride];

    return FFABS(p1 - p0) > thresh || FFABS(q1 - q0) > thresh;
}

static av_always_inline void filter_mbedge(uint8_t *p, ptrdiff_t stride)
{
    int a0, a1, a2, w;
    const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP;

    av_unused int p2 = p[-3 * stride];
    av_unused int p1 = p[-2 * stride];
    av_unused int p0 = p[-1 * stride];
    av_unused int q0 = p[ 0 * stride];
    av_unused int q1 = p[ 1 * stride];
    av_unused int q2 = p[ 2 * stride];

    w = clip_int8(p1 - q1);
    w = clip_int8(w + 3 * (q0 - p0));

    a0 = (27 * w + 63) >> 7;
    a1 = (18 * w + 63) >> 7;
    a2 = ( 9 * w + 63) >> 7;

    p[-3 * stride] = cm[p2 + a2];
    p[-2 * stride] = cm[p1 + a1];
    p[-1 * stride] = cm[p0 + a0];
    p[ 0 * stride] = cm[q0 - a0];
    p[ 1 * stride] = cm[q1 - a1];
    p[ 2 * stride] = cm[q2 - a2];
}
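
/*
 * The 27/18/9 weights taper the macroblock-edge correction across the
 * three pixel pairs: a0 = (27 * w + 63) >> 7 on p0/q0 down to
 * a2 = (9 * w + 63) >> 7 on p2/q2, i.e. roughly 0.21 * w, 0.14 * w and
 * 0.07 * w applied symmetrically about the edge.
 */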
static av_always_inline int vp8_normal_limit(uint8_t *p, ptrdiff_t stride,
                                             int E, int I)
{
    av_unused int p3 = p[-4 * stride];
    av_unused int p2 = p[-3 * stride];
    av_unused int p1 = p[-2 * stride];
    av_unused int p0 = p[-1 * stride];
    av_unused int q0 = p[ 0 * stride];
    av_unused int q1 = p[ 1 * stride];
    av_unused int q2 = p[ 2 * stride];
    av_unused int q3 = p[ 3 * stride];

    return vp8_simple_limit(p, stride, E) &&
           FFABS(p3 - p2) <= I && FFABS(p2 - p1) <= I &&
           FFABS(p1 - p0) <= I && FFABS(q3 - q2) <= I &&
           FFABS(q2 - q1) <= I && FFABS(q1 - q0) <= I;
}

static av_always_inline void vp8_v_loop_filter8_mmi(uint8_t *dst,
        ptrdiff_t stride, int flim_E, int flim_I, int hev_thresh)
{
    double ftmp[18];
    uint32_t tmp[1];
    DECLARE_DOUBLE_1;
    DECLARE_DOUBLE_2;
    DECLARE_UINT32_T;
    DECLARE_VAR_ALL64;

    __asm__ volatile(
        /* Get data from dst */
        MMI_ULDC1(%[q0], %[dst], 0x0)
        PTR_SUBU "%[tmp0], %[dst], %[stride] \n\t"
        MMI_ULDC1(%[p0], %[tmp0], 0x0)
        PTR_SUBU "%[tmp0], %[tmp0], %[stride] \n\t"
        MMI_ULDC1(%[p1], %[tmp0], 0x0)
        PTR_SUBU "%[tmp0], %[tmp0], %[stride] \n\t"
        MMI_ULDC1(%[p2], %[tmp0], 0x0)
        PTR_SUBU "%[tmp0], %[tmp0], %[stride] \n\t"
        MMI_ULDC1(%[p3], %[tmp0], 0x0)
        PTR_ADDU "%[tmp0], %[dst], %[stride] \n\t"
        MMI_ULDC1(%[q1], %[tmp0], 0x0)
        PTR_ADDU "%[tmp0], %[tmp0], %[stride] \n\t"
        MMI_ULDC1(%[q2], %[tmp0], 0x0)
        PTR_ADDU "%[tmp0], %[tmp0], %[stride] \n\t"
        MMI_ULDC1(%[q3], %[tmp0], 0x0)

        MMI_VP8_LOOP_FILTER

        /* Move to dst */
        MMI_USDC1(%[q0], %[dst], 0x0)
        PTR_SUBU "%[tmp0], %[dst], %[stride] \n\t"
        MMI_USDC1(%[p0], %[tmp0], 0x0)
        PTR_SUBU "%[tmp0], %[tmp0], %[stride] \n\t"
        MMI_USDC1(%[p1], %[tmp0], 0x0)
        PTR_SUBU "%[tmp0], %[tmp0], %[stride] \n\t"
        MMI_USDC1(%[p2], %[tmp0], 0x0)
        PTR_ADDU "%[tmp0], %[dst], %[stride] \n\t"
        MMI_USDC1(%[q1], %[tmp0], 0x0)
        PTR_ADDU "%[tmp0], %[tmp0], %[stride] \n\t"
        MMI_USDC1(%[q2], %[tmp0], 0x0)
        : RESTRICT_ASM_ALL64
          [p3]"=&f"(ftmp[0]), [p2]"=&f"(ftmp[1]),
          [p1]"=&f"(ftmp[2]), [p0]"=&f"(ftmp[3]),
          [q0]"=&f"(ftmp[4]), [q1]"=&f"(ftmp[5]),
          [q2]"=&f"(ftmp[6]), [q3]"=&f"(ftmp[7]),
          [ftmp0]"=&f"(ftmp[8]), [ftmp1]"=&f"(ftmp[9]),
          [ftmp2]"=&f"(ftmp[10]), [ftmp3]"=&f"(ftmp[11]),
          [hev]"=&f"(ftmp[12]), [mask]"=&f"(ftmp[13]),
          [ftmp4]"=&f"(ftmp[14]), [ftmp5]"=&f"(ftmp[15]),
          [ftmp6]"=&f"(ftmp[16]), [ftmp7]"=&f"(ftmp[17]),
          [dst]"+&r"(dst), [tmp0]"=&r"(tmp[0]),
          RESTRICT_ASM_DOUBLE_1, RESTRICT_ASM_DOUBLE_2,
          RESTRICT_ASM_UINT32_T
        : [e]"r"((mips_reg)flim_E), [thresh]"r"((mips_reg)hev_thresh),
          [i]"r"((mips_reg)flim_I), [stride]"r"((mips_reg)stride)
        : "memory"
    );
}
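
/*
 * Note that the filter loads all eight rows p3..q3 but stores back only
 * p2..q2: p3 and q3 are read-only inputs of the VP8 macroblock filter
 * and are never modified.
 */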
static av_always_inline void vp8_v_loop_filter8_inner_mmi(uint8_t *dst,
        ptrdiff_t stride, int flim_E, int flim_I, int hev_thresh)
{
    int i;

    for (i = 0; i < 8; i++)
        if (vp8_normal_limit(dst + i * 1, stride, flim_E, flim_I)) {
            int hv = hev(dst + i * 1, stride, hev_thresh);
            if (hv)
                vp8_filter_common_is4tap(dst + i * 1, stride);
            else
                vp8_filter_common_isnot4tap(dst + i * 1, stride);
        }
}

static av_always_inline void vp8_h_loop_filter8_mmi(uint8_t *dst,
        ptrdiff_t stride, int flim_E, int flim_I, int hev_thresh)
{
    double ftmp[18];
    uint32_t tmp[1];
    DECLARE_DOUBLE_1;
    DECLARE_DOUBLE_2;
    DECLARE_UINT32_T;
    DECLARE_VAR_ALL64;

    __asm__ volatile(
        /* Get data from dst */
        MMI_ULDC1(%[p3], %[dst], -0x04)
        PTR_ADDU "%[tmp0], %[dst], %[stride] \n\t"
        MMI_ULDC1(%[p2], %[tmp0], -0x04)
        PTR_ADDU "%[tmp0], %[tmp0], %[stride] \n\t"
        MMI_ULDC1(%[p1], %[tmp0], -0x04)
        PTR_ADDU "%[tmp0], %[tmp0], %[stride] \n\t"
        MMI_ULDC1(%[p0], %[tmp0], -0x04)
        PTR_ADDU "%[tmp0], %[tmp0], %[stride] \n\t"
        MMI_ULDC1(%[q0], %[tmp0], -0x04)
        PTR_ADDU "%[tmp0], %[tmp0], %[stride] \n\t"
        MMI_ULDC1(%[q1], %[tmp0], -0x04)
        PTR_ADDU "%[tmp0], %[tmp0], %[stride] \n\t"
        MMI_ULDC1(%[q2], %[tmp0], -0x04)
        PTR_ADDU "%[tmp0], %[tmp0], %[stride] \n\t"
        MMI_ULDC1(%[q3], %[tmp0], -0x04)

        /* Matrix transpose */
        TRANSPOSE_8B(%[p3], %[p2], %[p1], %[p0],
                     %[q0], %[q1], %[q2], %[q3],
                     %[ftmp1], %[ftmp2], %[ftmp3], %[ftmp4])

        MMI_VP8_LOOP_FILTER

        /* Matrix transpose */
        TRANSPOSE_8B(%[p3], %[p2], %[p1], %[p0],
                     %[q0], %[q1], %[q2], %[q3],
                     %[ftmp1], %[ftmp2], %[ftmp3], %[ftmp4])

        /* Move to dst */
        MMI_USDC1(%[p3], %[dst], -0x04)
        PTR_ADDU "%[dst], %[dst], %[stride] \n\t"
        MMI_USDC1(%[p2], %[dst], -0x04)
        PTR_ADDU "%[dst], %[dst], %[stride] \n\t"
        MMI_USDC1(%[p1], %[dst], -0x04)
        PTR_ADDU "%[dst], %[dst], %[stride] \n\t"
        MMI_USDC1(%[p0], %[dst], -0x04)
        PTR_ADDU "%[dst], %[dst], %[stride] \n\t"
        MMI_USDC1(%[q0], %[dst], -0x04)
        PTR_ADDU "%[dst], %[dst], %[stride] \n\t"
        MMI_USDC1(%[q1], %[dst], -0x04)
        PTR_ADDU "%[dst], %[dst], %[stride] \n\t"
        MMI_USDC1(%[q2], %[dst], -0x04)
        PTR_ADDU "%[dst], %[dst], %[stride] \n\t"
        MMI_USDC1(%[q3], %[dst], -0x04)
        : RESTRICT_ASM_ALL64
          [p3]"=&f"(ftmp[0]), [p2]"=&f"(ftmp[1]),
          [p1]"=&f"(ftmp[2]), [p0]"=&f"(ftmp[3]),
          [q0]"=&f"(ftmp[4]), [q1]"=&f"(ftmp[5]),
          [q2]"=&f"(ftmp[6]), [q3]"=&f"(ftmp[7]),
          [ftmp0]"=&f"(ftmp[8]), [ftmp1]"=&f"(ftmp[9]),
          [ftmp2]"=&f"(ftmp[10]), [ftmp3]"=&f"(ftmp[11]),
          [hev]"=&f"(ftmp[12]), [mask]"=&f"(ftmp[13]),
          [ftmp4]"=&f"(ftmp[14]), [ftmp5]"=&f"(ftmp[15]),
          [ftmp6]"=&f"(ftmp[16]), [ftmp7]"=&f"(ftmp[17]),
          [dst]"+&r"(dst), [tmp0]"=&r"(tmp[0]),
          RESTRICT_ASM_DOUBLE_1, RESTRICT_ASM_DOUBLE_2,
          RESTRICT_ASM_UINT32_T
        : [e]"r"((mips_reg)flim_E), [thresh]"r"((mips_reg)hev_thresh),
          [i]"r"((mips_reg)flim_I), [stride]"r"((mips_reg)stride)
        : "memory"
    );
}
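
/*
 * The horizontal (vertical-edge) variant reads eight 8-byte rows at
 * dst - 4, transposes them with TRANSPOSE_8B so each p3..q3 column
 * lands in one register, runs the same MMI_VP8_LOOP_FILTER, and
 * transposes back before storing.
 */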
static av_always_inline void vp8_h_loop_filter8_inner_mmi(uint8_t *dst,
        ptrdiff_t stride, int flim_E, int flim_I, int hev_thresh)
{
    int i;

    for (i = 0; i < 8; i++)
        if (vp8_normal_limit(dst + i * stride, 1, flim_E, flim_I)) {
            int hv = hev(dst + i * stride, 1, hev_thresh);
            if (hv)
                vp8_filter_common_is4tap(dst + i * stride, 1);
            else
                vp8_filter_common_isnot4tap(dst + i * stride, 1);
        }
}

void ff_vp8_luma_dc_wht_mmi(int16_t block[4][4][16], int16_t dc[16])
{
#if 1
    double ftmp[8];
    DECLARE_VAR_ALL64;

    __asm__ volatile (
        MMI_LDC1(%[ftmp0], %[dc], 0x00)
        MMI_LDC1(%[ftmp1], %[dc], 0x08)
        MMI_LDC1(%[ftmp2], %[dc], 0x10)
        MMI_LDC1(%[ftmp3], %[dc], 0x18)
        "paddsh %[ftmp4], %[ftmp0], %[ftmp3] \n\t"
        "psubsh %[ftmp5], %[ftmp0], %[ftmp3] \n\t"
        "paddsh %[ftmp6], %[ftmp1], %[ftmp2] \n\t"
        "psubsh %[ftmp7], %[ftmp1], %[ftmp2] \n\t"
        "paddsh %[ftmp0], %[ftmp4], %[ftmp6] \n\t"
        "paddsh %[ftmp1], %[ftmp5], %[ftmp7] \n\t"
        "psubsh %[ftmp2], %[ftmp4], %[ftmp6] \n\t"
        "psubsh %[ftmp3], %[ftmp5], %[ftmp7] \n\t"
        MMI_SDC1(%[ftmp0], %[dc], 0x00)
        MMI_SDC1(%[ftmp1], %[dc], 0x08)
        MMI_SDC1(%[ftmp2], %[dc], 0x10)
        MMI_SDC1(%[ftmp3], %[dc], 0x18)
        : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]),
          [ftmp2]"=&f"(ftmp[2]), [ftmp3]"=&f"(ftmp[3]),
          [ftmp4]"=&f"(ftmp[4]), [ftmp5]"=&f"(ftmp[5]),
          [ftmp6]"=&f"(ftmp[6]),
          RESTRICT_ASM_ALL64
          [ftmp7]"=&f"(ftmp[7])
        : [dc]"r"((uint8_t *)dc)
        : "memory"
    );

    block[0][0][0] = (dc[0] + dc[3] + 3 + dc[1] + dc[2]) >> 3;
    block[0][1][0] = (dc[0] - dc[3] + 3 + dc[1] - dc[2]) >> 3;
    block[0][2][0] = (dc[0] + dc[3] + 3 - dc[1] - dc[2]) >> 3;
    block[0][3][0] = (dc[0] - dc[3] + 3 - dc[1] + dc[2]) >> 3;
    block[1][0][0] = (dc[4] + dc[7] + 3 + dc[5] + dc[6]) >> 3;
    block[1][1][0] = (dc[4] - dc[7] + 3 + dc[5] - dc[6]) >> 3;
    block[1][2][0] = (dc[4] + dc[7] + 3 - dc[5] - dc[6]) >> 3;
    block[1][3][0] = (dc[4] - dc[7] + 3 - dc[5] + dc[6]) >> 3;
    block[2][0][0] = (dc[8] + dc[11] + 3 + dc[9] + dc[10]) >> 3;
    block[2][1][0] = (dc[8] - dc[11] + 3 + dc[9] - dc[10]) >> 3;
    block[2][2][0] = (dc[8] + dc[11] + 3 - dc[9] - dc[10]) >> 3;
    block[2][3][0] = (dc[8] - dc[11] + 3 - dc[9] + dc[10]) >> 3;
    block[3][0][0] = (dc[12] + dc[15] + 3 + dc[13] + dc[14]) >> 3;
    block[3][1][0] = (dc[12] - dc[15] + 3 + dc[13] - dc[14]) >> 3;
    block[3][2][0] = (dc[12] + dc[15] + 3 - dc[13] - dc[14]) >> 3;
    block[3][3][0] = (dc[12] - dc[15] + 3 - dc[13] + dc[14]) >> 3;

    __asm__ volatile (
        "pxor %[ftmp0], %[ftmp0], %[ftmp0] \n\t"
        MMI_SDC1(%[ftmp0], %[dc], 0x00)
        MMI_SDC1(%[ftmp0], %[dc], 0x08)
        MMI_SDC1(%[ftmp0], %[dc], 0x10)
        MMI_SDC1(%[ftmp0], %[dc], 0x18)
        : RESTRICT_ASM_ALL64
          [ftmp0]"=&f"(ftmp[0])
        : [dc]"r"((uint8_t *)dc)
        : "memory"
    );
#else
    int t00, t01, t02, t03, t10, t11, t12, t13;
    int t20, t21, t22, t23, t30, t31, t32, t33;

    t00 = dc[0] + dc[12];
    t10 = dc[1] + dc[13];
    t20 = dc[2] + dc[14];
    t30 = dc[3] + dc[15];
    t03 = dc[0] - dc[12];
    t13 = dc[1] - dc[13];
    t23 = dc[2] - dc[14];
    t33 = dc[3] - dc[15];
    t01 = dc[4] + dc[ 8];
    t11 = dc[5] + dc[ 9];
    t21 = dc[6] + dc[10];
    t31 = dc[7] + dc[11];
    t02 = dc[4] - dc[ 8];
    t12 = dc[5] - dc[ 9];
    t22 = dc[6] - dc[10];
    t32 = dc[7] - dc[11];

    dc[ 0] = t00 + t01;
    dc[ 1] = t10 + t11;
    dc[ 2] = t20 + t21;
    dc[ 3] = t30 + t31;
    dc[ 4] = t03 + t02;
    dc[ 5] = t13 + t12;
    dc[ 6] = t23 + t22;
    dc[ 7] = t33 + t32;
    dc[ 8] = t00 - t01;
    dc[ 9] = t10 - t11;
    dc[10] = t20 - t21;
    dc[11] = t30 - t31;
    dc[12] = t03 - t02;
    dc[13] = t13 - t12;
    dc[14] = t23 - t22;
    dc[15] = t33 - t32;

    block[0][0][0] = (dc[0] + dc[3] + 3 + dc[1] + dc[2]) >> 3;
    block[0][1][0] = (dc[0] - dc[3] + 3 + dc[1] - dc[2]) >> 3;
    block[0][2][0] = (dc[0] + dc[3] + 3 - dc[1] - dc[2]) >> 3;
    block[0][3][0] = (dc[0] - dc[3] + 3 - dc[1] + dc[2]) >> 3;
    block[1][0][0] = (dc[4] + dc[7] + 3 + dc[5] + dc[6]) >> 3;
    block[1][1][0] = (dc[4] - dc[7] + 3 + dc[5] - dc[6]) >> 3;
    block[1][2][0] = (dc[4] + dc[7] + 3 - dc[5] - dc[6]) >> 3;
    block[1][3][0] = (dc[4] - dc[7] + 3 - dc[5] + dc[6]) >> 3;
    block[2][0][0] = (dc[8] + dc[11] + 3 + dc[9] + dc[10]) >> 3;
    block[2][1][0] = (dc[8] - dc[11] + 3 + dc[9] - dc[10]) >> 3;
    block[2][2][0] = (dc[8] + dc[11] + 3 - dc[9] - dc[10]) >> 3;
    block[2][3][0] = (dc[8] - dc[11] + 3 - dc[9] + dc[10]) >> 3;
    block[3][0][0] = (dc[12] + dc[15] + 3 + dc[13] + dc[14]) >> 3;
    block[3][1][0] = (dc[12] - dc[15] + 3 + dc[13] - dc[14]) >> 3;
    block[3][2][0] = (dc[12] + dc[15] + 3 - dc[13] - dc[14]) >> 3;
    block[3][3][0] = (dc[12] - dc[15] + 3 - dc[13] + dc[14]) >> 3;

    AV_ZERO64(dc +  0);
    AV_ZERO64(dc +  4);
    AV_ZERO64(dc +  8);
    AV_ZERO64(dc + 12);
#endif
}
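
/*
 * In the MMI path above, the first (vertical) WHT butterfly pass runs
 * on whole 4-lane rows in f-registers, the second pass plus the
 * (x + 3) >> 3 rounding stays in scalar C, and dc[] is finally zeroed
 * with four 64-bit stores; the #else branch is the all-scalar
 * reference implementation of the same transform.
 */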
void ff_vp8_luma_dc_wht_dc_mmi(int16_t block[4][4][16], int16_t dc[16])
{
    int val = (dc[0] + 3) >> 3;

    dc[0] = 0;

    block[0][0][0] = val;
    block[0][1][0] = val;
    block[0][2][0] = val;
    block[0][3][0] = val;
    block[1][0][0] = val;
    block[1][1][0] = val;
    block[1][2][0] = val;
    block[1][3][0] = val;
    block[2][0][0] = val;
    block[2][1][0] = val;
    block[2][2][0] = val;
    block[2][3][0] = val;
    block[3][0][0] = val;
    block[3][1][0] = val;
    block[3][2][0] = val;
    block[3][3][0] = val;
}

void ff_vp8_idct_add_mmi(uint8_t *dst, int16_t block[16], ptrdiff_t stride)
{
#if 1
    double ftmp[12];
    uint32_t tmp[1];
    union av_intfloat64 ff_ph_4e7b_u;
    union av_intfloat64 ff_ph_22a3_u;
    DECLARE_VAR_LOW32;
    DECLARE_VAR_ALL64;

    ff_ph_4e7b_u.i = 0x4e7b4e7b4e7b4e7bULL;
    ff_ph_22a3_u.i = 0x22a322a322a322a3ULL;

    __asm__ volatile (
        "pxor %[ftmp0], %[ftmp0], %[ftmp0] \n\t"
        MMI_LDC1(%[ftmp1], %[block], 0x00)
        MMI_LDC1(%[ftmp2], %[block], 0x08)
        MMI_LDC1(%[ftmp3], %[block], 0x10)
        MMI_LDC1(%[ftmp4], %[block], 0x18)
        "li %[tmp0], 0x02 \n\t"
        "mtc1 %[tmp0], %[ftmp11] \n\t"
        // block[0...3] + block[8...11]
        "paddh %[ftmp5], %[ftmp1], %[ftmp3] \n\t"
        // block[0...3] - block[8...11]
        "psubh %[ftmp6], %[ftmp1], %[ftmp3] \n\t"
        // MUL_35468(block[12...15])
        "psllh %[ftmp9], %[ftmp4], %[ftmp11] \n\t"
        "pmulhh %[ftmp7], %[ftmp9], %[ff_ph_22a3] \n\t"
        // MUL_35468(block[4...7])
        "psllh %[ftmp9], %[ftmp2], %[ftmp11] \n\t"
        "pmulhh %[ftmp8], %[ftmp9], %[ff_ph_22a3] \n\t"
  1043. // MUL_20091(block[4...7])
  1044. "pmulhh %[ftmp9], %[ftmp2], %[ff_ph_4e7b] \n\t"
  1045. "paddh %[ftmp9], %[ftmp9], %[ftmp2] \n\t"
  1046. // MUL_20091(block[12...15])
  1047. "pmulhh %[ftmp10], %[ftmp4], %[ff_ph_4e7b] \n\t"
  1048. "paddh %[ftmp10], %[ftmp10], %[ftmp4] \n\t"
  1049. // tmp[0 4 8 12]
  1050. "paddh %[ftmp1], %[ftmp5], %[ftmp7] \n\t"
  1051. "paddh %[ftmp1], %[ftmp1], %[ftmp9] \n\t"
  1052. // tmp[1 5 9 13]
  1053. "paddh %[ftmp2], %[ftmp6], %[ftmp8] \n\t"
  1054. "psubh %[ftmp2], %[ftmp2], %[ftmp10] \n\t"
  1055. // tmp[2 6 10 14]
  1056. "psubh %[ftmp3], %[ftmp6], %[ftmp8] \n\t"
  1057. "paddh %[ftmp3], %[ftmp3], %[ftmp10] \n\t"
  1058. // tmp[3 7 11 15]
  1059. "psubh %[ftmp4], %[ftmp5], %[ftmp7] \n\t"
  1060. "psubh %[ftmp4], %[ftmp4], %[ftmp9] \n\t"
  1061. MMI_SDC1(%[ftmp0], %[block], 0x00)
  1062. MMI_SDC1(%[ftmp0], %[block], 0x08)
  1063. MMI_SDC1(%[ftmp0], %[block], 0x10)
  1064. MMI_SDC1(%[ftmp0], %[block], 0x18)
  1065. TRANSPOSE_4H(%[ftmp1], %[ftmp2], %[ftmp3], %[ftmp4],
  1066. %[ftmp5], %[ftmp6], %[ftmp7], %[ftmp8])
  1067. // t[0 4 8 12]
  1068. "paddh %[ftmp5], %[ftmp1], %[ftmp3] \n\t"
  1069. // t[1 5 9 13]
  1070. "psubh %[ftmp6], %[ftmp1], %[ftmp3] \n\t"
  1071. // t[2 6 10 14]
  1072. "psllh %[ftmp9], %[ftmp2], %[ftmp11] \n\t"
  1073. "pmulhh %[ftmp9], %[ftmp9], %[ff_ph_22a3] \n\t"
  1074. "psubh %[ftmp7], %[ftmp9], %[ftmp4] \n\t"
  1075. "pmulhh %[ftmp10], %[ftmp4], %[ff_ph_4e7b] \n\t"
  1076. "psubh %[ftmp7], %[ftmp7], %[ftmp10] \n\t"
  1077. // t[3 7 11 15]
  1078. "psllh %[ftmp9], %[ftmp4], %[ftmp11] \n\t"
  1079. "pmulhh %[ftmp9], %[ftmp9], %[ff_ph_22a3] \n\t"
  1080. "paddh %[ftmp8], %[ftmp9], %[ftmp2] \n\t"
  1081. "pmulhh %[ftmp10], %[ftmp2], %[ff_ph_4e7b] \n\t"
  1082. "paddh %[ftmp8], %[ftmp8], %[ftmp10] \n\t"
  1083. "li %[tmp0], 0x03 \n\t"
  1084. "mtc1 %[tmp0], %[ftmp11] \n\t"
  1085. "paddh %[ftmp1], %[ftmp5], %[ftmp8] \n\t"
  1086. "paddh %[ftmp1], %[ftmp1], %[ff_pw_4] \n\t"
  1087. "psrah %[ftmp1], %[ftmp1], %[ftmp11] \n\t"
  1088. "paddh %[ftmp2], %[ftmp6], %[ftmp7] \n\t"
  1089. "paddh %[ftmp2], %[ftmp2], %[ff_pw_4] \n\t"
  1090. "psrah %[ftmp2], %[ftmp2], %[ftmp11] \n\t"
  1091. "psubh %[ftmp3], %[ftmp6], %[ftmp7] \n\t"
  1092. "paddh %[ftmp3], %[ftmp3], %[ff_pw_4] \n\t"
  1093. "psrah %[ftmp3], %[ftmp3], %[ftmp11] \n\t"
  1094. "psubh %[ftmp4], %[ftmp5], %[ftmp8] \n\t"
  1095. "paddh %[ftmp4], %[ftmp4], %[ff_pw_4] \n\t"
  1096. "psrah %[ftmp4], %[ftmp4], %[ftmp11] \n\t"
  1097. TRANSPOSE_4H(%[ftmp1], %[ftmp2], %[ftmp3], %[ftmp4],
  1098. %[ftmp5], %[ftmp6], %[ftmp7], %[ftmp8])
  1099. MMI_LWC1(%[ftmp5], %[dst0], 0x00)
  1100. MMI_LWC1(%[ftmp6], %[dst1], 0x00)
  1101. MMI_LWC1(%[ftmp7], %[dst2], 0x00)
  1102. MMI_LWC1(%[ftmp8], %[dst3], 0x00)
  1103. "punpcklbh %[ftmp5], %[ftmp5], %[ftmp0] \n\t"
  1104. "punpcklbh %[ftmp6], %[ftmp6], %[ftmp0] \n\t"
  1105. "punpcklbh %[ftmp7], %[ftmp7], %[ftmp0] \n\t"
  1106. "punpcklbh %[ftmp8], %[ftmp8], %[ftmp0] \n\t"
  1107. "paddh %[ftmp1], %[ftmp1], %[ftmp5] \n\t"
  1108. "paddh %[ftmp2], %[ftmp2], %[ftmp6] \n\t"
  1109. "paddh %[ftmp3], %[ftmp3], %[ftmp7] \n\t"
  1110. "paddh %[ftmp4], %[ftmp4], %[ftmp8] \n\t"
  1111. "packushb %[ftmp1], %[ftmp1], %[ftmp0] \n\t"
  1112. "packushb %[ftmp2], %[ftmp2], %[ftmp0] \n\t"
  1113. "packushb %[ftmp3], %[ftmp3], %[ftmp0] \n\t"
  1114. "packushb %[ftmp4], %[ftmp4], %[ftmp0] \n\t"
  1115. MMI_SWC1(%[ftmp1], %[dst0], 0x00)
  1116. MMI_SWC1(%[ftmp2], %[dst1], 0x00)
  1117. MMI_SWC1(%[ftmp3], %[dst2], 0x00)
  1118. MMI_SWC1(%[ftmp4], %[dst3], 0x00)
  1119. : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]),
  1120. [ftmp2]"=&f"(ftmp[2]), [ftmp3]"=&f"(ftmp[3]),
  1121. [ftmp4]"=&f"(ftmp[4]), [ftmp5]"=&f"(ftmp[5]),
  1122. [ftmp6]"=&f"(ftmp[6]), [ftmp7]"=&f"(ftmp[7]),
  1123. [ftmp8]"=&f"(ftmp[8]), [ftmp9]"=&f"(ftmp[9]),
  1124. [ftmp10]"=&f"(ftmp[10]), [ftmp11]"=&f"(ftmp[11]),
  1125. RESTRICT_ASM_LOW32
  1126. RESTRICT_ASM_ALL64
  1127. [tmp0]"=&r"(tmp[0])
  1128. : [dst0]"r"(dst), [dst1]"r"(dst+stride),
  1129. [dst2]"r"(dst+2*stride), [dst3]"r"(dst+3*stride),
  1130. [block]"r"(block), [ff_pw_4]"f"(ff_pw_4.f),
  1131. [ff_ph_4e7b]"f"(ff_ph_4e7b_u.f), [ff_ph_22a3]"f"(ff_ph_22a3_u.f)
  1132. : "memory"
  1133. );
  1134. #else
  1135. int i, t0, t1, t2, t3;
  1136. int16_t tmp[16];
  1137. for (i = 0; i < 4; i++) {
  1138. t0 = block[0 + i] + block[8 + i];
  1139. t1 = block[0 + i] - block[8 + i];
  1140. t2 = MUL_35468(block[4 + i]) - MUL_20091(block[12 + i]);
  1141. t3 = MUL_20091(block[4 + i]) + MUL_35468(block[12 + i]);
  1142. block[ 0 + i] = 0;
  1143. block[ 4 + i] = 0;
  1144. block[ 8 + i] = 0;
  1145. block[12 + i] = 0;
  1146. tmp[i * 4 + 0] = t0 + t3;
  1147. tmp[i * 4 + 1] = t1 + t2;
  1148. tmp[i * 4 + 2] = t1 - t2;
  1149. tmp[i * 4 + 3] = t0 - t3;
  1150. }
  1151. for (i = 0; i < 4; i++) {
  1152. t0 = tmp[0 + i] + tmp[8 + i];
  1153. t1 = tmp[0 + i] - tmp[8 + i];
  1154. t2 = MUL_35468(tmp[4 + i]) - MUL_20091(tmp[12 + i]);
  1155. t3 = MUL_20091(tmp[4 + i]) + MUL_35468(tmp[12 + i]);
  1156. dst[0] = av_clip_uint8(dst[0] + ((t0 + t3 + 4) >> 3));
  1157. dst[1] = av_clip_uint8(dst[1] + ((t1 + t2 + 4) >> 3));
  1158. dst[2] = av_clip_uint8(dst[2] + ((t1 - t2 + 4) >> 3));
  1159. dst[3] = av_clip_uint8(dst[3] + ((t0 - t3 + 4) >> 3));
  1160. dst += stride;
  1161. }
  1162. #endif
  1163. }
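/* DC-only IDCT+add: the rounded DC value is splatted across all four 16-bit
 * lanes with pshufh, added to each unpacked pixel row with a saturating
 * paddsh, and repacked with packushb, which doubles as the clip to
 * [0, 255]. */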
  1164. void ff_vp8_idct_dc_add_mmi(uint8_t *dst, int16_t block[16], ptrdiff_t stride)
  1165. {
  1166. #if 1
  1167. int dc = (block[0] + 4) >> 3;
  1168. double ftmp[6];
  1169. DECLARE_VAR_LOW32;
  1170. block[0] = 0;
  1171. __asm__ volatile (
  1172. "pxor %[ftmp0], %[ftmp0], %[ftmp0] \n\t"
  1173. "mtc1 %[dc], %[ftmp5] \n\t"
  1174. MMI_LWC1(%[ftmp1], %[dst0], 0x00)
  1175. MMI_LWC1(%[ftmp2], %[dst1], 0x00)
  1176. MMI_LWC1(%[ftmp3], %[dst2], 0x00)
  1177. MMI_LWC1(%[ftmp4], %[dst3], 0x00)
  1178. "pshufh %[ftmp5], %[ftmp5], %[ftmp0] \n\t"
  1179. "punpcklbh %[ftmp1], %[ftmp1], %[ftmp0] \n\t"
  1180. "punpcklbh %[ftmp2], %[ftmp2], %[ftmp0] \n\t"
  1181. "punpcklbh %[ftmp3], %[ftmp3], %[ftmp0] \n\t"
  1182. "punpcklbh %[ftmp4], %[ftmp4], %[ftmp0] \n\t"
  1183. "paddsh %[ftmp1], %[ftmp1], %[ftmp5] \n\t"
  1184. "paddsh %[ftmp2], %[ftmp2], %[ftmp5] \n\t"
  1185. "paddsh %[ftmp3], %[ftmp3], %[ftmp5] \n\t"
  1186. "paddsh %[ftmp4], %[ftmp4], %[ftmp5] \n\t"
  1187. "packushb %[ftmp1], %[ftmp1], %[ftmp0] \n\t"
  1188. "packushb %[ftmp2], %[ftmp2], %[ftmp0] \n\t"
  1189. "packushb %[ftmp3], %[ftmp3], %[ftmp0] \n\t"
  1190. "packushb %[ftmp4], %[ftmp4], %[ftmp0] \n\t"
  1191. MMI_SWC1(%[ftmp1], %[dst0], 0x00)
  1192. MMI_SWC1(%[ftmp2], %[dst1], 0x00)
  1193. MMI_SWC1(%[ftmp3], %[dst2], 0x00)
  1194. MMI_SWC1(%[ftmp4], %[dst3], 0x00)
  1195. : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]),
  1196. [ftmp2]"=&f"(ftmp[2]), [ftmp3]"=&f"(ftmp[3]),
  1197. [ftmp4]"=&f"(ftmp[4]),
  1198. RESTRICT_ASM_LOW32
  1199. [ftmp5]"=&f"(ftmp[5])
  1200. : [dst0]"r"(dst), [dst1]"r"(dst+stride),
  1201. [dst2]"r"(dst+2*stride), [dst3]"r"(dst+3*stride),
  1202. [dc]"r"(dc)
  1203. : "memory"
  1204. );
  1205. #else
  1206. int i, dc = (block[0] + 4) >> 3;
  1207. block[0] = 0;
  1208. for (i = 0; i < 4; i++) {
  1209. dst[0] = av_clip_uint8(dst[0] + dc);
  1210. dst[1] = av_clip_uint8(dst[1] + dc);
  1211. dst[2] = av_clip_uint8(dst[2] + dc);
  1212. dst[3] = av_clip_uint8(dst[3] + dc);
  1213. dst += stride;
  1214. }
  1215. #endif
  1216. }
  1217. void ff_vp8_idct_dc_add4y_mmi(uint8_t *dst, int16_t block[4][16],
  1218. ptrdiff_t stride)
  1219. {
  1220. ff_vp8_idct_dc_add_mmi(dst + 0, block[0], stride);
  1221. ff_vp8_idct_dc_add_mmi(dst + 4, block[1], stride);
  1222. ff_vp8_idct_dc_add_mmi(dst + 8, block[2], stride);
  1223. ff_vp8_idct_dc_add_mmi(dst + 12, block[3], stride);
  1224. }
  1225. void ff_vp8_idct_dc_add4uv_mmi(uint8_t *dst, int16_t block[4][16],
  1226. ptrdiff_t stride)
  1227. {
  1228. ff_vp8_idct_dc_add_mmi(dst + stride * 0 + 0, block[0], stride);
  1229. ff_vp8_idct_dc_add_mmi(dst + stride * 0 + 4, block[1], stride);
  1230. ff_vp8_idct_dc_add_mmi(dst + stride * 4 + 0, block[2], stride);
  1231. ff_vp8_idct_dc_add_mmi(dst + stride * 4 + 4, block[3], stride);
  1232. }
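/* The 16-pixel-wide macroblock-edge filters below are composed from 8-pixel
 * MMI helpers: luma edges are filtered as two adjacent 8-pixel halves, and
 * the chroma variants run the same helper once per plane. */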
  1233. // loop filter applied to edges between macroblocks
  1234. void ff_vp8_v_loop_filter16_mmi(uint8_t *dst, ptrdiff_t stride, int flim_E,
  1235. int flim_I, int hev_thresh)
  1236. {
  1237. vp8_v_loop_filter8_mmi(dst, stride, flim_E, flim_I, hev_thresh);
  1238. vp8_v_loop_filter8_mmi(dst + 8, stride, flim_E, flim_I, hev_thresh);
  1239. }
  1240. void ff_vp8_h_loop_filter16_mmi(uint8_t *dst, ptrdiff_t stride, int flim_E,
  1241. int flim_I, int hev_thresh)
  1242. {
  1243. vp8_h_loop_filter8_mmi(dst, stride, flim_E, flim_I, hev_thresh);
  1244. vp8_h_loop_filter8_mmi(dst + 8 * stride, stride, flim_E, flim_I,
  1245. hev_thresh);
  1246. }
  1247. void ff_vp8_v_loop_filter8uv_mmi(uint8_t *dstU, uint8_t *dstV, ptrdiff_t stride,
  1248. int flim_E, int flim_I, int hev_thresh)
  1249. {
  1250. vp8_v_loop_filter8_mmi(dstU, stride, flim_E, flim_I, hev_thresh);
  1251. vp8_v_loop_filter8_mmi(dstV, stride, flim_E, flim_I, hev_thresh);
  1252. }
  1253. void ff_vp8_h_loop_filter8uv_mmi(uint8_t *dstU, uint8_t *dstV, ptrdiff_t stride,
  1254. int flim_E, int flim_I, int hev_thresh)
  1255. {
  1256. vp8_h_loop_filter8_mmi(dstU, stride, flim_E, flim_I, hev_thresh);
  1257. vp8_h_loop_filter8_mmi(dstV, stride, flim_E, flim_I, hev_thresh);
  1258. }
  1259. // loop filter applied to inner macroblock edges
  1260. void ff_vp8_v_loop_filter16_inner_mmi(uint8_t *dst, ptrdiff_t stride,
  1261. int flim_E, int flim_I, int hev_thresh)
  1262. {
  1263. int i;
  1264. for (i = 0; i < 16; i++)
  1265. if (vp8_normal_limit(dst + i * 1, stride, flim_E, flim_I)) {
  1266. int hv = hev(dst + i * 1, stride, hev_thresh);
  1267. if (hv)
  1268. vp8_filter_common_is4tap(dst + i * 1, stride);
  1269. else
  1270. vp8_filter_common_isnot4tap(dst + i * 1, stride);
  1271. }
  1272. }
  1273. void ff_vp8_h_loop_filter16_inner_mmi(uint8_t *dst, ptrdiff_t stride,
  1274. int flim_E, int flim_I, int hev_thresh)
  1275. {
  1276. int i;
  1277. for (i = 0; i < 16; i++)
  1278. if (vp8_normal_limit(dst + i * stride, 1, flim_E, flim_I)) {
  1279. int hv = hev(dst + i * stride, 1, hev_thresh);
  1280. if (hv)
  1281. vp8_filter_common_is4tap(dst + i * stride, 1);
  1282. else
  1283. vp8_filter_common_isnot4tap(dst + i * stride, 1);
  1284. }
  1285. }
  1286. void ff_vp8_v_loop_filter8uv_inner_mmi(uint8_t *dstU, uint8_t *dstV,
  1287. ptrdiff_t stride, int flim_E, int flim_I, int hev_thresh)
  1288. {
  1289. vp8_v_loop_filter8_inner_mmi(dstU, stride, flim_E, flim_I, hev_thresh);
  1290. vp8_v_loop_filter8_inner_mmi(dstV, stride, flim_E, flim_I, hev_thresh);
  1291. }
  1292. void ff_vp8_h_loop_filter8uv_inner_mmi(uint8_t *dstU, uint8_t *dstV,
  1293. ptrdiff_t stride, int flim_E, int flim_I, int hev_thresh)
  1294. {
  1295. vp8_h_loop_filter8_inner_mmi(dstU, stride, flim_E, flim_I, hev_thresh);
  1296. vp8_h_loop_filter8_inner_mmi(dstV, stride, flim_E, flim_I, hev_thresh);
  1297. }
  1298. void ff_vp8_v_loop_filter_simple_mmi(uint8_t *dst, ptrdiff_t stride, int flim)
  1299. {
  1300. int i;
  1301. for (i = 0; i < 16; i++)
  1302. if (vp8_simple_limit(dst + i, stride, flim))
  1303. vp8_filter_common_is4tap(dst + i, stride);
  1304. }
  1305. void ff_vp8_h_loop_filter_simple_mmi(uint8_t *dst, ptrdiff_t stride, int flim)
  1306. {
  1307. int i;
  1308. for (i = 0; i < 16; i++)
  1309. if (vp8_simple_limit(dst + i * stride, 1, flim))
  1310. vp8_filter_common_is4tap(dst + i * stride, 1);
  1311. }
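/* Plain copy primitives. Potentially unaligned accesses use the classic
 * MIPS ldl/ldr + sdl/sdr pairs (lwl/lwr + swl/swr in the 4-byte case),
 * which transfer the left and right parts of a misaligned word; each loop
 * iteration copies two rows, hence the "addiu %[h], %[h], -0x02". */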
  1312. void ff_put_vp8_pixels16_mmi(uint8_t *dst, ptrdiff_t dststride, uint8_t *src,
  1313. ptrdiff_t srcstride, int h, int x, int y)
  1314. {
  1315. #if 1
  1316. double ftmp[2];
  1317. uint64_t tmp[2];
  1318. mips_reg addr[2];
  1319. DECLARE_VAR_ALL64;
  1320. __asm__ volatile (
  1321. "1: \n\t"
  1322. PTR_ADDU "%[addr0], %[src], %[srcstride] \n\t"
  1323. MMI_ULDC1(%[ftmp0], %[src], 0x00)
  1324. "ldl %[tmp0], 0x0f(%[src]) \n\t"
  1325. "ldr %[tmp0], 0x08(%[src]) \n\t"
  1326. MMI_ULDC1(%[ftmp1], %[addr0], 0x00)
  1327. "ldl %[tmp1], 0x0f(%[addr0]) \n\t"
  1328. "ldr %[tmp1], 0x08(%[addr0]) \n\t"
  1329. PTR_ADDU "%[addr1], %[dst], %[dststride] \n\t"
  1330. MMI_SDC1(%[ftmp0], %[dst], 0x00)
  1331. "sdl %[tmp0], 0x0f(%[dst]) \n\t"
  1332. "sdr %[tmp0], 0x08(%[dst]) \n\t"
  1333. "addiu %[h], %[h], -0x02 \n\t"
  1334. MMI_SDC1(%[ftmp1], %[addr1], 0x00)
  1335. PTR_ADDU "%[src], %[addr0], %[srcstride] \n\t"
  1336. "sdl %[tmp1], 0x0f(%[addr1]) \n\t"
  1337. "sdr %[tmp1], 0x08(%[addr1]) \n\t"
  1338. PTR_ADDU "%[dst], %[addr1], %[dststride] \n\t"
  1339. "bnez %[h], 1b \n\t"
  1340. : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]),
  1341. [tmp0]"=&r"(tmp[0]), [tmp1]"=&r"(tmp[1]),
  1342. RESTRICT_ASM_ALL64
  1343. [addr0]"=&r"(addr[0]), [addr1]"=&r"(addr[1]),
  1344. [dst]"+&r"(dst), [src]"+&r"(src),
  1345. [h]"+&r"(h)
  1346. : [dststride]"r"((mips_reg)dststride),
  1347. [srcstride]"r"((mips_reg)srcstride)
  1348. : "memory"
  1349. );
  1350. #else
  1351. int i;
  1352. for (i = 0; i < h; i++, dst += dststride, src += srcstride)
  1353. memcpy(dst, src, 16);
  1354. #endif
  1355. }
  1356. void ff_put_vp8_pixels8_mmi(uint8_t *dst, ptrdiff_t dststride, uint8_t *src,
  1357. ptrdiff_t srcstride, int h, int x, int y)
  1358. {
  1359. #if 1
  1360. double ftmp[1];
  1361. uint64_t tmp[1];
  1362. mips_reg addr[2];
  1363. DECLARE_VAR_ALL64;
  1364. __asm__ volatile (
  1365. "1: \n\t"
  1366. PTR_ADDU "%[addr0], %[src], %[srcstride] \n\t"
  1367. MMI_ULDC1(%[ftmp0], %[src], 0x00)
  1368. "ldl %[tmp0], 0x07(%[addr0]) \n\t"
  1369. "ldr %[tmp0], 0x00(%[addr0]) \n\t"
  1370. PTR_ADDU "%[addr1], %[dst], %[dststride] \n\t"
  1371. MMI_SDC1(%[ftmp0], %[dst], 0x00)
  1372. "addiu %[h], %[h], -0x02 \n\t"
  1373. "sdl %[tmp0], 0x07(%[addr1]) \n\t"
  1374. "sdr %[tmp0], 0x00(%[addr1]) \n\t"
  1375. PTR_ADDU "%[src], %[addr0], %[srcstride] \n\t"
  1376. PTR_ADDU "%[dst], %[addr1], %[dststride] \n\t"
  1377. "bnez %[h], 1b \n\t"
  1378. : [ftmp0]"=&f"(ftmp[0]), [tmp0]"=&r"(tmp[0]),
  1379. RESTRICT_ASM_ALL64
  1380. [addr0]"=&r"(addr[0]), [addr1]"=&r"(addr[1]),
  1381. [dst]"+&r"(dst), [src]"+&r"(src),
  1382. [h]"+&r"(h)
  1383. : [dststride]"r"((mips_reg)dststride),
  1384. [srcstride]"r"((mips_reg)srcstride)
  1385. : "memory"
  1386. );
  1387. #else
  1388. int i;
  1389. for (i = 0; i < h; i++, dst += dststride, src += srcstride)
  1390. memcpy(dst, src, 8);
  1391. #endif
  1392. }
  1393. void ff_put_vp8_pixels4_mmi(uint8_t *dst, ptrdiff_t dststride, uint8_t *src,
  1394. ptrdiff_t srcstride, int h, int x, int y)
  1395. {
  1396. #if 1
  1397. double ftmp[1];
  1398. uint64_t tmp[1];
  1399. mips_reg addr[2];
  1400. DECLARE_VAR_LOW32;
  1401. __asm__ volatile (
  1402. "1: \n\t"
  1403. PTR_ADDU "%[addr0], %[src], %[srcstride] \n\t"
  1404. MMI_LWC1(%[ftmp0], %[src], 0x00)
  1405. "lwl %[tmp0], 0x03(%[addr0]) \n\t"
  1406. "lwr %[tmp0], 0x00(%[addr0]) \n\t"
  1407. PTR_ADDU "%[addr1], %[dst], %[dststride] \n\t"
  1408. MMI_SWC1(%[ftmp0], %[dst], 0x00)
  1409. "addiu %[h], %[h], -0x02 \n\t"
  1410. "swl %[tmp0], 0x03(%[addr1]) \n\t"
  1411. "swr %[tmp0], 0x00(%[addr1]) \n\t"
  1412. PTR_ADDU "%[src], %[addr0], %[srcstride] \n\t"
  1413. PTR_ADDU "%[dst], %[addr1], %[dststride] \n\t"
  1414. "bnez %[h], 1b \n\t"
  1415. : [ftmp0]"=&f"(ftmp[0]), [tmp0]"=&r"(tmp[0]),
  1416. RESTRICT_ASM_LOW32
  1417. [addr0]"=&r"(addr[0]), [addr1]"=&r"(addr[1]),
  1418. [dst]"+&r"(dst), [src]"+&r"(src),
  1419. [h]"+&r"(h)
  1420. : [dststride]"r"((mips_reg)dststride),
  1421. [srcstride]"r"((mips_reg)srcstride)
  1422. : "memory"
  1423. );
  1424. #else
  1425. int i;
  1426. for (i = 0; i < h; i++, dst += dststride, src += srcstride)
  1427. memcpy(dst, src, 4);
  1428. #endif
  1429. }
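/* Horizontal 4-tap subpel filters. Each fourtap_subpel_filters[] entry is
 * loaded as one 64-bit constant per tap (apparently the 16-bit tap value
 * replicated across the four lanes); the taps are stored as magnitudes, so
 * the negative taps 1 and 4 are applied by subtracting their products,
 * matching the FILTER_4TAP reference expressions quoted in the comment
 * blocks. The +64 bias and >> 7 provide the rounding. */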
  1430. void ff_put_vp8_epel16_h4_mmi(uint8_t *dst, ptrdiff_t dststride, uint8_t *src,
  1431. ptrdiff_t srcstride, int h, int mx, int my)
  1432. {
  1433. #if 1
  1434. const uint64_t *filter = fourtap_subpel_filters[mx - 1];
  1435. double ftmp[9];
  1436. uint32_t tmp[1];
  1437. union av_intfloat64 filter1;
  1438. union av_intfloat64 filter2;
  1439. union av_intfloat64 filter3;
  1440. union av_intfloat64 filter4;
  1441. mips_reg src1, dst1;
  1442. DECLARE_VAR_ALL64;
  1443. filter1.i = filter[1];
  1444. filter2.i = filter[2];
  1445. filter3.i = filter[3];
  1446. filter4.i = filter[4];
  1447. /*
  1448. dst[0] = cm[(filter[2] * src[0] - filter[1] * src[-1] + filter[3] * src[1] - filter[4] * src[2] + 64) >> 7];
  1449. dst[1] = cm[(filter[2] * src[1] - filter[1] * src[ 0] + filter[3] * src[2] - filter[4] * src[3] + 64) >> 7];
  1450. dst[2] = cm[(filter[2] * src[2] - filter[1] * src[ 1] + filter[3] * src[3] - filter[4] * src[4] + 64) >> 7];
  1451. dst[3] = cm[(filter[2] * src[3] - filter[1] * src[ 2] + filter[3] * src[4] - filter[4] * src[5] + 64) >> 7];
  1452. dst[4] = cm[(filter[2] * src[4] - filter[1] * src[ 3] + filter[3] * src[5] - filter[4] * src[6] + 64) >> 7];
  1453. dst[5] = cm[(filter[2] * src[5] - filter[1] * src[ 4] + filter[3] * src[6] - filter[4] * src[7] + 64) >> 7];
  1454. dst[6] = cm[(filter[2] * src[6] - filter[1] * src[ 5] + filter[3] * src[7] - filter[4] * src[8] + 64) >> 7];
  1455. dst[7] = cm[(filter[2] * src[7] - filter[1] * src[ 6] + filter[3] * src[8] - filter[4] * src[9] + 64) >> 7];
  1456. dst[ 8] = cm[(filter[2] * src[ 8] - filter[1] * src[ 7] + filter[3] * src[ 9] - filter[4] * src[10] + 64) >> 7];
  1457. dst[ 9] = cm[(filter[2] * src[ 9] - filter[1] * src[ 8] + filter[3] * src[10] - filter[4] * src[11] + 64) >> 7];
  1458. dst[10] = cm[(filter[2] * src[10] - filter[1] * src[ 9] + filter[3] * src[11] - filter[4] * src[12] + 64) >> 7];
  1459. dst[11] = cm[(filter[2] * src[11] - filter[1] * src[10] + filter[3] * src[12] - filter[4] * src[13] + 64) >> 7];
  1460. dst[12] = cm[(filter[2] * src[12] - filter[1] * src[11] + filter[3] * src[13] - filter[4] * src[14] + 64) >> 7];
  1461. dst[13] = cm[(filter[2] * src[13] - filter[1] * src[12] + filter[3] * src[14] - filter[4] * src[15] + 64) >> 7];
  1462. dst[14] = cm[(filter[2] * src[14] - filter[1] * src[13] + filter[3] * src[15] - filter[4] * src[16] + 64) >> 7];
  1463. dst[15] = cm[(filter[2] * src[15] - filter[1] * src[14] + filter[3] * src[16] - filter[4] * src[17] + 64) >> 7];
  1464. */
  1465. __asm__ volatile (
  1466. "pxor %[ftmp0], %[ftmp0], %[ftmp0] \n\t"
  1467. "li %[tmp0], 0x07 \n\t"
  1468. "mtc1 %[tmp0], %[ftmp4] \n\t"
  1469. "1: \n\t"
  1470. // 0 - 7
  1471. PUT_VP8_EPEL8_H4_MMI(%[src], %[dst])
  1472. PTR_ADDIU "%[src1], %[src], 0x08 \n\t"
  1473. PTR_ADDIU "%[dst1], %[dst], 0x08 \n\t"
  1474. // 8 - 15
  1475. PUT_VP8_EPEL8_H4_MMI(%[src1], %[dst1])
  1476. "addiu %[h], %[h], -0x01 \n\t"
  1477. PTR_ADDU "%[src], %[src], %[srcstride] \n\t"
  1478. PTR_ADDU "%[dst], %[dst], %[dststride] \n\t"
  1479. "bnez %[h], 1b \n\t"
  1480. : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]),
  1481. [ftmp2]"=&f"(ftmp[2]), [ftmp3]"=&f"(ftmp[3]),
  1482. [ftmp4]"=&f"(ftmp[4]), [ftmp5]"=&f"(ftmp[5]),
  1483. [ftmp6]"=&f"(ftmp[6]), [ftmp7]"=&f"(ftmp[7]),
  1484. [ftmp8]"=&f"(ftmp[8]),
  1485. [tmp0]"=&r"(tmp[0]),
  1486. RESTRICT_ASM_ALL64
  1487. [dst1]"=&r"(dst1), [src1]"=&r"(src1),
  1488. [h]"+&r"(h),
  1489. [dst]"+&r"(dst), [src]"+&r"(src)
  1490. : [ff_pw_64]"f"(ff_pw_64.f),
  1491. [srcstride]"r"((mips_reg)srcstride),
  1492. [dststride]"r"((mips_reg)dststride),
  1493. [filter1]"f"(filter1.f), [filter2]"f"(filter2.f),
  1494. [filter3]"f"(filter3.f), [filter4]"f"(filter4.f)
  1495. : "memory"
  1496. );
  1497. #else
  1498. const uint8_t *filter = subpel_filters[mx - 1];
  1499. const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP;
  1500. int x, y;
  1501. for (y = 0; y < h; y++) {
  1502. for (x = 0; x < 16; x++)
  1503. dst[x] = FILTER_4TAP(src, filter, 1);
  1504. dst += dststride;
  1505. src += srcstride;
  1506. }
  1507. #endif
  1508. }
  1509. void ff_put_vp8_epel8_h4_mmi(uint8_t *dst, ptrdiff_t dststride, uint8_t *src,
  1510. ptrdiff_t srcstride, int h, int mx, int my)
  1511. {
  1512. #if 1
  1513. const uint64_t *filter = fourtap_subpel_filters[mx - 1];
  1514. double ftmp[9];
  1515. uint32_t tmp[1];
  1516. union av_intfloat64 filter1;
  1517. union av_intfloat64 filter2;
  1518. union av_intfloat64 filter3;
  1519. union av_intfloat64 filter4;
  1520. DECLARE_VAR_ALL64;
  1521. filter1.i = filter[1];
  1522. filter2.i = filter[2];
  1523. filter3.i = filter[3];
  1524. filter4.i = filter[4];
  1525. /*
  1526. dst[0] = cm[(filter[2] * src[0] - filter[1] * src[-1] + filter[3] * src[1] - filter[4] * src[2] + 64) >> 7];
  1527. dst[1] = cm[(filter[2] * src[1] - filter[1] * src[ 0] + filter[3] * src[2] - filter[4] * src[3] + 64) >> 7];
  1528. dst[2] = cm[(filter[2] * src[2] - filter[1] * src[ 1] + filter[3] * src[3] - filter[4] * src[4] + 64) >> 7];
  1529. dst[3] = cm[(filter[2] * src[3] - filter[1] * src[ 2] + filter[3] * src[4] - filter[4] * src[5] + 64) >> 7];
  1530. dst[4] = cm[(filter[2] * src[4] - filter[1] * src[ 3] + filter[3] * src[5] - filter[4] * src[6] + 64) >> 7];
  1531. dst[5] = cm[(filter[2] * src[5] - filter[1] * src[ 4] + filter[3] * src[6] - filter[4] * src[7] + 64) >> 7];
  1532. dst[6] = cm[(filter[2] * src[6] - filter[1] * src[ 5] + filter[3] * src[7] - filter[4] * src[8] + 64) >> 7];
  1533. dst[7] = cm[(filter[2] * src[7] - filter[1] * src[ 6] + filter[3] * src[8] - filter[4] * src[9] + 64) >> 7];
  1534. */
  1535. __asm__ volatile (
  1536. "pxor %[ftmp0], %[ftmp0], %[ftmp0] \n\t"
  1537. "li %[tmp0], 0x07 \n\t"
  1538. "mtc1 %[tmp0], %[ftmp4] \n\t"
  1539. "1: \n\t"
  1540. PUT_VP8_EPEL8_H4_MMI(%[src], %[dst])
  1541. "addiu %[h], %[h], -0x01 \n\t"
  1542. PTR_ADDU "%[src], %[src], %[srcstride] \n\t"
  1543. PTR_ADDU "%[dst], %[dst], %[dststride] \n\t"
  1544. "bnez %[h], 1b \n\t"
  1545. : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]),
  1546. [ftmp2]"=&f"(ftmp[2]), [ftmp3]"=&f"(ftmp[3]),
  1547. [ftmp4]"=&f"(ftmp[4]), [ftmp5]"=&f"(ftmp[5]),
  1548. [ftmp6]"=&f"(ftmp[6]), [ftmp7]"=&f"(ftmp[7]),
  1549. [ftmp8]"=&f"(ftmp[8]),
  1550. [tmp0]"=&r"(tmp[0]),
  1551. RESTRICT_ASM_ALL64
  1552. [h]"+&r"(h),
  1553. [dst]"+&r"(dst), [src]"+&r"(src)
  1554. : [ff_pw_64]"f"(ff_pw_64.f),
  1555. [srcstride]"r"((mips_reg)srcstride),
  1556. [dststride]"r"((mips_reg)dststride),
  1557. [filter1]"f"(filter1.f), [filter2]"f"(filter2.f),
  1558. [filter3]"f"(filter3.f), [filter4]"f"(filter4.f)
  1559. : "memory"
  1560. );
  1561. #else
  1562. const uint8_t *filter = subpel_filters[mx - 1];
  1563. const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP;
  1564. int x, y;
  1565. for (y = 0; y < h; y++) {
  1566. for (x = 0; x < 8; x++)
  1567. dst[x] = FILTER_4TAP(src, filter, 1);
  1568. dst += dststride;
  1569. src += srcstride;
  1570. }
  1571. #endif
  1572. }
  1573. void ff_put_vp8_epel4_h4_mmi(uint8_t *dst, ptrdiff_t dststride, uint8_t *src,
  1574. ptrdiff_t srcstride, int h, int mx, int my)
  1575. {
  1576. #if 1
  1577. const uint64_t *filter = fourtap_subpel_filters[mx - 1];
  1578. double ftmp[6];
  1579. uint32_t tmp[1];
  1580. union av_intfloat64 filter1;
  1581. union av_intfloat64 filter2;
  1582. union av_intfloat64 filter3;
  1583. union av_intfloat64 filter4;
  1584. DECLARE_VAR_LOW32;
  1585. filter1.i = filter[1];
  1586. filter2.i = filter[2];
  1587. filter3.i = filter[3];
  1588. filter4.i = filter[4];
  1589. /*
  1590. dst[0] = cm[(filter[2] * src[0] - filter[1] * src[-1] + filter[3] * src[1] - filter[4] * src[2] + 64) >> 7];
  1591. dst[1] = cm[(filter[2] * src[1] - filter[1] * src[ 0] + filter[3] * src[2] - filter[4] * src[3] + 64) >> 7];
  1592. dst[2] = cm[(filter[2] * src[2] - filter[1] * src[ 1] + filter[3] * src[3] - filter[4] * src[4] + 64) >> 7];
  1593. dst[3] = cm[(filter[2] * src[3] - filter[1] * src[ 2] + filter[3] * src[4] - filter[4] * src[5] + 64) >> 7];
  1594. */
  1595. __asm__ volatile (
  1596. "pxor %[ftmp0], %[ftmp0], %[ftmp0] \n\t"
  1597. "li %[tmp0], 0x07 \n\t"
  1598. "mtc1 %[tmp0], %[ftmp4] \n\t"
  1599. "1: \n\t"
  1600. PUT_VP8_EPEL4_H4_MMI(%[src], %[dst])
  1601. "addiu %[h], %[h], -0x01 \n\t"
  1602. PTR_ADDU "%[src], %[src], %[srcstride] \n\t"
  1603. PTR_ADDU "%[dst], %[dst], %[dststride] \n\t"
  1604. "bnez %[h], 1b \n\t"
  1605. : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]),
  1606. [ftmp2]"=&f"(ftmp[2]), [ftmp3]"=&f"(ftmp[3]),
  1607. [ftmp4]"=&f"(ftmp[4]), [ftmp5]"=&f"(ftmp[5]),
  1608. [tmp0]"=&r"(tmp[0]),
  1609. RESTRICT_ASM_LOW32
  1610. [h]"+&r"(h),
  1611. [dst]"+&r"(dst), [src]"+&r"(src)
  1612. : [ff_pw_64]"f"(ff_pw_64.f),
  1613. [srcstride]"r"((mips_reg)srcstride),
  1614. [dststride]"r"((mips_reg)dststride),
  1615. [filter1]"f"(filter1.f), [filter2]"f"(filter2.f),
  1616. [filter3]"f"(filter3.f), [filter4]"f"(filter4.f)
  1617. : "memory"
  1618. );
  1619. #else
  1620. const uint8_t *filter = subpel_filters[mx - 1];
  1621. const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP;
  1622. int x, y;
  1623. for (y = 0; y < h; y++) {
  1624. for (x = 0; x < 4; x++)
  1625. dst[x] = FILTER_4TAP(src, filter, 1);
  1626. dst += dststride;
  1627. src += srcstride;
  1628. }
  1629. #endif
  1630. }
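/* Horizontal 6-tap variants: same structure as the 4-tap filters above,
 * extended by the two outer taps filter[0] and filter[5], as spelled out by
 * the FILTER_6TAP reference expressions in the comments. */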
  1631. void ff_put_vp8_epel16_h6_mmi(uint8_t *dst, ptrdiff_t dststride, uint8_t *src,
  1632. ptrdiff_t srcstride, int h, int mx, int my)
  1633. {
  1634. #if 1
  1635. const uint64_t *filter = fourtap_subpel_filters[mx - 1];
  1636. double ftmp[9];
  1637. uint32_t tmp[1];
  1638. mips_reg src1, dst1;
  1639. union av_intfloat64 filter0;
  1640. union av_intfloat64 filter1;
  1641. union av_intfloat64 filter2;
  1642. union av_intfloat64 filter3;
  1643. union av_intfloat64 filter4;
  1644. union av_intfloat64 filter5;
  1645. DECLARE_VAR_ALL64;
  1646. filter0.i = filter[0];
  1647. filter1.i = filter[1];
  1648. filter2.i = filter[2];
  1649. filter3.i = filter[3];
  1650. filter4.i = filter[4];
  1651. filter5.i = filter[5];
  1652. /*
  1653. dst[ 0] = cm[(filter[2]*src[ 0] - filter[1]*src[-1] + filter[0]*src[-2] + filter[3]*src[ 1] - filter[4]*src[ 2] + filter[5]*src[ 3] + 64) >> 7];
  1654. dst[ 1] = cm[(filter[2]*src[ 1] - filter[1]*src[ 0] + filter[0]*src[-1] + filter[3]*src[ 2] - filter[4]*src[ 3] + filter[5]*src[ 4] + 64) >> 7];
  1655. dst[ 2] = cm[(filter[2]*src[ 2] - filter[1]*src[ 1] + filter[0]*src[ 0] + filter[3]*src[ 3] - filter[4]*src[ 4] + filter[5]*src[ 5] + 64) >> 7];
  1656. dst[ 3] = cm[(filter[2]*src[ 3] - filter[1]*src[ 2] + filter[0]*src[ 1] + filter[3]*src[ 4] - filter[4]*src[ 5] + filter[5]*src[ 6] + 64) >> 7];
  1657. dst[ 4] = cm[(filter[2]*src[ 4] - filter[1]*src[ 3] + filter[0]*src[ 2] + filter[3]*src[ 5] - filter[4]*src[ 6] + filter[5]*src[ 7] + 64) >> 7];
  1658. dst[ 5] = cm[(filter[2]*src[ 5] - filter[1]*src[ 4] + filter[0]*src[ 3] + filter[3]*src[ 6] - filter[4]*src[ 7] + filter[5]*src[ 8] + 64) >> 7];
  1659. dst[ 6] = cm[(filter[2]*src[ 6] - filter[1]*src[ 5] + filter[0]*src[ 4] + filter[3]*src[ 7] - filter[4]*src[ 8] + filter[5]*src[ 9] + 64) >> 7];
  1660. dst[ 7] = cm[(filter[2]*src[ 7] - filter[1]*src[ 6] + filter[0]*src[ 5] + filter[3]*src[ 8] - filter[4]*src[ 9] + filter[5]*src[10] + 64) >> 7];
  1661. dst[ 8] = cm[(filter[2]*src[ 8] - filter[1]*src[ 7] + filter[0]*src[ 6] + filter[3]*src[ 9] - filter[4]*src[10] + filter[5]*src[11] + 64) >> 7];
  1662. dst[ 9] = cm[(filter[2]*src[ 9] - filter[1]*src[ 8] + filter[0]*src[ 7] + filter[3]*src[10] - filter[4]*src[11] + filter[5]*src[12] + 64) >> 7];
  1663. dst[10] = cm[(filter[2]*src[10] - filter[1]*src[ 9] + filter[0]*src[ 8] + filter[3]*src[11] - filter[4]*src[12] + filter[5]*src[13] + 64) >> 7];
  1664. dst[11] = cm[(filter[2]*src[11] - filter[1]*src[10] + filter[0]*src[ 9] + filter[3]*src[12] - filter[4]*src[13] + filter[5]*src[14] + 64) >> 7];
  1665. dst[12] = cm[(filter[2]*src[12] - filter[1]*src[11] + filter[0]*src[10] + filter[3]*src[13] - filter[4]*src[14] + filter[5]*src[15] + 64) >> 7];
  1666. dst[13] = cm[(filter[2]*src[13] - filter[1]*src[12] + filter[0]*src[11] + filter[3]*src[14] - filter[4]*src[15] + filter[5]*src[16] + 64) >> 7];
  1667. dst[14] = cm[(filter[2]*src[14] - filter[1]*src[13] + filter[0]*src[12] + filter[3]*src[15] - filter[4]*src[16] + filter[5]*src[17] + 64) >> 7];
  1668. dst[15] = cm[(filter[2]*src[15] - filter[1]*src[14] + filter[0]*src[13] + filter[3]*src[16] - filter[4]*src[17] + filter[5]*src[18] + 64) >> 7];
  1669. */
  1670. __asm__ volatile (
  1671. "pxor %[ftmp0], %[ftmp0], %[ftmp0] \n\t"
  1672. "li %[tmp0], 0x07 \n\t"
  1673. "mtc1 %[tmp0], %[ftmp4] \n\t"
  1674. "1: \n\t"
  1675. // 0 - 7
  1676. PUT_VP8_EPEL8_H6_MMI(%[src], %[dst])
  1677. PTR_ADDIU "%[src1], %[src], 0x08 \n\t"
  1678. PTR_ADDIU "%[dst1], %[dst], 0x08 \n\t"
  1679. // 8 - 15
  1680. PUT_VP8_EPEL8_H6_MMI(%[src1], %[dst1])
  1681. "addiu %[h], %[h], -0x01 \n\t"
  1682. PTR_ADDU "%[src], %[src], %[srcstride] \n\t"
  1683. PTR_ADDU "%[dst], %[dst], %[dststride] \n\t"
  1684. "bnez %[h], 1b \n\t"
  1685. : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]),
  1686. [ftmp2]"=&f"(ftmp[2]), [ftmp3]"=&f"(ftmp[3]),
  1687. [ftmp4]"=&f"(ftmp[4]), [ftmp5]"=&f"(ftmp[5]),
  1688. [ftmp6]"=&f"(ftmp[6]), [ftmp7]"=&f"(ftmp[7]),
  1689. [ftmp8]"=&f"(ftmp[8]),
  1690. [tmp0]"=&r"(tmp[0]),
  1691. RESTRICT_ASM_ALL64
  1692. [dst1]"=&r"(dst1), [src1]"=&r"(src1),
  1693. [h]"+&r"(h),
  1694. [dst]"+&r"(dst), [src]"+&r"(src)
  1695. : [ff_pw_64]"f"(ff_pw_64.f),
  1696. [srcstride]"r"((mips_reg)srcstride),
  1697. [dststride]"r"((mips_reg)dststride),
  1698. [filter0]"f"(filter0.f), [filter1]"f"(filter1.f),
  1699. [filter2]"f"(filter2.f), [filter3]"f"(filter3.f),
  1700. [filter4]"f"(filter4.f), [filter5]"f"(filter5.f)
  1701. : "memory"
  1702. );
  1703. #else
  1704. const uint8_t *filter = subpel_filters[mx - 1];
  1705. const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP;
  1706. int x, y;
  1707. for (y = 0; y < h; y++) {
  1708. for (x = 0; x < 16; x++)
  1709. dst[x] = FILTER_6TAP(src, filter, 1);
  1710. dst += dststride;
  1711. src += srcstride;
  1712. }
  1713. #endif
  1714. }
  1715. void ff_put_vp8_epel8_h6_mmi(uint8_t *dst, ptrdiff_t dststride, uint8_t *src,
  1716. ptrdiff_t srcstride, int h, int mx, int my)
  1717. {
  1718. #if 1
  1719. const uint64_t *filter = fourtap_subpel_filters[mx - 1];
  1720. double ftmp[9];
  1721. uint32_t tmp[1];
  1722. union av_intfloat64 filter0;
  1723. union av_intfloat64 filter1;
  1724. union av_intfloat64 filter2;
  1725. union av_intfloat64 filter3;
  1726. union av_intfloat64 filter4;
  1727. union av_intfloat64 filter5;
  1728. DECLARE_VAR_ALL64;
  1729. filter0.i = filter[0];
  1730. filter1.i = filter[1];
  1731. filter2.i = filter[2];
  1732. filter3.i = filter[3];
  1733. filter4.i = filter[4];
  1734. filter5.i = filter[5];
  1735. /*
  1736. dst[0] = cm[(filter[2]*src[0] - filter[1]*src[-1] + filter[0]*src[-2] + filter[3]*src[1] - filter[4]*src[2] + filter[5]*src[ 3] + 64) >> 7];
  1737. dst[1] = cm[(filter[2]*src[1] - filter[1]*src[ 0] + filter[0]*src[-1] + filter[3]*src[2] - filter[4]*src[3] + filter[5]*src[ 4] + 64) >> 7];
  1738. dst[2] = cm[(filter[2]*src[2] - filter[1]*src[ 1] + filter[0]*src[ 0] + filter[3]*src[3] - filter[4]*src[4] + filter[5]*src[ 5] + 64) >> 7];
  1739. dst[3] = cm[(filter[2]*src[3] - filter[1]*src[ 2] + filter[0]*src[ 1] + filter[3]*src[4] - filter[4]*src[5] + filter[5]*src[ 6] + 64) >> 7];
  1740. dst[4] = cm[(filter[2]*src[4] - filter[1]*src[ 3] + filter[0]*src[ 2] + filter[3]*src[5] - filter[4]*src[6] + filter[5]*src[ 7] + 64) >> 7];
  1741. dst[5] = cm[(filter[2]*src[5] - filter[1]*src[ 4] + filter[0]*src[ 3] + filter[3]*src[6] - filter[4]*src[7] + filter[5]*src[ 8] + 64) >> 7];
  1742. dst[6] = cm[(filter[2]*src[6] - filter[1]*src[ 5] + filter[0]*src[ 4] + filter[3]*src[7] - filter[4]*src[8] + filter[5]*src[ 9] + 64) >> 7];
  1743. dst[7] = cm[(filter[2]*src[7] - filter[1]*src[ 6] + filter[0]*src[ 5] + filter[3]*src[8] - filter[4]*src[9] + filter[5]*src[10] + 64) >> 7];
  1744. */
  1745. __asm__ volatile (
  1746. "pxor %[ftmp0], %[ftmp0], %[ftmp0] \n\t"
  1747. "li %[tmp0], 0x07 \n\t"
  1748. "mtc1 %[tmp0], %[ftmp4] \n\t"
  1749. "1: \n\t"
  1750. PUT_VP8_EPEL8_H6_MMI(%[src], %[dst])
  1751. "addiu %[h], %[h], -0x01 \n\t"
  1752. PTR_ADDU "%[src], %[src], %[srcstride] \n\t"
  1753. PTR_ADDU "%[dst], %[dst], %[dststride] \n\t"
  1754. "bnez %[h], 1b \n\t"
  1755. : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]),
  1756. [ftmp2]"=&f"(ftmp[2]), [ftmp3]"=&f"(ftmp[3]),
  1757. [ftmp4]"=&f"(ftmp[4]), [ftmp5]"=&f"(ftmp[5]),
  1758. [ftmp6]"=&f"(ftmp[6]), [ftmp7]"=&f"(ftmp[7]),
  1759. [ftmp8]"=&f"(ftmp[8]),
  1760. [tmp0]"=&r"(tmp[0]),
  1761. RESTRICT_ASM_ALL64
  1762. [h]"+&r"(h),
  1763. [dst]"+&r"(dst), [src]"+&r"(src)
  1764. : [ff_pw_64]"f"(ff_pw_64.f),
  1765. [srcstride]"r"((mips_reg)srcstride),
  1766. [dststride]"r"((mips_reg)dststride),
  1767. [filter0]"f"(filter0.f), [filter1]"f"(filter1.f),
  1768. [filter2]"f"(filter2.f), [filter3]"f"(filter3.f),
  1769. [filter4]"f"(filter4.f), [filter5]"f"(filter5.f)
  1770. : "memory"
  1771. );
  1772. #else
  1773. const uint8_t *filter = subpel_filters[mx - 1];
  1774. const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP;
  1775. int x, y;
  1776. for (y = 0; y < h; y++) {
  1777. for (x = 0; x < 8; x++)
  1778. dst[x] = FILTER_6TAP(src, filter, 1);
  1779. dst += dststride;
  1780. src += srcstride;
  1781. }
  1782. #endif
  1783. }
  1784. void ff_put_vp8_epel4_h6_mmi(uint8_t *dst, ptrdiff_t dststride, uint8_t *src,
  1785. ptrdiff_t srcstride, int h, int mx, int my)
  1786. {
  1787. #if 1
  1788. const uint64_t *filter = fourtap_subpel_filters[mx - 1];
  1789. double ftmp[6];
  1790. uint32_t tmp[1];
  1791. union av_intfloat64 filter0;
  1792. union av_intfloat64 filter1;
  1793. union av_intfloat64 filter2;
  1794. union av_intfloat64 filter3;
  1795. union av_intfloat64 filter4;
  1796. union av_intfloat64 filter5;
  1797. DECLARE_VAR_LOW32;
  1798. filter0.i = filter[0];
  1799. filter1.i = filter[1];
  1800. filter2.i = filter[2];
  1801. filter3.i = filter[3];
  1802. filter4.i = filter[4];
  1803. filter5.i = filter[5];
  1804. /*
  1805. dst[0] = cm[(filter[2]*src[0] - filter[1]*src[-1] + filter[0]*src[-2] + filter[3]*src[1] - filter[4]*src[2] + filter[5]*src[ 3] + 64) >> 7];
  1806. dst[1] = cm[(filter[2]*src[1] - filter[1]*src[ 0] + filter[0]*src[-1] + filter[3]*src[2] - filter[4]*src[3] + filter[5]*src[ 4] + 64) >> 7];
  1807. dst[2] = cm[(filter[2]*src[2] - filter[1]*src[ 1] + filter[0]*src[ 0] + filter[3]*src[3] - filter[4]*src[4] + filter[5]*src[ 5] + 64) >> 7];
  1808. dst[3] = cm[(filter[2]*src[3] - filter[1]*src[ 2] + filter[0]*src[ 1] + filter[3]*src[4] - filter[4]*src[5] + filter[5]*src[ 6] + 64) >> 7];
  1809. */
  1810. __asm__ volatile (
  1811. "pxor %[ftmp0], %[ftmp0], %[ftmp0] \n\t"
  1812. "li %[tmp0], 0x07 \n\t"
  1813. "mtc1 %[tmp0], %[ftmp4] \n\t"
  1814. "1: \n\t"
  1815. PUT_VP8_EPEL4_H6_MMI(%[src], %[dst])
  1816. "addiu %[h], %[h], -0x01 \n\t"
  1817. PTR_ADDU "%[src], %[src], %[srcstride] \n\t"
  1818. PTR_ADDU "%[dst], %[dst], %[dststride] \n\t"
  1819. "bnez %[h], 1b \n\t"
  1820. : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]),
  1821. [ftmp2]"=&f"(ftmp[2]), [ftmp3]"=&f"(ftmp[3]),
  1822. [ftmp4]"=&f"(ftmp[4]), [ftmp5]"=&f"(ftmp[5]),
  1823. [tmp0]"=&r"(tmp[0]),
  1824. RESTRICT_ASM_LOW32
  1825. [h]"+&r"(h),
  1826. [dst]"+&r"(dst), [src]"+&r"(src)
  1827. : [ff_pw_64]"f"(ff_pw_64.f),
  1828. [srcstride]"r"((mips_reg)srcstride),
  1829. [dststride]"r"((mips_reg)dststride),
  1830. [filter0]"f"(filter0.f), [filter1]"f"(filter1.f),
  1831. [filter2]"f"(filter2.f), [filter3]"f"(filter3.f),
  1832. [filter4]"f"(filter4.f), [filter5]"f"(filter5.f)
  1833. : "memory"
  1834. );
  1835. #else
  1836. const uint8_t *filter = subpel_filters[mx - 1];
  1837. const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP;
  1838. int x, y;
  1839. for (y = 0; y < h; y++) {
  1840. for (x = 0; x < 4; x++)
  1841. dst[x] = FILTER_6TAP(src, filter, 1);
  1842. dst += dststride;
  1843. src += srcstride;
  1844. }
  1845. #endif
  1846. }
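/* Vertical counterparts: the arithmetic is identical to the horizontal
 * filters, but the four samples feeding each output come from neighbouring
 * rows (src[-srcstride] .. src[2*srcstride]) rather than neighbouring
 * columns, so the helper macros take the row stride as an extra operand. */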
  1847. void ff_put_vp8_epel16_v4_mmi(uint8_t *dst, ptrdiff_t dststride, uint8_t *src,
  1848. ptrdiff_t srcstride, int h, int mx, int my)
  1849. {
  1850. #if 1
  1851. const uint64_t *filter = fourtap_subpel_filters[my - 1];
  1852. double ftmp[9];
  1853. uint32_t tmp[1];
  1854. mips_reg src0, src1, dst0;
  1855. union av_intfloat64 filter1;
  1856. union av_intfloat64 filter2;
  1857. union av_intfloat64 filter3;
  1858. union av_intfloat64 filter4;
  1859. DECLARE_VAR_ALL64;
  1860. filter1.i = filter[1];
  1861. filter2.i = filter[2];
  1862. filter3.i = filter[3];
  1863. filter4.i = filter[4];
  1864. /*
  1865. dst[0] = cm[(filter[2] * src[0] - filter[1] * src[ -srcstride] + filter[3] * src[ srcstride] - filter[4] * src[ 2*srcstride] + 64) >> 7];
  1866. dst[1] = cm[(filter[2] * src[1] - filter[1] * src[1-srcstride] + filter[3] * src[1+srcstride] - filter[4] * src[1+2*srcstride] + 64) >> 7];
  1867. dst[2] = cm[(filter[2] * src[2] - filter[1] * src[2-srcstride] + filter[3] * src[2+srcstride] - filter[4] * src[2+2*srcstride] + 64) >> 7];
  1868. dst[3] = cm[(filter[2] * src[3] - filter[1] * src[3-srcstride] + filter[3] * src[3+srcstride] - filter[4] * src[3+2*srcstride] + 64) >> 7];
  1869. dst[4] = cm[(filter[2] * src[4] - filter[1] * src[4-srcstride] + filter[3] * src[4+srcstride] - filter[4] * src[4+2*srcstride] + 64) >> 7];
  1870. dst[5] = cm[(filter[2] * src[5] - filter[1] * src[5-srcstride] + filter[3] * src[5+srcstride] - filter[4] * src[5+2*srcstride] + 64) >> 7];
  1871. dst[6] = cm[(filter[2] * src[6] - filter[1] * src[6-srcstride] + filter[3] * src[6+srcstride] - filter[4] * src[6+2*srcstride] + 64) >> 7];
  1872. dst[7] = cm[(filter[2] * src[7] - filter[1] * src[7-srcstride] + filter[3] * src[7+srcstride] - filter[4] * src[7+2*srcstride] + 64) >> 7];
  1873. dst[ 8] = cm[(filter[2] * src[ 8] - filter[1] * src[ 8-srcstride] + filter[3] * src[ 8+srcstride] - filter[4] * src[ 8+2*srcstride] + 64) >> 7];
  1874. dst[ 9] = cm[(filter[2] * src[ 9] - filter[1] * src[ 9-srcstride] + filter[3] * src[ 9+srcstride] - filter[4] * src[ 9+2*srcstride] + 64) >> 7];
  1875. dst[10] = cm[(filter[2] * src[10] - filter[1] * src[10-srcstride] + filter[3] * src[10+srcstride] - filter[4] * src[10+2*srcstride] + 64) >> 7];
  1876. dst[11] = cm[(filter[2] * src[11] - filter[1] * src[11-srcstride] + filter[3] * src[11+srcstride] - filter[4] * src[11+2*srcstride] + 64) >> 7];
  1877. dst[12] = cm[(filter[2] * src[12] - filter[1] * src[12-srcstride] + filter[3] * src[12+srcstride] - filter[4] * src[12+2*srcstride] + 64) >> 7];
  1878. dst[13] = cm[(filter[2] * src[13] - filter[1] * src[13-srcstride] + filter[3] * src[13+srcstride] - filter[4] * src[13+2*srcstride] + 64) >> 7];
  1879. dst[14] = cm[(filter[2] * src[14] - filter[1] * src[14-srcstride] + filter[3] * src[14+srcstride] - filter[4] * src[14+2*srcstride] + 64) >> 7];
  1880. dst[15] = cm[(filter[2] * src[15] - filter[1] * src[15-srcstride] + filter[3] * src[15+srcstride] - filter[4] * src[15+2*srcstride] + 64) >> 7];
  1881. */
  1882. __asm__ volatile (
  1883. "pxor %[ftmp0], %[ftmp0], %[ftmp0] \n\t"
  1884. "li %[tmp0], 0x07 \n\t"
  1885. "mtc1 %[tmp0], %[ftmp4] \n\t"
  1886. "1: \n\t"
  1887. // 0 - 7
  1888. PUT_VP8_EPEL8_V4_MMI(%[src], %[src1], %[dst], %[srcstride])
  1889. PTR_ADDIU "%[src0], %[src], 0x08 \n\t"
  1890. PTR_ADDIU "%[dst0], %[dst], 0x08 \n\t"
  1891. // 8 - 15
  1892. PUT_VP8_EPEL8_V4_MMI(%[src0], %[src1], %[dst0], %[srcstride])
  1893. "addiu %[h], %[h], -0x01 \n\t"
  1894. PTR_ADDU "%[src], %[src], %[srcstride] \n\t"
  1895. PTR_ADDU "%[dst], %[dst], %[dststride] \n\t"
  1896. "bnez %[h], 1b \n\t"
  1897. : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]),
  1898. [ftmp2]"=&f"(ftmp[2]), [ftmp3]"=&f"(ftmp[3]),
  1899. [ftmp4]"=&f"(ftmp[4]), [ftmp5]"=&f"(ftmp[5]),
  1900. [ftmp6]"=&f"(ftmp[6]), [ftmp7]"=&f"(ftmp[7]),
  1901. [ftmp8]"=&f"(ftmp[8]),
  1902. [tmp0]"=&r"(tmp[0]),
  1903. RESTRICT_ASM_ALL64
  1904. [src0]"=&r"(src0), [dst0]"=&r"(dst0),
  1905. [src1]"=&r"(src1),
  1906. [h]"+&r"(h),
  1907. [dst]"+&r"(dst), [src]"+&r"(src)
  1908. : [ff_pw_64]"f"(ff_pw_64.f),
  1909. [srcstride]"r"((mips_reg)srcstride),
  1910. [dststride]"r"((mips_reg)dststride),
  1911. [filter1]"f"(filter1.f), [filter2]"f"(filter2.f),
  1912. [filter3]"f"(filter3.f), [filter4]"f"(filter4.f)
  1913. : "memory"
  1914. );
  1915. #else
  1916. const uint8_t *filter = subpel_filters[my - 1];
  1917. const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP;
  1918. int x, y;
  1919. for (y = 0; y < h; y++) {
  1920. for (x = 0; x < 16; x++)
  1921. dst[x] = FILTER_4TAP(src, filter, srcstride);
  1922. dst += dststride;
  1923. src += srcstride;
  1924. }
  1925. #endif
  1926. }
  1927. void ff_put_vp8_epel8_v4_mmi(uint8_t *dst, ptrdiff_t dststride, uint8_t *src,
  1928. ptrdiff_t srcstride, int h, int mx, int my)
  1929. {
  1930. #if 1
  1931. const uint64_t *filter = fourtap_subpel_filters[my - 1];
  1932. double ftmp[9];
  1933. uint32_t tmp[1];
  1934. mips_reg src1;
  1935. union av_intfloat64 filter1;
  1936. union av_intfloat64 filter2;
  1937. union av_intfloat64 filter3;
  1938. union av_intfloat64 filter4;
  1939. DECLARE_VAR_ALL64;
  1940. filter1.i = filter[1];
  1941. filter2.i = filter[2];
  1942. filter3.i = filter[3];
  1943. filter4.i = filter[4];
  1944. /*
  1945. dst[0] = cm[(filter[2] * src[0] - filter[1] * src[ -srcstride] + filter[3] * src[ srcstride] - filter[4] * src[ 2*srcstride] + 64) >> 7];
  1946. dst[1] = cm[(filter[2] * src[1] - filter[1] * src[1-srcstride] + filter[3] * src[1+srcstride] - filter[4] * src[1+2*srcstride] + 64) >> 7];
  1947. dst[2] = cm[(filter[2] * src[2] - filter[1] * src[2-srcstride] + filter[3] * src[2+srcstride] - filter[4] * src[2+2*srcstride] + 64) >> 7];
  1948. dst[3] = cm[(filter[2] * src[3] - filter[1] * src[3-srcstride] + filter[3] * src[3+srcstride] - filter[4] * src[3+2*srcstride] + 64) >> 7];
  1949. dst[4] = cm[(filter[2] * src[4] - filter[1] * src[4-srcstride] + filter[3] * src[4+srcstride] - filter[4] * src[4+2*srcstride] + 64) >> 7];
  1950. dst[5] = cm[(filter[2] * src[5] - filter[1] * src[5-srcstride] + filter[3] * src[5+srcstride] - filter[4] * src[5+2*srcstride] + 64) >> 7];
  1951. dst[6] = cm[(filter[2] * src[6] - filter[1] * src[6-srcstride] + filter[3] * src[6+srcstride] - filter[4] * src[6+2*srcstride] + 64) >> 7];
  1952. dst[7] = cm[(filter[2] * src[7] - filter[1] * src[7-srcstride] + filter[3] * src[7+srcstride] - filter[4] * src[7+2*srcstride] + 64) >> 7];
  1953. */
  1954. __asm__ volatile (
  1955. "pxor %[ftmp0], %[ftmp0], %[ftmp0] \n\t"
  1956. "li %[tmp0], 0x07 \n\t"
  1957. "mtc1 %[tmp0], %[ftmp4] \n\t"
  1958. "1: \n\t"
  1959. PUT_VP8_EPEL8_V4_MMI(%[src], %[src1], %[dst], %[srcstride])
  1960. "addiu %[h], %[h], -0x01 \n\t"
  1961. PTR_ADDU "%[src], %[src], %[srcstride] \n\t"
  1962. PTR_ADDU "%[dst], %[dst], %[dststride] \n\t"
  1963. "bnez %[h], 1b \n\t"
  1964. : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]),
  1965. [ftmp2]"=&f"(ftmp[2]), [ftmp3]"=&f"(ftmp[3]),
  1966. [ftmp4]"=&f"(ftmp[4]), [ftmp5]"=&f"(ftmp[5]),
  1967. [ftmp6]"=&f"(ftmp[6]), [ftmp7]"=&f"(ftmp[7]),
  1968. [ftmp8]"=&f"(ftmp[8]),
  1969. [tmp0]"=&r"(tmp[0]),
  1970. RESTRICT_ASM_ALL64
  1971. [src1]"=&r"(src1),
  1972. [h]"+&r"(h),
  1973. [dst]"+&r"(dst), [src]"+&r"(src)
  1974. : [ff_pw_64]"f"(ff_pw_64.f),
  1975. [srcstride]"r"((mips_reg)srcstride),
  1976. [dststride]"r"((mips_reg)dststride),
  1977. [filter1]"f"(filter1.f), [filter2]"f"(filter2.f),
  1978. [filter3]"f"(filter3.f), [filter4]"f"(filter4.f)
  1979. : "memory"
  1980. );
  1981. #else
  1982. const uint8_t *filter = subpel_filters[my - 1];
  1983. const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP;
  1984. int x, y;
  1985. for (y = 0; y < h; y++) {
  1986. for (x = 0; x < 8; x++)
  1987. dst[x] = FILTER_4TAP(src, filter, srcstride);
  1988. dst += dststride;
  1989. src += srcstride;
  1990. }
  1991. #endif
  1992. }
  1993. void ff_put_vp8_epel4_v4_mmi(uint8_t *dst, ptrdiff_t dststride, uint8_t *src,
  1994. ptrdiff_t srcstride, int h, int mx, int my)
  1995. {
  1996. #if 1
  1997. const uint64_t *filter = fourtap_subpel_filters[my - 1];
  1998. double ftmp[6];
  1999. uint32_t tmp[1];
  2000. mips_reg src1;
  2001. union av_intfloat64 filter1;
  2002. union av_intfloat64 filter2;
  2003. union av_intfloat64 filter3;
  2004. union av_intfloat64 filter4;
  2005. DECLARE_VAR_LOW32;
  2006. filter1.i = filter[1];
  2007. filter2.i = filter[2];
  2008. filter3.i = filter[3];
  2009. filter4.i = filter[4];
  2010. /*
  2011. dst[0] = cm[(filter[2] * src[0] - filter[1] * src[ -srcstride] + filter[3] * src[ srcstride] - filter[4] * src[ 2*srcstride] + 64) >> 7];
  2012. dst[1] = cm[(filter[2] * src[1] - filter[1] * src[1-srcstride] + filter[3] * src[1+srcstride] - filter[4] * src[1+2*srcstride] + 64) >> 7];
  2013. dst[2] = cm[(filter[2] * src[2] - filter[1] * src[2-srcstride] + filter[3] * src[2+srcstride] - filter[4] * src[2+2*srcstride] + 64) >> 7];
  2014. dst[3] = cm[(filter[2] * src[3] - filter[1] * src[3-srcstride] + filter[3] * src[3+srcstride] - filter[4] * src[3+2*srcstride] + 64) >> 7];
  2015. */
  2016. __asm__ volatile (
  2017. "pxor %[ftmp0], %[ftmp0], %[ftmp0] \n\t"
  2018. "li %[tmp0], 0x07 \n\t"
  2019. "mtc1 %[tmp0], %[ftmp4] \n\t"
  2020. "1: \n\t"
  2021. PUT_VP8_EPEL4_V4_MMI(%[src], %[src1], %[dst], %[srcstride])
  2022. "addiu %[h], %[h], -0x01 \n\t"
  2023. PTR_ADDU "%[src], %[src], %[srcstride] \n\t"
  2024. PTR_ADDU "%[dst], %[dst], %[dststride] \n\t"
  2025. "bnez %[h], 1b \n\t"
  2026. : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]),
  2027. [ftmp2]"=&f"(ftmp[2]), [ftmp3]"=&f"(ftmp[3]),
  2028. [ftmp4]"=&f"(ftmp[4]), [ftmp5]"=&f"(ftmp[5]),
  2029. [tmp0]"=&r"(tmp[0]),
  2030. RESTRICT_ASM_LOW32
  2031. [src1]"=&r"(src1),
  2032. [h]"+&r"(h),
  2033. [dst]"+&r"(dst), [src]"+&r"(src)
  2034. : [ff_pw_64]"f"(ff_pw_64.f),
  2035. [srcstride]"r"((mips_reg)srcstride),
  2036. [dststride]"r"((mips_reg)dststride),
  2037. [filter1]"f"(filter1.f), [filter2]"f"(filter2.f),
  2038. [filter3]"f"(filter3.f), [filter4]"f"(filter4.f)
  2039. : "memory"
  2040. );
  2041. #else
  2042. const uint8_t *filter = subpel_filters[my - 1];
  2043. const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP;
  2044. int x, y;
  2045. for (y = 0; y < h; y++) {
  2046. for (x = 0; x < 4; x++)
  2047. dst[x] = FILTER_4TAP(src, filter, srcstride);
  2048. dst += dststride;
  2049. src += srcstride;
  2050. }
  2051. #endif
  2052. }
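/* Vertical 6-tap variants, consuming the six rows src[-2*srcstride] ..
 * src[3*srcstride] for each output row. */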
  2053. void ff_put_vp8_epel16_v6_mmi(uint8_t *dst, ptrdiff_t dststride, uint8_t *src,
  2054. ptrdiff_t srcstride, int h, int mx, int my)
  2055. {
  2056. #if 1
  2057. const uint64_t *filter = fourtap_subpel_filters[my - 1];
  2058. double ftmp[9];
  2059. uint32_t tmp[1];
  2060. mips_reg src0, src1, dst0;
  2061. union av_intfloat64 filter0;
  2062. union av_intfloat64 filter1;
  2063. union av_intfloat64 filter2;
  2064. union av_intfloat64 filter3;
  2065. union av_intfloat64 filter4;
  2066. union av_intfloat64 filter5;
  2067. DECLARE_VAR_ALL64;
  2068. filter0.i = filter[0];
  2069. filter1.i = filter[1];
  2070. filter2.i = filter[2];
  2071. filter3.i = filter[3];
  2072. filter4.i = filter[4];
  2073. filter5.i = filter[5];
  2074. /*
  2075. dst[0] = cm[(filter[2]*src[0] - filter[1]*src[0-srcstride] + filter[0]*src[0-2*srcstride] + filter[3]*src[0+srcstride] - filter[4]*src[0+2*srcstride] + filter[5]*src[0+3*srcstride] + 64) >> 7];
  2076. dst[1] = cm[(filter[2]*src[1] - filter[1]*src[1-srcstride] + filter[0]*src[1-2*srcstride] + filter[3]*src[1+srcstride] - filter[4]*src[1+2*srcstride] + filter[5]*src[1+3*srcstride] + 64) >> 7];
  2077. dst[2] = cm[(filter[2]*src[2] - filter[1]*src[2-srcstride] + filter[0]*src[2-2*srcstride] + filter[3]*src[2+srcstride] - filter[4]*src[2+2*srcstride] + filter[5]*src[2+3*srcstride] + 64) >> 7];
  2078. dst[3] = cm[(filter[2]*src[3] - filter[1]*src[3-srcstride] + filter[0]*src[3-2*srcstride] + filter[3]*src[3+srcstride] - filter[4]*src[3+2*srcstride] + filter[5]*src[3+3*srcstride] + 64) >> 7];
  2079. dst[4] = cm[(filter[2]*src[4] - filter[1]*src[4-srcstride] + filter[0]*src[4-2*srcstride] + filter[3]*src[4+srcstride] - filter[4]*src[4+2*srcstride] + filter[5]*src[4+3*srcstride] + 64) >> 7];
  2080. dst[5] = cm[(filter[2]*src[5] - filter[1]*src[5-srcstride] + filter[0]*src[5-2*srcstride] + filter[3]*src[5+srcstride] - filter[4]*src[5+2*srcstride] + filter[5]*src[5+3*srcstride] + 64) >> 7];
  2081. dst[6] = cm[(filter[2]*src[6] - filter[1]*src[6-srcstride] + filter[0]*src[6-2*srcstride] + filter[3]*src[6+srcstride] - filter[4]*src[6+2*srcstride] + filter[5]*src[6+3*srcstride] + 64) >> 7];
  2082. dst[7] = cm[(filter[2]*src[7] - filter[1]*src[7-srcstride] + filter[0]*src[7-2*srcstride] + filter[3]*src[7+srcstride] - filter[4]*src[7+2*srcstride] + filter[5]*src[7+3*srcstride] + 64) >> 7];
  2083. dst[ 8] = cm[(filter[2]*src[ 8] - filter[1]*src[ 8-srcstride] + filter[0]*src[ 8-2*srcstride] + filter[3]*src[ 8+srcstride] - filter[4]*src[ 8+2*srcstride] + filter[5]*src[ 8+3*srcstride] + 64) >> 7];
  2084. dst[ 9] = cm[(filter[2]*src[ 9] - filter[1]*src[ 9-srcstride] + filter[0]*src[ 9-2*srcstride] + filter[3]*src[ 9+srcstride] - filter[4]*src[ 9+2*srcstride] + filter[5]*src[ 9+3*srcstride] + 64) >> 7];
  2085. dst[10] = cm[(filter[2]*src[10] - filter[1]*src[10-srcstride] + filter[0]*src[10-2*srcstride] + filter[3]*src[10+srcstride] - filter[4]*src[10+2*srcstride] + filter[5]*src[10+3*srcstride] + 64) >> 7];
  2086. dst[11] = cm[(filter[2]*src[11] - filter[1]*src[11-srcstride] + filter[0]*src[11-2*srcstride] + filter[3]*src[11+srcstride] - filter[4]*src[11+2*srcstride] + filter[5]*src[11+3*srcstride] + 64) >> 7];
  2087. dst[12] = cm[(filter[2]*src[12] - filter[1]*src[12-srcstride] + filter[0]*src[12-2*srcstride] + filter[3]*src[12+srcstride] - filter[4]*src[12+2*srcstride] + filter[5]*src[12+3*srcstride] + 64) >> 7];
  2088. dst[13] = cm[(filter[2]*src[13] - filter[1]*src[13-srcstride] + filter[0]*src[13-2*srcstride] + filter[3]*src[13+srcstride] - filter[4]*src[13+2*srcstride] + filter[5]*src[13+3*srcstride] + 64) >> 7];
  2089. dst[14] = cm[(filter[2]*src[14] - filter[1]*src[14-srcstride] + filter[0]*src[14-2*srcstride] + filter[3]*src[14+srcstride] - filter[4]*src[14+2*srcstride] + filter[5]*src[14+3*srcstride] + 64) >> 7];
  2090. dst[15] = cm[(filter[2]*src[15] - filter[1]*src[15-srcstride] + filter[0]*src[15-2*srcstride] + filter[3]*src[15+srcstride] - filter[4]*src[15+2*srcstride] + filter[5]*src[15+3*srcstride] + 64) >> 7];
  2091. */
  2092. __asm__ volatile (
  2093. "pxor %[ftmp0], %[ftmp0], %[ftmp0] \n\t"
  2094. "li %[tmp0], 0x07 \n\t"
  2095. "mtc1 %[tmp0], %[ftmp4] \n\t"
  2096. "1: \n\t"
  2097. // 0 - 7
  2098. PUT_VP8_EPEL8_V6_MMI(%[src], %[src1], %[dst], %[srcstride])
  2099. PTR_ADDIU "%[src0], %[src], 0x08 \n\t"
  2100. PTR_ADDIU "%[dst0], %[dst], 0x08 \n\t"
  2101. // 8 - 15
  2102. PUT_VP8_EPEL8_V6_MMI(%[src0], %[src1], %[dst0], %[srcstride])
  2103. "addiu %[h], %[h], -0x01 \n\t"
  2104. PTR_ADDU "%[src], %[src], %[srcstride] \n\t"
  2105. PTR_ADDU "%[dst], %[dst], %[dststride] \n\t"
  2106. "bnez %[h], 1b \n\t"
  2107. : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]),
  2108. [ftmp2]"=&f"(ftmp[2]), [ftmp3]"=&f"(ftmp[3]),
  2109. [ftmp4]"=&f"(ftmp[4]), [ftmp5]"=&f"(ftmp[5]),
  2110. [ftmp6]"=&f"(ftmp[6]), [ftmp7]"=&f"(ftmp[7]),
  2111. [ftmp8]"=&f"(ftmp[8]),
  2112. [tmp0]"=&r"(tmp[0]),
  2113. RESTRICT_ASM_ALL64
  2114. [src0]"=&r"(src0), [dst0]"=&r"(dst0),
  2115. [src1]"=&r"(src1),
  2116. [h]"+&r"(h),
  2117. [dst]"+&r"(dst), [src]"+&r"(src)
  2118. : [ff_pw_64]"f"(ff_pw_64.f),
  2119. [srcstride]"r"((mips_reg)srcstride),
  2120. [dststride]"r"((mips_reg)dststride),
  2121. [filter0]"f"(filter0.f), [filter1]"f"(filter1.f),
  2122. [filter2]"f"(filter2.f), [filter3]"f"(filter3.f),
  2123. [filter4]"f"(filter4.f), [filter5]"f"(filter5.f)
  2124. : "memory"
  2125. );
  2126. #else
  2127. const uint8_t *filter = subpel_filters[my - 1];
  2128. const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP;
  2129. int x, y;
  2130. for (y = 0; y < h; y++) {
  2131. for (x = 0; x < 16; x++)
  2132. dst[x] = FILTER_6TAP(src, filter, srcstride);
  2133. dst += dststride;
  2134. src += srcstride;
  2135. }
  2136. #endif
  2137. }
  2138. void ff_put_vp8_epel8_v6_mmi(uint8_t *dst, ptrdiff_t dststride, uint8_t *src,
  2139. ptrdiff_t srcstride, int h, int mx, int my)
  2140. {
  2141. #if 1
  2142. const uint64_t *filter = fourtap_subpel_filters[my - 1];
  2143. double ftmp[9];
  2144. uint32_t tmp[1];
  2145. mips_reg src1;
  2146. union av_intfloat64 filter0;
  2147. union av_intfloat64 filter1;
  2148. union av_intfloat64 filter2;
  2149. union av_intfloat64 filter3;
  2150. union av_intfloat64 filter4;
  2151. union av_intfloat64 filter5;
  2152. DECLARE_VAR_ALL64;
  2153. filter0.i = filter[0];
  2154. filter1.i = filter[1];
  2155. filter2.i = filter[2];
  2156. filter3.i = filter[3];
  2157. filter4.i = filter[4];
  2158. filter5.i = filter[5];
  2159. /*
  2160. dst[0] = cm[(filter[2]*src[0] - filter[1]*src[0-srcstride] + filter[0]*src[0-2*srcstride] + filter[3]*src[0+srcstride] - filter[4]*src[0+2*srcstride] + filter[5]*src[0+3*srcstride] + 64) >> 7];
  2161. dst[1] = cm[(filter[2]*src[1] - filter[1]*src[1-srcstride] + filter[0]*src[1-2*srcstride] + filter[3]*src[1+srcstride] - filter[4]*src[1+2*srcstride] + filter[5]*src[1+3*srcstride] + 64) >> 7];
  2162. dst[2] = cm[(filter[2]*src[2] - filter[1]*src[2-srcstride] + filter[0]*src[2-2*srcstride] + filter[3]*src[2+srcstride] - filter[4]*src[2+2*srcstride] + filter[5]*src[2+3*srcstride] + 64) >> 7];
  2163. dst[3] = cm[(filter[2]*src[3] - filter[1]*src[3-srcstride] + filter[0]*src[3-2*srcstride] + filter[3]*src[3+srcstride] - filter[4]*src[3+2*srcstride] + filter[5]*src[3+3*srcstride] + 64) >> 7];
  2164. dst[4] = cm[(filter[2]*src[4] - filter[1]*src[4-srcstride] + filter[0]*src[4-2*srcstride] + filter[3]*src[4+srcstride] - filter[4]*src[4+2*srcstride] + filter[5]*src[4+3*srcstride] + 64) >> 7];
  2165. dst[5] = cm[(filter[2]*src[5] - filter[1]*src[5-srcstride] + filter[0]*src[5-2*srcstride] + filter[3]*src[5+srcstride] - filter[4]*src[5+2*srcstride] + filter[5]*src[5+3*srcstride] + 64) >> 7];
  2166. dst[6] = cm[(filter[2]*src[6] - filter[1]*src[6-srcstride] + filter[0]*src[6-2*srcstride] + filter[3]*src[6+srcstride] - filter[4]*src[6+2*srcstride] + filter[5]*src[6+3*srcstride] + 64) >> 7];
  2167. dst[7] = cm[(filter[2]*src[7] - filter[1]*src[7-srcstride] + filter[0]*src[7-2*srcstride] + filter[3]*src[7+srcstride] - filter[4]*src[7+2*srcstride] + filter[5]*src[7+3*srcstride] + 64) >> 7];
  2168. */
  2169. __asm__ volatile (
  2170. "pxor %[ftmp0], %[ftmp0], %[ftmp0] \n\t"
  2171. "li %[tmp0], 0x07 \n\t"
  2172. "mtc1 %[tmp0], %[ftmp4] \n\t"
  2173. "1: \n\t"
  2174. PUT_VP8_EPEL8_V6_MMI(%[src], %[src1], %[dst], %[srcstride])
  2175. "addiu %[h], %[h], -0x01 \n\t"
  2176. PTR_ADDU "%[src], %[src], %[srcstride] \n\t"
  2177. PTR_ADDU "%[dst], %[dst], %[dststride] \n\t"
  2178. "bnez %[h], 1b \n\t"
  2179. : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]),
  2180. [ftmp2]"=&f"(ftmp[2]), [ftmp3]"=&f"(ftmp[3]),
  2181. [ftmp4]"=&f"(ftmp[4]), [ftmp5]"=&f"(ftmp[5]),
  2182. [ftmp6]"=&f"(ftmp[6]), [ftmp7]"=&f"(ftmp[7]),
  2183. [ftmp8]"=&f"(ftmp[8]),
  2184. [tmp0]"=&r"(tmp[0]),
  2185. RESTRICT_ASM_ALL64
  2186. [src1]"=&r"(src1),
  2187. [h]"+&r"(h),
  2188. [dst]"+&r"(dst), [src]"+&r"(src)
  2189. : [ff_pw_64]"f"(ff_pw_64.f),
  2190. [srcstride]"r"((mips_reg)srcstride),
  2191. [dststride]"r"((mips_reg)dststride),
  2192. [filter0]"f"(filter0.f), [filter1]"f"(filter1.f),
  2193. [filter2]"f"(filter2.f), [filter3]"f"(filter3.f),
  2194. [filter4]"f"(filter4.f), [filter5]"f"(filter5.f)
  2195. : "memory"
  2196. );
  2197. #else
  2198. const uint8_t *filter = subpel_filters[my - 1];
  2199. const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP;
  2200. int x, y;
  2201. for (y = 0; y < h; y++) {
  2202. for (x = 0; x < 8; x++)
  2203. dst[x] = FILTER_6TAP(src, filter, srcstride);
  2204. dst += dststride;
  2205. src += srcstride;
  2206. }
  2207. #endif
  2208. }
void ff_put_vp8_epel4_v6_mmi(uint8_t *dst, ptrdiff_t dststride, uint8_t *src,
                             ptrdiff_t srcstride, int h, int mx, int my)
{
#if 1
    const uint64_t *filter = fourtap_subpel_filters[my - 1];
    double ftmp[6];
    uint32_t tmp[1];
    mips_reg src1;
    union av_intfloat64 filter0;
    union av_intfloat64 filter1;
    union av_intfloat64 filter2;
    union av_intfloat64 filter3;
    union av_intfloat64 filter4;
    union av_intfloat64 filter5;
    DECLARE_VAR_LOW32;

    filter0.i = filter[0];
    filter1.i = filter[1];
    filter2.i = filter[2];
    filter3.i = filter[3];
    filter4.i = filter[4];
    filter5.i = filter[5];

    /*
    dst[0] = cm[(filter[2]*src[0] - filter[1]*src[0-srcstride] + filter[0]*src[0-2*srcstride] + filter[3]*src[0+srcstride] - filter[4]*src[0+2*srcstride] + filter[5]*src[0+3*srcstride] + 64) >> 7];
    dst[1] = cm[(filter[2]*src[1] - filter[1]*src[1-srcstride] + filter[0]*src[1-2*srcstride] + filter[3]*src[1+srcstride] - filter[4]*src[1+2*srcstride] + filter[5]*src[1+3*srcstride] + 64) >> 7];
    dst[2] = cm[(filter[2]*src[2] - filter[1]*src[2-srcstride] + filter[0]*src[2-2*srcstride] + filter[3]*src[2+srcstride] - filter[4]*src[2+2*srcstride] + filter[5]*src[2+3*srcstride] + 64) >> 7];
    dst[3] = cm[(filter[2]*src[3] - filter[1]*src[3-srcstride] + filter[0]*src[3-2*srcstride] + filter[3]*src[3+srcstride] - filter[4]*src[3+2*srcstride] + filter[5]*src[3+3*srcstride] + 64) >> 7];
    */
    __asm__ volatile (
        "pxor       %[ftmp0],   %[ftmp0],   %[ftmp0]        \n\t"
        "li         %[tmp0],    0x07                        \n\t"
        "mtc1       %[tmp0],    %[ftmp4]                    \n\t"
        "1:                                                 \n\t"
        PUT_VP8_EPEL4_V6_MMI(%[src], %[src1], %[dst], %[srcstride])
        "addiu      %[h],       %[h],       -0x01           \n\t"
        PTR_ADDU   "%[src],     %[src],     %[srcstride]    \n\t"
        PTR_ADDU   "%[dst],     %[dst],     %[dststride]    \n\t"
        "bnez       %[h],       1b                          \n\t"
        : [ftmp0]"=&f"(ftmp[0]),    [ftmp1]"=&f"(ftmp[1]),
          [ftmp2]"=&f"(ftmp[2]),    [ftmp3]"=&f"(ftmp[3]),
          [ftmp4]"=&f"(ftmp[4]),    [ftmp5]"=&f"(ftmp[5]),
          [tmp0]"=&r"(tmp[0]),
          RESTRICT_ASM_LOW32
          [src1]"=&r"(src1),
          [h]"+&r"(h),
          [dst]"+&r"(dst),          [src]"+&r"(src)
        : [ff_pw_64]"f"(ff_pw_64.f),
          [srcstride]"r"((mips_reg)srcstride),
          [dststride]"r"((mips_reg)dststride),
          [filter0]"f"(filter0.f),  [filter1]"f"(filter1.f),
          [filter2]"f"(filter2.f),  [filter3]"f"(filter3.f),
          [filter4]"f"(filter4.f),  [filter5]"f"(filter5.f)
        : "memory"
    );
#else
    const uint8_t *filter = subpel_filters[my - 1];
    const uint8_t *cm     = ff_crop_tab + MAX_NEG_CROP;
    int x, y;

    for (y = 0; y < h; y++) {
        for (x = 0; x < 4; x++)
            dst[x] = FILTER_6TAP(src, filter, srcstride);
        dst += dststride;
        src += srcstride;
    }
#endif
}

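/*
 * The ff_put_vp8_epel*_h?v? functions below are separable two-pass filters:
 * a horizontal pass writes into an on-stack temp buffer that also covers the
 * extra rows the vertical taps reach (one above / two below for the 4-tap
 * vertical pass, two above / three below for the 6-tap pass), then a vertical
 * pass filters out of that buffer.  A minimal scalar sketch of the pattern,
 * with hypothetical hfilter()/vfilter() helpers standing in for the MMI
 * single-pass kernels (the real code calls the ff_put_vp8_epel*_h?/v?_mmi
 * functions directly):
 *
 *     uint8_t tmp[16 * 21];                       // widest case: 16 x (16 + 5) rows
 *     hfilter(tmp, 16, src - 2 * srcstride, srcstride, h + 5);  // include lead-in/out rows
 *     vfilter(dst, dststride, tmp + 2 * 16, 16, h);             // skip the two lead-in rows
 */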
void ff_put_vp8_epel16_h4v4_mmi(uint8_t *dst, ptrdiff_t dststride, uint8_t *src,
                                ptrdiff_t srcstride, int h, int mx, int my)
{
#if 1
    DECLARE_ALIGNED(8, uint8_t, tmp_array[560]);
    uint8_t *tmp = tmp_array;

    src -= srcstride;
    ff_put_vp8_epel16_h4_mmi(tmp, 16, src, srcstride, h + 3, mx, my);
    tmp = tmp_array + 16;
    ff_put_vp8_epel16_v4_mmi(dst, dststride, tmp, 16, h, mx, my);
#else
    const uint8_t *filter = subpel_filters[mx - 1];
    const uint8_t *cm     = ff_crop_tab + MAX_NEG_CROP;
    int x, y;
    uint8_t tmp_array[560];
    uint8_t *tmp = tmp_array;

    src -= srcstride;

    for (y = 0; y < h + 3; y++) {
        for (x = 0; x < 16; x++)
            tmp[x] = FILTER_4TAP(src, filter, 1);
        tmp += 16;
        src += srcstride;
    }

    tmp    = tmp_array + 16;
    filter = subpel_filters[my - 1];

    for (y = 0; y < h; y++) {
        for (x = 0; x < 16; x++)
            dst[x] = FILTER_4TAP(tmp, filter, 16);
        dst += dststride;
        tmp += 16;
    }
#endif
}

void ff_put_vp8_epel8_h4v4_mmi(uint8_t *dst, ptrdiff_t dststride, uint8_t *src,
                               ptrdiff_t srcstride, int h, int mx, int my)
{
#if 1
    DECLARE_ALIGNED(8, uint8_t, tmp_array[152]);
    uint8_t *tmp = tmp_array;

    src -= srcstride;
    ff_put_vp8_epel8_h4_mmi(tmp, 8, src, srcstride, h + 3, mx, my);
    tmp = tmp_array + 8;
    ff_put_vp8_epel8_v4_mmi(dst, dststride, tmp, 8, h, mx, my);
#else
    const uint8_t *filter = subpel_filters[mx - 1];
    const uint8_t *cm     = ff_crop_tab + MAX_NEG_CROP;
    int x, y;
    uint8_t tmp_array[152];
    uint8_t *tmp = tmp_array;

    src -= srcstride;

    for (y = 0; y < h + 3; y++) {
        for (x = 0; x < 8; x++)
            tmp[x] = FILTER_4TAP(src, filter, 1);
        tmp += 8;
        src += srcstride;
    }

    tmp    = tmp_array + 8;
    filter = subpel_filters[my - 1];

    for (y = 0; y < h; y++) {
        for (x = 0; x < 8; x++)
            dst[x] = FILTER_4TAP(tmp, filter, 8);
        dst += dststride;
        tmp += 8;
    }
#endif
}

void ff_put_vp8_epel4_h4v4_mmi(uint8_t *dst, ptrdiff_t dststride, uint8_t *src,
                               ptrdiff_t srcstride, int h, int mx, int my)
{
#if 1
    DECLARE_ALIGNED(4, uint8_t, tmp_array[44]);
    uint8_t *tmp = tmp_array;

    src -= srcstride;
    ff_put_vp8_epel4_h4_mmi(tmp, 4, src, srcstride, h + 3, mx, my);
    tmp = tmp_array + 4;
    ff_put_vp8_epel4_v4_mmi(dst, dststride, tmp, 4, h, mx, my);
#else
    const uint8_t *filter = subpel_filters[mx - 1];
    const uint8_t *cm     = ff_crop_tab + MAX_NEG_CROP;
    int x, y;
    uint8_t tmp_array[44];
    uint8_t *tmp = tmp_array;

    src -= srcstride;

    for (y = 0; y < h + 3; y++) {
        for (x = 0; x < 4; x++)
            tmp[x] = FILTER_4TAP(src, filter, 1);
        tmp += 4;
        src += srcstride;
    }

    tmp    = tmp_array + 4;
    filter = subpel_filters[my - 1];

    for (y = 0; y < h; y++) {
        for (x = 0; x < 4; x++)
            dst[x] = FILTER_4TAP(tmp, filter, 4);
        dst += dststride;
        tmp += 4;
    }
#endif
}

void ff_put_vp8_epel16_h4v6_mmi(uint8_t *dst, ptrdiff_t dststride, uint8_t *src,
                                ptrdiff_t srcstride, int h, int mx, int my)
{
#if 1
    DECLARE_ALIGNED(8, uint8_t, tmp_array[592]);
    uint8_t *tmp = tmp_array;

    src -= 2 * srcstride;
    ff_put_vp8_epel16_h4_mmi(tmp, 16, src, srcstride, h + 5, mx, my);
    tmp = tmp_array + 32;
    ff_put_vp8_epel16_v6_mmi(dst, dststride, tmp, 16, h, mx, my);
#else
    const uint8_t *filter = subpel_filters[mx - 1];
    const uint8_t *cm     = ff_crop_tab + MAX_NEG_CROP;
    int x, y;
    uint8_t tmp_array[592];
    uint8_t *tmp = tmp_array;

    src -= 2 * srcstride;

    for (y = 0; y < h + 5; y++) {
        for (x = 0; x < 16; x++)
            tmp[x] = FILTER_4TAP(src, filter, 1);
        tmp += 16;
        src += srcstride;
    }

    tmp    = tmp_array + 32;
    filter = subpel_filters[my - 1];

    for (y = 0; y < h; y++) {
        for (x = 0; x < 16; x++)
            dst[x] = FILTER_6TAP(tmp, filter, 16);
        dst += dststride;
        tmp += 16;
    }
#endif
}

void ff_put_vp8_epel8_h4v6_mmi(uint8_t *dst, ptrdiff_t dststride, uint8_t *src,
                               ptrdiff_t srcstride, int h, int mx, int my)
{
#if 1
    DECLARE_ALIGNED(8, uint8_t, tmp_array[168]);
    uint8_t *tmp = tmp_array;

    src -= 2 * srcstride;
    ff_put_vp8_epel8_h4_mmi(tmp, 8, src, srcstride, h + 5, mx, my);
    tmp = tmp_array + 16;
    ff_put_vp8_epel8_v6_mmi(dst, dststride, tmp, 8, h, mx, my);
#else
    const uint8_t *filter = subpel_filters[mx - 1];
    const uint8_t *cm     = ff_crop_tab + MAX_NEG_CROP;
    int x, y;
    uint8_t tmp_array[168];
    uint8_t *tmp = tmp_array;

    src -= 2 * srcstride;

    for (y = 0; y < h + 5; y++) {
        for (x = 0; x < 8; x++)
            tmp[x] = FILTER_4TAP(src, filter, 1);
        tmp += 8;
        src += srcstride;
    }

    tmp    = tmp_array + 16;
    filter = subpel_filters[my - 1];

    for (y = 0; y < h; y++) {
        for (x = 0; x < 8; x++)
            dst[x] = FILTER_6TAP(tmp, filter, 8);
        dst += dststride;
        tmp += 8;
    }
#endif
}

void ff_put_vp8_epel4_h4v6_mmi(uint8_t *dst, ptrdiff_t dststride, uint8_t *src,
                               ptrdiff_t srcstride, int h, int mx, int my)
{
#if 1
    DECLARE_ALIGNED(4, uint8_t, tmp_array[52]);
    uint8_t *tmp = tmp_array;

    src -= 2 * srcstride;
    ff_put_vp8_epel4_h4_mmi(tmp, 4, src, srcstride, h + 5, mx, my);
    tmp = tmp_array + 8;
    ff_put_vp8_epel4_v6_mmi(dst, dststride, tmp, 4, h, mx, my);
#else
    const uint8_t *filter = subpel_filters[mx - 1];
    const uint8_t *cm     = ff_crop_tab + MAX_NEG_CROP;
    int x, y;
    uint8_t tmp_array[52];
    uint8_t *tmp = tmp_array;

    src -= 2 * srcstride;

    for (y = 0; y < h + 5; y++) {
        for (x = 0; x < 4; x++)
            tmp[x] = FILTER_4TAP(src, filter, 1);
        tmp += 4;
        src += srcstride;
    }

    tmp    = tmp_array + 8;
    filter = subpel_filters[my - 1];

    for (y = 0; y < h; y++) {
        for (x = 0; x < 4; x++)
            dst[x] = FILTER_6TAP(tmp, filter, 4);
        dst += dststride;
        tmp += 4;
    }
#endif
}

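/*
 * The h6v4 variants below mirror the h4v6 ones above with the tap counts
 * swapped: a six-tap horizontal pass (selected by mx) feeds a four-tap
 * vertical pass (selected by my), so the temp buffer only needs one row
 * above and two below the block (h + 3 rows, with the vertical pass
 * starting at tmp_array + width).
 */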
void ff_put_vp8_epel16_h6v4_mmi(uint8_t *dst, ptrdiff_t dststride, uint8_t *src,
                                ptrdiff_t srcstride, int h, int mx, int my)
{
#if 1
    DECLARE_ALIGNED(8, uint8_t, tmp_array[560]);
    uint8_t *tmp = tmp_array;

    src -= srcstride;
    ff_put_vp8_epel16_h6_mmi(tmp, 16, src, srcstride, h + 3, mx, my);
    tmp = tmp_array + 16;
    ff_put_vp8_epel16_v4_mmi(dst, dststride, tmp, 16, h, mx, my);
#else
    const uint8_t *filter = subpel_filters[mx - 1];
    const uint8_t *cm     = ff_crop_tab + MAX_NEG_CROP;
    int x, y;
    uint8_t tmp_array[560];
    uint8_t *tmp = tmp_array;

    src -= srcstride;

    for (y = 0; y < h + 3; y++) {
        for (x = 0; x < 16; x++)
            tmp[x] = FILTER_6TAP(src, filter, 1);
        tmp += 16;
        src += srcstride;
    }

    tmp    = tmp_array + 16;
    filter = subpel_filters[my - 1];

    for (y = 0; y < h; y++) {
        for (x = 0; x < 16; x++)
            dst[x] = FILTER_4TAP(tmp, filter, 16);
        dst += dststride;
        tmp += 16;
    }
#endif
}

void ff_put_vp8_epel8_h6v4_mmi(uint8_t *dst, ptrdiff_t dststride, uint8_t *src,
                               ptrdiff_t srcstride, int h, int mx, int my)
{
#if 1
    DECLARE_ALIGNED(8, uint8_t, tmp_array[152]);
    uint8_t *tmp = tmp_array;

    src -= srcstride;
    ff_put_vp8_epel8_h6_mmi(tmp, 8, src, srcstride, h + 3, mx, my);
    tmp = tmp_array + 8;
    ff_put_vp8_epel8_v4_mmi(dst, dststride, tmp, 8, h, mx, my);
#else
    const uint8_t *filter = subpel_filters[mx - 1];
    const uint8_t *cm     = ff_crop_tab + MAX_NEG_CROP;
    int x, y;
    uint8_t tmp_array[152];
    uint8_t *tmp = tmp_array;

    src -= srcstride;

    for (y = 0; y < h + 3; y++) {
        for (x = 0; x < 8; x++)
            tmp[x] = FILTER_6TAP(src, filter, 1);
        tmp += 8;
        src += srcstride;
    }

    tmp    = tmp_array + 8;
    filter = subpel_filters[my - 1];

    for (y = 0; y < h; y++) {
        for (x = 0; x < 8; x++)
            dst[x] = FILTER_4TAP(tmp, filter, 8);
        dst += dststride;
        tmp += 8;
    }
#endif
}

void ff_put_vp8_epel4_h6v4_mmi(uint8_t *dst, ptrdiff_t dststride, uint8_t *src,
                               ptrdiff_t srcstride, int h, int mx, int my)
{
#if 1
    DECLARE_ALIGNED(4, uint8_t, tmp_array[44]);
    uint8_t *tmp = tmp_array;

    src -= srcstride;
    ff_put_vp8_epel4_h6_mmi(tmp, 4, src, srcstride, h + 3, mx, my);
    tmp = tmp_array + 4;
    ff_put_vp8_epel4_v4_mmi(dst, dststride, tmp, 4, h, mx, my);
#else
    const uint8_t *filter = subpel_filters[mx - 1];
    const uint8_t *cm     = ff_crop_tab + MAX_NEG_CROP;
    int x, y;
    uint8_t tmp_array[44];
    uint8_t *tmp = tmp_array;

    src -= srcstride;

    for (y = 0; y < h + 3; y++) {
        for (x = 0; x < 4; x++)
            tmp[x] = FILTER_6TAP(src, filter, 1);
        tmp += 4;
        src += srcstride;
    }

    tmp    = tmp_array + 4;
    filter = subpel_filters[my - 1];

    for (y = 0; y < h; y++) {
        for (x = 0; x < 4; x++)
            dst[x] = FILTER_4TAP(tmp, filter, 4);
        dst += dststride;
        tmp += 4;
    }
#endif
}

void ff_put_vp8_epel16_h6v6_mmi(uint8_t *dst, ptrdiff_t dststride, uint8_t *src,
                                ptrdiff_t srcstride, int h, int mx, int my)
{
#if 1
    DECLARE_ALIGNED(8, uint8_t, tmp_array[592]);
    uint8_t *tmp = tmp_array;

    src -= 2 * srcstride;
    ff_put_vp8_epel16_h6_mmi(tmp, 16, src, srcstride, h + 5, mx, my);
    tmp = tmp_array + 32;
    ff_put_vp8_epel16_v6_mmi(dst, dststride, tmp, 16, h, mx, my);
#else
    const uint8_t *filter = subpel_filters[mx - 1];
    const uint8_t *cm     = ff_crop_tab + MAX_NEG_CROP;
    int x, y;
    uint8_t tmp_array[592];
    uint8_t *tmp = tmp_array;

    src -= 2 * srcstride;

    for (y = 0; y < h + 5; y++) {
        for (x = 0; x < 16; x++)
            tmp[x] = FILTER_6TAP(src, filter, 1);
        tmp += 16;
        src += srcstride;
    }

    tmp    = tmp_array + 32;
    filter = subpel_filters[my - 1];

    for (y = 0; y < h; y++) {
        for (x = 0; x < 16; x++)
            dst[x] = FILTER_6TAP(tmp, filter, 16);
        dst += dststride;
        tmp += 16;
    }
#endif
}

void ff_put_vp8_epel8_h6v6_mmi(uint8_t *dst, ptrdiff_t dststride, uint8_t *src,
                               ptrdiff_t srcstride, int h, int mx, int my)
{
#if 1
    DECLARE_ALIGNED(8, uint8_t, tmp_array[168]);
    uint8_t *tmp = tmp_array;

    src -= 2 * srcstride;
    ff_put_vp8_epel8_h6_mmi(tmp, 8, src, srcstride, h + 5, mx, my);
    tmp = tmp_array + 16;
    ff_put_vp8_epel8_v6_mmi(dst, dststride, tmp, 8, h, mx, my);
#else
    const uint8_t *filter = subpel_filters[mx - 1];
    const uint8_t *cm     = ff_crop_tab + MAX_NEG_CROP;
    int x, y;
    uint8_t tmp_array[168];
    uint8_t *tmp = tmp_array;

    src -= 2 * srcstride;

    for (y = 0; y < h + 5; y++) {
        for (x = 0; x < 8; x++)
            tmp[x] = FILTER_6TAP(src, filter, 1);
        tmp += 8;
        src += srcstride;
    }

    tmp    = tmp_array + 16;
    filter = subpel_filters[my - 1];

    for (y = 0; y < h; y++) {
        for (x = 0; x < 8; x++)
            dst[x] = FILTER_6TAP(tmp, filter, 8);
        dst += dststride;
        tmp += 8;
    }
#endif
}

void ff_put_vp8_epel4_h6v6_mmi(uint8_t *dst, ptrdiff_t dststride, uint8_t *src,
                               ptrdiff_t srcstride, int h, int mx, int my)
{
#if 1
    DECLARE_ALIGNED(4, uint8_t, tmp_array[52]);
    uint8_t *tmp = tmp_array;

    src -= 2 * srcstride;
    ff_put_vp8_epel4_h6_mmi(tmp, 4, src, srcstride, h + 5, mx, my);
    tmp = tmp_array + 8;
    ff_put_vp8_epel4_v6_mmi(dst, dststride, tmp, 4, h, mx, my);
#else
    const uint8_t *filter = subpel_filters[mx - 1];
    const uint8_t *cm     = ff_crop_tab + MAX_NEG_CROP;
    int x, y;
    uint8_t tmp_array[52];
    uint8_t *tmp = tmp_array;

    src -= 2 * srcstride;

    for (y = 0; y < h + 5; y++) {
        for (x = 0; x < 4; x++)
            tmp[x] = FILTER_6TAP(src, filter, 1);
        tmp += 4;
        src += srcstride;
    }

    tmp    = tmp_array + 8;
    filter = subpel_filters[my - 1];

    for (y = 0; y < h; y++) {
        for (x = 0; x < 4; x++)
            dst[x] = FILTER_6TAP(tmp, filter, 4);
        dst += dststride;
        tmp += 4;
    }
#endif
}

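/*
 * The bilinear put functions below implement VP8's cheaper motion
 * compensation: dst[x] = (a * src[x] + b * src[x + 1] + 4) >> 3 with
 * a = 8 - mx and b = mx (and likewise c = 8 - my, d = my against
 * src[x + sstride] for the vertical direction).  Worked example for
 * mx = 2: a = 6, b = 2, so src[x] = 100, src[x + 1] = 108 gives
 * (600 + 216 + 4) >> 3 = 820 >> 3 = 102.
 */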
void ff_put_vp8_bilinear16_h_mmi(uint8_t *dst, ptrdiff_t dstride, uint8_t *src,
                                 ptrdiff_t sstride, int h, int mx, int my)
{
#if 1
    union mmi_intfloat64 a, b;
    double ftmp[7];
    uint32_t tmp[1];
    mips_reg dst0, src0;
    DECLARE_VAR_ALL64;

    a.i = 8 - mx;
    b.i = mx;

    /*
    dst[0] = (a * src[0] + b * src[1] + 4) >> 3;
    dst[1] = (a * src[1] + b * src[2] + 4) >> 3;
    dst[2] = (a * src[2] + b * src[3] + 4) >> 3;
    dst[3] = (a * src[3] + b * src[4] + 4) >> 3;
    dst[4] = (a * src[4] + b * src[5] + 4) >> 3;
    dst[5] = (a * src[5] + b * src[6] + 4) >> 3;
    dst[6] = (a * src[6] + b * src[7] + 4) >> 3;
    dst[7] = (a * src[7] + b * src[8] + 4) >> 3;

    dst[ 8] = (a * src[ 8] + b * src[ 9] + 4) >> 3;
    dst[ 9] = (a * src[ 9] + b * src[10] + 4) >> 3;
    dst[10] = (a * src[10] + b * src[11] + 4) >> 3;
    dst[11] = (a * src[11] + b * src[12] + 4) >> 3;
    dst[12] = (a * src[12] + b * src[13] + 4) >> 3;
    dst[13] = (a * src[13] + b * src[14] + 4) >> 3;
    dst[14] = (a * src[14] + b * src[15] + 4) >> 3;
    dst[15] = (a * src[15] + b * src[16] + 4) >> 3;
    */
    __asm__ volatile (
        "pxor       %[ftmp0],   %[ftmp0],   %[ftmp0]        \n\t"
        "li         %[tmp0],    0x03                        \n\t"
        "mtc1       %[tmp0],    %[ftmp4]                    \n\t"
        "pshufh     %[a],       %[a],       %[ftmp0]        \n\t"
        "pshufh     %[b],       %[b],       %[ftmp0]        \n\t"
        "1:                                                 \n\t"
        // 0 - 7
        PUT_VP8_BILINEAR8_H_MMI(%[src], %[dst])
        PTR_ADDIU  "%[src0],    %[src],     0x08            \n\t"
        PTR_ADDIU  "%[dst0],    %[dst],     0x08            \n\t"
        // 8 - 15
        PUT_VP8_BILINEAR8_H_MMI(%[src0], %[dst0])
        "addiu      %[h],       %[h],       -0x01           \n\t"
        PTR_ADDU   "%[src],     %[src],     %[sstride]      \n\t"
        PTR_ADDU   "%[dst],     %[dst],     %[dstride]      \n\t"
        "bnez       %[h],       1b                          \n\t"
        : [ftmp0]"=&f"(ftmp[0]),    [ftmp1]"=&f"(ftmp[1]),
          [ftmp2]"=&f"(ftmp[2]),    [ftmp3]"=&f"(ftmp[3]),
          [ftmp4]"=&f"(ftmp[4]),    [ftmp5]"=&f"(ftmp[5]),
          [ftmp6]"=&f"(ftmp[6]),
          [tmp0]"=&r"(tmp[0]),
          RESTRICT_ASM_ALL64
          [dst0]"=&r"(dst0),        [src0]"=&r"(src0),
          [h]"+&r"(h),
          [dst]"+&r"(dst),          [src]"+&r"(src),
          [a]"+&f"(a.f),            [b]"+&f"(b.f)
        : [sstride]"r"((mips_reg)sstride),
          [dstride]"r"((mips_reg)dstride),
          [ff_pw_4]"f"(ff_pw_4.f)
        : "memory"
    );
#else
    int a = 8 - mx, b = mx;
    int x, y;

    for (y = 0; y < h; y++) {
        for (x = 0; x < 16; x++)
            dst[x] = (a * src[x] + b * src[x + 1] + 4) >> 3;
        dst += dstride;
        src += sstride;
    }
#endif
}

void ff_put_vp8_bilinear16_v_mmi(uint8_t *dst, ptrdiff_t dstride, uint8_t *src,
                                 ptrdiff_t sstride, int h, int mx, int my)
{
#if 1
    union mmi_intfloat64 c, d;
    double ftmp[7];
    uint32_t tmp[1];
    mips_reg src0, src1, dst0;
    DECLARE_VAR_ALL64;

    c.i = 8 - my;
    d.i = my;

    /*
    dst[0] = (c * src[0] + d * src[    sstride] + 4) >> 3;
    dst[1] = (c * src[1] + d * src[1 + sstride] + 4) >> 3;
    dst[2] = (c * src[2] + d * src[2 + sstride] + 4) >> 3;
    dst[3] = (c * src[3] + d * src[3 + sstride] + 4) >> 3;
    dst[4] = (c * src[4] + d * src[4 + sstride] + 4) >> 3;
    dst[5] = (c * src[5] + d * src[5 + sstride] + 4) >> 3;
    dst[6] = (c * src[6] + d * src[6 + sstride] + 4) >> 3;
    dst[7] = (c * src[7] + d * src[7 + sstride] + 4) >> 3;
    */
    __asm__ volatile (
        "pxor       %[ftmp0],   %[ftmp0],   %[ftmp0]        \n\t"
        "li         %[tmp0],    0x03                        \n\t"
        "mtc1       %[tmp0],    %[ftmp4]                    \n\t"
        "pshufh     %[c],       %[c],       %[ftmp0]        \n\t"
        "pshufh     %[d],       %[d],       %[ftmp0]        \n\t"
        "1:                                                 \n\t"
        // 0 - 7
        PUT_VP8_BILINEAR8_V_MMI(%[src], %[src1], %[dst], %[sstride])
        PTR_ADDIU  "%[src0],    %[src],     0x08            \n\t"
        PTR_ADDIU  "%[dst0],    %[dst],     0x08            \n\t"
        // 8 - 15
        PUT_VP8_BILINEAR8_V_MMI(%[src0], %[src1], %[dst0], %[sstride])
        "addiu      %[h],       %[h],       -0x01           \n\t"
        PTR_ADDU   "%[src],     %[src],     %[sstride]      \n\t"
        PTR_ADDU   "%[dst],     %[dst],     %[dstride]      \n\t"
        "bnez       %[h],       1b                          \n\t"
        : [ftmp0]"=&f"(ftmp[0]),    [ftmp1]"=&f"(ftmp[1]),
          [ftmp2]"=&f"(ftmp[2]),    [ftmp3]"=&f"(ftmp[3]),
          [ftmp4]"=&f"(ftmp[4]),    [ftmp5]"=&f"(ftmp[5]),
          [ftmp6]"=&f"(ftmp[6]),
          [tmp0]"=&r"(tmp[0]),
          RESTRICT_ASM_ALL64
          [src0]"=&r"(src0),        [dst0]"=&r"(dst0),
          [src1]"=&r"(src1),
          [h]"+&r"(h),
          [dst]"+&r"(dst),          [src]"+&r"(src),
          [c]"+&f"(c.f),            [d]"+&f"(d.f)
        : [sstride]"r"((mips_reg)sstride),
          [dstride]"r"((mips_reg)dstride),
          [ff_pw_4]"f"(ff_pw_4.f)
        : "memory"
    );
#else
    int c = 8 - my, d = my;
    int x, y;

    for (y = 0; y < h; y++) {
        for (x = 0; x < 16; x++)
            dst[x] = (c * src[x] + d * src[x + sstride] + 4) >> 3;
        dst += dstride;
        src += sstride;
    }
#endif
}

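/*
 * The bilinear hv variants chain the two single-direction passes above
 * through a temp buffer: the horizontal pass produces h + 1 rows because
 * the vertical pass reads both tmp[x] and tmp[x + stride] for every output
 * row, so one extra row below the block is needed.
 */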
void ff_put_vp8_bilinear16_hv_mmi(uint8_t *dst, ptrdiff_t dstride, uint8_t *src,
                                  ptrdiff_t sstride, int h, int mx, int my)
{
#if 1
    DECLARE_ALIGNED(8, uint8_t, tmp_array[528]);
    uint8_t *tmp = tmp_array;

    ff_put_vp8_bilinear16_h_mmi(tmp, 16, src, sstride, h + 1, mx, my);
    ff_put_vp8_bilinear16_v_mmi(dst, dstride, tmp, 16, h, mx, my);
#else
    int a = 8 - mx, b = mx;
    int c = 8 - my, d = my;
    int x, y;
    uint8_t tmp_array[528];
    uint8_t *tmp = tmp_array;

    for (y = 0; y < h + 1; y++) {
        for (x = 0; x < 16; x++)
            tmp[x] = (a * src[x] + b * src[x + 1] + 4) >> 3;
        tmp += 16;
        src += sstride;
    }

    tmp = tmp_array;

    for (y = 0; y < h; y++) {
        for (x = 0; x < 16; x++)
            dst[x] = (c * tmp[x] + d * tmp[x + 16] + 4) >> 3;
        dst += dstride;
        tmp += 16;
    }
#endif
}

void ff_put_vp8_bilinear8_h_mmi(uint8_t *dst, ptrdiff_t dstride, uint8_t *src,
                                ptrdiff_t sstride, int h, int mx, int my)
{
#if 1
    union mmi_intfloat64 a, b;
    double ftmp[7];
    uint32_t tmp[1];
    DECLARE_VAR_ALL64;

    a.i = 8 - mx;
    b.i = mx;

    /*
    dst[0] = (a * src[0] + b * src[1] + 4) >> 3;
    dst[1] = (a * src[1] + b * src[2] + 4) >> 3;
    dst[2] = (a * src[2] + b * src[3] + 4) >> 3;
    dst[3] = (a * src[3] + b * src[4] + 4) >> 3;
    dst[4] = (a * src[4] + b * src[5] + 4) >> 3;
    dst[5] = (a * src[5] + b * src[6] + 4) >> 3;
    dst[6] = (a * src[6] + b * src[7] + 4) >> 3;
    dst[7] = (a * src[7] + b * src[8] + 4) >> 3;
    */
    __asm__ volatile (
        "pxor       %[ftmp0],   %[ftmp0],   %[ftmp0]        \n\t"
        "li         %[tmp0],    0x03                        \n\t"
        "mtc1       %[tmp0],    %[ftmp4]                    \n\t"
        "pshufh     %[a],       %[a],       %[ftmp0]        \n\t"
        "pshufh     %[b],       %[b],       %[ftmp0]        \n\t"
        "1:                                                 \n\t"
        PUT_VP8_BILINEAR8_H_MMI(%[src], %[dst])
        "addiu      %[h],       %[h],       -0x01           \n\t"
        PTR_ADDU   "%[src],     %[src],     %[sstride]      \n\t"
        PTR_ADDU   "%[dst],     %[dst],     %[dstride]      \n\t"
        "bnez       %[h],       1b                          \n\t"
        : [ftmp0]"=&f"(ftmp[0]),    [ftmp1]"=&f"(ftmp[1]),
          [ftmp2]"=&f"(ftmp[2]),    [ftmp3]"=&f"(ftmp[3]),
          [ftmp4]"=&f"(ftmp[4]),    [ftmp5]"=&f"(ftmp[5]),
          [ftmp6]"=&f"(ftmp[6]),
          [tmp0]"=&r"(tmp[0]),
          RESTRICT_ASM_ALL64
          [h]"+&r"(h),
          [dst]"+&r"(dst),          [src]"+&r"(src),
          [a]"+&f"(a.f),            [b]"+&f"(b.f)
        : [sstride]"r"((mips_reg)sstride),
          [dstride]"r"((mips_reg)dstride),
          [ff_pw_4]"f"(ff_pw_4.f)
        : "memory"
    );
#else
    int a = 8 - mx, b = mx;
    int x, y;

    for (y = 0; y < h; y++) {
        for (x = 0; x < 8; x++)
            dst[x] = (a * src[x] + b * src[x + 1] + 4) >> 3;
        dst += dstride;
        src += sstride;
    }
#endif
}

void ff_put_vp8_bilinear8_v_mmi(uint8_t *dst, ptrdiff_t dstride, uint8_t *src,
                                ptrdiff_t sstride, int h, int mx, int my)
{
#if 1
    union mmi_intfloat64 c, d;
    double ftmp[7];
    uint32_t tmp[1];
    mips_reg src1;
    DECLARE_VAR_ALL64;

    c.i = 8 - my;
    d.i = my;

    /*
    dst[0] = (c * src[0] + d * src[    sstride] + 4) >> 3;
    dst[1] = (c * src[1] + d * src[1 + sstride] + 4) >> 3;
    dst[2] = (c * src[2] + d * src[2 + sstride] + 4) >> 3;
    dst[3] = (c * src[3] + d * src[3 + sstride] + 4) >> 3;
    dst[4] = (c * src[4] + d * src[4 + sstride] + 4) >> 3;
    dst[5] = (c * src[5] + d * src[5 + sstride] + 4) >> 3;
    dst[6] = (c * src[6] + d * src[6 + sstride] + 4) >> 3;
    dst[7] = (c * src[7] + d * src[7 + sstride] + 4) >> 3;
    */
    __asm__ volatile (
        "pxor       %[ftmp0],   %[ftmp0],   %[ftmp0]        \n\t"
        "li         %[tmp0],    0x03                        \n\t"
        "mtc1       %[tmp0],    %[ftmp4]                    \n\t"
        "pshufh     %[c],       %[c],       %[ftmp0]        \n\t"
        "pshufh     %[d],       %[d],       %[ftmp0]        \n\t"
        "1:                                                 \n\t"
        PUT_VP8_BILINEAR8_V_MMI(%[src], %[src1], %[dst], %[sstride])
        "addiu      %[h],       %[h],       -0x01           \n\t"
        PTR_ADDU   "%[src],     %[src],     %[sstride]      \n\t"
        PTR_ADDU   "%[dst],     %[dst],     %[dstride]      \n\t"
        "bnez       %[h],       1b                          \n\t"
        : [ftmp0]"=&f"(ftmp[0]),    [ftmp1]"=&f"(ftmp[1]),
          [ftmp2]"=&f"(ftmp[2]),    [ftmp3]"=&f"(ftmp[3]),
          [ftmp4]"=&f"(ftmp[4]),    [ftmp5]"=&f"(ftmp[5]),
          [ftmp6]"=&f"(ftmp[6]),
          [tmp0]"=&r"(tmp[0]),
          RESTRICT_ASM_ALL64
          [src1]"=&r"(src1),
          [h]"+&r"(h),
          [dst]"+&r"(dst),          [src]"+&r"(src),
          [c]"+&f"(c.f),            [d]"+&f"(d.f)
        : [sstride]"r"((mips_reg)sstride),
          [dstride]"r"((mips_reg)dstride),
          [ff_pw_4]"f"(ff_pw_4.f)
        : "memory"
    );
#else
    int c = 8 - my, d = my;
    int x, y;

    for (y = 0; y < h; y++) {
        for (x = 0; x < 8; x++)
            dst[x] = (c * src[x] + d * src[x + sstride] + 4) >> 3;
        dst += dstride;
        src += sstride;
    }
#endif
}

void ff_put_vp8_bilinear8_hv_mmi(uint8_t *dst, ptrdiff_t dstride, uint8_t *src,
                                 ptrdiff_t sstride, int h, int mx, int my)
{
#if 1
    DECLARE_ALIGNED(8, uint8_t, tmp_array[136]);
    uint8_t *tmp = tmp_array;

    ff_put_vp8_bilinear8_h_mmi(tmp, 8, src, sstride, h + 1, mx, my);
    ff_put_vp8_bilinear8_v_mmi(dst, dstride, tmp, 8, h, mx, my);
#else
    int a = 8 - mx, b = mx;
    int c = 8 - my, d = my;
    int x, y;
    uint8_t tmp_array[136];
    uint8_t *tmp = tmp_array;

    for (y = 0; y < h + 1; y++) {
        for (x = 0; x < 8; x++)
            tmp[x] = (a * src[x] + b * src[x + 1] + 4) >> 3;
        tmp += 8;
        src += sstride;
    }

    tmp = tmp_array;

    for (y = 0; y < h; y++) {
        for (x = 0; x < 8; x++)
            dst[x] = (c * tmp[x] + d * tmp[x + 8] + 4) >> 3;
        dst += dstride;
        tmp += 8;
    }
#endif
}

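/*
 * The 4-pixel-wide bilinear variants below work on 32-bit rows, hence the
 * extra DECLARE_VAR_LOW32/RESTRICT_ASM_LOW32 constraints alongside the
 * 64-bit ones (an inference from the constraint macros used here, not
 * separately documented behaviour).
 */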
void ff_put_vp8_bilinear4_h_mmi(uint8_t *dst, ptrdiff_t dstride, uint8_t *src,
                                ptrdiff_t sstride, int h, int mx, int my)
{
#if 1
    union mmi_intfloat64 a, b;
    double ftmp[5];
    uint32_t tmp[1];
    DECLARE_VAR_LOW32;
    DECLARE_VAR_ALL64;

    a.i = 8 - mx;
    b.i = mx;

    /*
    dst[0] = (a * src[0] + b * src[1] + 4) >> 3;
    dst[1] = (a * src[1] + b * src[2] + 4) >> 3;
    dst[2] = (a * src[2] + b * src[3] + 4) >> 3;
    dst[3] = (a * src[3] + b * src[4] + 4) >> 3;
    */
    __asm__ volatile (
        "pxor       %[ftmp0],   %[ftmp0],   %[ftmp0]        \n\t"
        "li         %[tmp0],    0x03                        \n\t"
        "mtc1       %[tmp0],    %[ftmp4]                    \n\t"
        "pshufh     %[a],       %[a],       %[ftmp0]        \n\t"
        "pshufh     %[b],       %[b],       %[ftmp0]        \n\t"
        "1:                                                 \n\t"
        PUT_VP8_BILINEAR4_H_MMI(%[src], %[dst])
        "addiu      %[h],       %[h],       -0x01           \n\t"
        PTR_ADDU   "%[src],     %[src],     %[sstride]      \n\t"
        PTR_ADDU   "%[dst],     %[dst],     %[dstride]      \n\t"
        "bnez       %[h],       1b                          \n\t"
        : [ftmp0]"=&f"(ftmp[0]),    [ftmp1]"=&f"(ftmp[1]),
          [ftmp2]"=&f"(ftmp[2]),    [ftmp3]"=&f"(ftmp[3]),
          [ftmp4]"=&f"(ftmp[4]),
          [tmp0]"=&r"(tmp[0]),
          RESTRICT_ASM_LOW32
          RESTRICT_ASM_ALL64
          [h]"+&r"(h),
          [dst]"+&r"(dst),          [src]"+&r"(src),
          [a]"+&f"(a.f),            [b]"+&f"(b.f)
        : [sstride]"r"((mips_reg)sstride),
          [dstride]"r"((mips_reg)dstride),
          [ff_pw_4]"f"(ff_pw_4.f)
        : "memory"
    );
#else
    int a = 8 - mx, b = mx;
    int x, y;

    for (y = 0; y < h; y++) {
        for (x = 0; x < 4; x++)
            dst[x] = (a * src[x] + b * src[x + 1] + 4) >> 3;
        dst += dstride;
        src += sstride;
    }
#endif
}

void ff_put_vp8_bilinear4_v_mmi(uint8_t *dst, ptrdiff_t dstride, uint8_t *src,
                                ptrdiff_t sstride, int h, int mx, int my)
{
#if 1
    union mmi_intfloat64 c, d;
    double ftmp[7];
    uint32_t tmp[1];
    mips_reg src1;
    DECLARE_VAR_LOW32;
    DECLARE_VAR_ALL64;

    c.i = 8 - my;
    d.i = my;

    /*
    dst[0] = (c * src[0] + d * src[    sstride] + 4) >> 3;
    dst[1] = (c * src[1] + d * src[1 + sstride] + 4) >> 3;
    dst[2] = (c * src[2] + d * src[2 + sstride] + 4) >> 3;
    dst[3] = (c * src[3] + d * src[3 + sstride] + 4) >> 3;
    */
    __asm__ volatile (
        "pxor       %[ftmp0],   %[ftmp0],   %[ftmp0]        \n\t"
        "li         %[tmp0],    0x03                        \n\t"
        "mtc1       %[tmp0],    %[ftmp4]                    \n\t"
        "pshufh     %[c],       %[c],       %[ftmp0]        \n\t"
        "pshufh     %[d],       %[d],       %[ftmp0]        \n\t"
        "1:                                                 \n\t"
        PUT_VP8_BILINEAR4_V_MMI(%[src], %[src1], %[dst], %[sstride])
        "addiu      %[h],       %[h],       -0x01           \n\t"
        PTR_ADDU   "%[src],     %[src],     %[sstride]      \n\t"
        PTR_ADDU   "%[dst],     %[dst],     %[dstride]      \n\t"
        "bnez       %[h],       1b                          \n\t"
        : [ftmp0]"=&f"(ftmp[0]),    [ftmp1]"=&f"(ftmp[1]),
          [ftmp2]"=&f"(ftmp[2]),    [ftmp3]"=&f"(ftmp[3]),
          [ftmp4]"=&f"(ftmp[4]),
          [tmp0]"=&r"(tmp[0]),
          RESTRICT_ASM_LOW32
          RESTRICT_ASM_ALL64
          [src1]"=&r"(src1),
          [h]"+&r"(h),
          [dst]"+&r"(dst),          [src]"+&r"(src),
          [c]"+&f"(c.f),            [d]"+&f"(d.f)
        : [sstride]"r"((mips_reg)sstride),
          [dstride]"r"((mips_reg)dstride),
          [ff_pw_4]"f"(ff_pw_4.f)
        : "memory"
    );
#else
    int c = 8 - my, d = my;
    int x, y;

    for (y = 0; y < h; y++) {
        for (x = 0; x < 4; x++)
            dst[x] = (c * src[x] + d * src[x + sstride] + 4) >> 3;
        dst += dstride;
        src += sstride;
    }
#endif
}

void ff_put_vp8_bilinear4_hv_mmi(uint8_t *dst, ptrdiff_t dstride, uint8_t *src,
                                 ptrdiff_t sstride, int h, int mx, int my)
{
#if 1
    DECLARE_ALIGNED(4, uint8_t, tmp_array[36]);
    uint8_t *tmp = tmp_array;

    ff_put_vp8_bilinear4_h_mmi(tmp, 4, src, sstride, h + 1, mx, my);
    ff_put_vp8_bilinear4_v_mmi(dst, dstride, tmp, 4, h, mx, my);
#else
    int a = 8 - mx, b = mx;
    int c = 8 - my, d = my;
    int x, y;
    uint8_t tmp_array[36];
    uint8_t *tmp = tmp_array;

    for (y = 0; y < h + 1; y++) {
        for (x = 0; x < 4; x++)
            tmp[x] = (a * src[x] + b * src[x + 1] + 4) >> 3;
        tmp += 4;
        src += sstride;
    }

    tmp = tmp_array;

    for (y = 0; y < h; y++) {
        for (x = 0; x < 4; x++)
            dst[x] = (c * tmp[x] + d * tmp[x + 4] + 4) >> 3;
        dst += dstride;
        tmp += 4;
    }
#endif
}