- /*
- * Loongson SIMD optimized vp8dsp
- *
- * Copyright (c) 2016 Loongson Technology Corporation Limited
- * Copyright (c) 2016 Zhou Xiaoyong <zhouxiaoyong@loongson.cn>
- *
- * This file is part of FFmpeg.
- *
- * FFmpeg is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * FFmpeg is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
- #include "vp8dsp_mips.h"
- #include "constants.h"
- #include "libavutil/attributes.h"
- #include "libavutil/intfloat.h"
- #include "libavutil/mips/mmiutils.h"
- #include "libavutil/mem_internal.h"
- #define DECLARE_DOUBLE_1 double db_1
- #define DECLARE_DOUBLE_2 double db_2
- #define DECLARE_UINT32_T uint32_t it_1
- #define RESTRICT_ASM_DOUBLE_1 [db_1]"=&f"(db_1)
- #define RESTRICT_ASM_DOUBLE_2 [db_2]"=&f"(db_2)
- #define RESTRICT_ASM_UINT32_T [it_1]"=&r"(it_1)
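- /*
-  * Loongson MMI has no unsigned byte "greater than" compare, so emulate it:
-  * dst = (pmaxub(src1, src2) == src1) ^ (src1 == src2), which is 0xFF exactly
-  * where src1 > src2 as unsigned bytes. db_1/db_2 are clobbered as scratch.
-  */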
- #define MMI_PCMPGTUB(dst, src1, src2) \
- "pcmpeqb %[db_1], "#src1", "#src2" \n\t" \
- "pmaxub %[db_2], "#src1", "#src2" \n\t" \
- "pcmpeqb %[db_2], %[db_2], "#src1" \n\t" \
- "pxor "#dst", %[db_2], %[db_1] \n\t"
- #define MMI_BTOH(dst_l, dst_r, src) \
- "pxor %[db_1], %[db_1], %[db_1] \n\t" \
- "pcmpgtb %[db_2], %[db_1], "#src" \n\t" \
- "punpcklbh "#dst_r", "#src", %[db_2] \n\t" \
- "punpckhbh "#dst_l", "#src", %[db_2] \n\t"
- #define MMI_VP8_LOOP_FILTER \
- /* Calculation of hev */ \
- "dmtc1 %[thresh], %[ftmp3] \n\t" \
- "punpcklbh %[ftmp3], %[ftmp3], %[ftmp3] \n\t" \
- "punpcklhw %[ftmp3], %[ftmp3], %[ftmp3] \n\t" \
- "punpcklwd %[ftmp3], %[ftmp3], %[ftmp3] \n\t" \
- "pasubub %[ftmp0], %[p1], %[p0] \n\t" \
- "pasubub %[ftmp1], %[q1], %[q0] \n\t" \
- "pmaxub %[ftmp0], %[ftmp0], %[ftmp1] \n\t" \
- MMI_PCMPGTUB(%[hev], %[ftmp0], %[ftmp3]) \
- /* Calculation of mask */ \
- "pasubub %[ftmp1], %[p0], %[q0] \n\t" \
- "paddusb %[ftmp1], %[ftmp1], %[ftmp1] \n\t" \
- "pasubub %[ftmp2], %[p1], %[q1] \n\t" \
- "li %[tmp0], 0x09 \n\t" \
- "dmtc1 %[tmp0], %[ftmp3] \n\t" \
- PSRLB_MMI(%[ftmp2], %[ftmp3], %[ftmp4], %[ftmp5], %[ftmp2]) \
- "paddusb %[ftmp1], %[ftmp1], %[ftmp2] \n\t" \
- "dmtc1 %[e], %[ftmp3] \n\t" \
- "punpcklbh %[ftmp3], %[ftmp3], %[ftmp3] \n\t" \
- "punpcklhw %[ftmp3], %[ftmp3], %[ftmp3] \n\t" \
- "punpcklwd %[ftmp3], %[ftmp3], %[ftmp3] \n\t" \
- MMI_PCMPGTUB(%[mask], %[ftmp1], %[ftmp3]) \
- "pmaxub %[mask], %[mask], %[ftmp0] \n\t" \
- "pasubub %[ftmp1], %[p3], %[p2] \n\t" \
- "pasubub %[ftmp2], %[p2], %[p1] \n\t" \
- "pmaxub %[ftmp1], %[ftmp1], %[ftmp2] \n\t" \
- "pmaxub %[mask], %[mask], %[ftmp1] \n\t" \
- "pasubub %[ftmp1], %[q3], %[q2] \n\t" \
- "pasubub %[ftmp2], %[q2], %[q1] \n\t" \
- "pmaxub %[ftmp1], %[ftmp1], %[ftmp2] \n\t" \
- "pmaxub %[mask], %[mask], %[ftmp1] \n\t" \
- "dmtc1 %[i], %[ftmp3] \n\t" \
- "punpcklbh %[ftmp3], %[ftmp3], %[ftmp3] \n\t" \
- "punpcklhw %[ftmp3], %[ftmp3], %[ftmp3] \n\t" \
- "punpcklwd %[ftmp3], %[ftmp3], %[ftmp3] \n\t" \
- MMI_PCMPGTUB(%[mask], %[mask], %[ftmp3]) \
- "pcmpeqw %[ftmp3], %[ftmp3], %[ftmp3] \n\t" \
- "pxor %[mask], %[mask], %[ftmp3] \n\t" \
- /* VP8_MBFILTER */ \
- "li %[tmp0], 0x80808080 \n\t" \
- "dmtc1 %[tmp0], %[ftmp7] \n\t" \
- "punpcklwd %[ftmp7], %[ftmp7], %[ftmp7] \n\t" \
- "pxor %[p2], %[p2], %[ftmp7] \n\t" \
- "pxor %[p1], %[p1], %[ftmp7] \n\t" \
- "pxor %[p0], %[p0], %[ftmp7] \n\t" \
- "pxor %[q0], %[q0], %[ftmp7] \n\t" \
- "pxor %[q1], %[q1], %[ftmp7] \n\t" \
- "pxor %[q2], %[q2], %[ftmp7] \n\t" \
- "psubsb %[ftmp4], %[p1], %[q1] \n\t" \
- "psubb %[ftmp5], %[q0], %[p0] \n\t" \
- MMI_BTOH(%[ftmp1], %[ftmp0], %[ftmp5]) \
- MMI_BTOH(%[ftmp3], %[ftmp2], %[ftmp4]) \
- /* Right part */ \
- "paddh %[ftmp5], %[ftmp0], %[ftmp0] \n\t" \
- "paddh %[ftmp0], %[ftmp0], %[ftmp5] \n\t" \
- "paddh %[ftmp0], %[ftmp2], %[ftmp0] \n\t" \
- /* Left part */ \
- "paddh %[ftmp5], %[ftmp1], %[ftmp1] \n\t" \
- "paddh %[ftmp1], %[ftmp1], %[ftmp5] \n\t" \
- "paddh %[ftmp1], %[ftmp3], %[ftmp1] \n\t" \
- /* Combine left and right part */ \
- "packsshb %[ftmp1], %[ftmp0], %[ftmp1] \n\t" \
- "pand %[ftmp1], %[ftmp1], %[mask] \n\t" \
- "pand %[ftmp2], %[ftmp1], %[hev] \n\t" \
- "li %[tmp0], 0x04040404 \n\t" \
- "dmtc1 %[tmp0], %[ftmp0] \n\t" \
- "punpcklwd %[ftmp0], %[ftmp0], %[ftmp0] \n\t" \
- "paddsb %[ftmp3], %[ftmp2], %[ftmp0] \n\t" \
- "li %[tmp0], 0x0B \n\t" \
- "dmtc1 %[tmp0], %[ftmp4] \n\t" \
- PSRAB_MMI(%[ftmp3], %[ftmp4], %[ftmp5], %[ftmp6], %[ftmp3]) \
- "li %[tmp0], 0x03030303 \n\t" \
- "dmtc1 %[tmp0], %[ftmp0] \n\t" \
- "punpcklwd %[ftmp0], %[ftmp0], %[ftmp0] \n\t" \
- "paddsb %[ftmp4], %[ftmp2], %[ftmp0] \n\t" \
- "li %[tmp0], 0x0B \n\t" \
- "dmtc1 %[tmp0], %[ftmp2] \n\t" \
- PSRAB_MMI(%[ftmp4], %[ftmp2], %[ftmp5], %[ftmp6], %[ftmp4]) \
- "psubsb %[q0], %[q0], %[ftmp3] \n\t" \
- "paddsb %[p0], %[p0], %[ftmp4] \n\t" \
- /* filt_val &= ~hev */ \
- "pcmpeqw %[ftmp0], %[ftmp0], %[ftmp0] \n\t" \
- "pxor %[hev], %[hev], %[ftmp0] \n\t" \
- "pand %[ftmp1], %[ftmp1], %[hev] \n\t" \
- MMI_BTOH(%[ftmp5], %[ftmp6], %[ftmp1]) \
- "li %[tmp0], 0x07 \n\t" \
- "dmtc1 %[tmp0], %[ftmp2] \n\t" \
- "li %[tmp0], 0x001b001b \n\t" \
- "dmtc1 %[tmp0], %[ftmp1] \n\t" \
- "punpcklwd %[ftmp1], %[ftmp1], %[ftmp1] \n\t" \
- "li %[tmp0], 0x003f003f \n\t" \
- "dmtc1 %[tmp0], %[ftmp0] \n\t" \
- "punpcklwd %[ftmp0], %[ftmp0], %[ftmp0] \n\t" \
- /* Right part */ \
- "pmullh %[ftmp3], %[ftmp6], %[ftmp1] \n\t" \
- "paddh %[ftmp3], %[ftmp3], %[ftmp0] \n\t" \
- "psrah %[ftmp3], %[ftmp3], %[ftmp2] \n\t" \
- /* Left part */ \
- "pmullh %[ftmp4], %[ftmp5], %[ftmp1] \n\t" \
- "paddh %[ftmp4], %[ftmp4], %[ftmp0] \n\t" \
- "psrah %[ftmp4], %[ftmp4], %[ftmp2] \n\t" \
- /* Combine left and right part */ \
- "packsshb %[ftmp4], %[ftmp3], %[ftmp4] \n\t" \
- "psubsb %[q0], %[q0], %[ftmp4] \n\t" \
- "pxor %[q0], %[q0], %[ftmp7] \n\t" \
- "paddsb %[p0], %[p0], %[ftmp4] \n\t" \
- "pxor %[p0], %[p0], %[ftmp7] \n\t" \
- "li %[tmp0], 0x00120012 \n\t" \
- "dmtc1 %[tmp0], %[ftmp1] \n\t" \
- "punpcklwd %[ftmp1], %[ftmp1], %[ftmp1] \n\t" \
- /* Right part */ \
- "pmullh %[ftmp3], %[ftmp6], %[ftmp1] \n\t" \
- "paddh %[ftmp3], %[ftmp3], %[ftmp0] \n\t" \
- "psrah %[ftmp3], %[ftmp3], %[ftmp2] \n\t" \
- /* Left part */ \
- "pmullh %[ftmp4], %[ftmp5], %[ftmp1] \n\t" \
- "paddh %[ftmp4], %[ftmp4], %[ftmp0] \n\t" \
- "psrah %[ftmp4], %[ftmp4], %[ftmp2] \n\t" \
- /* Combine left and right part */ \
- "packsshb %[ftmp4], %[ftmp3], %[ftmp4] \n\t" \
- "psubsb %[q1], %[q1], %[ftmp4] \n\t" \
- "pxor %[q1], %[q1], %[ftmp7] \n\t" \
- "paddsb %[p1], %[p1], %[ftmp4] \n\t" \
- "pxor %[p1], %[p1], %[ftmp7] \n\t" \
- "li %[tmp0], 0x03 \n\t" \
- "dmtc1 %[tmp0], %[ftmp1] \n\t" \
- /* Right part */ \
- "psllh %[ftmp3], %[ftmp6], %[ftmp1] \n\t" \
- "paddh %[ftmp3], %[ftmp3], %[ftmp6] \n\t" \
- "paddh %[ftmp3], %[ftmp3], %[ftmp0] \n\t" \
- "psrah %[ftmp3], %[ftmp3], %[ftmp2] \n\t" \
- /* Left part */ \
- "psllh %[ftmp4], %[ftmp5], %[ftmp1] \n\t" \
- "paddh %[ftmp4], %[ftmp4], %[ftmp5] \n\t" \
- "paddh %[ftmp4], %[ftmp4], %[ftmp0] \n\t" \
- "psrah %[ftmp4], %[ftmp4], %[ftmp2] \n\t" \
- /* Combine left and right part */ \
- "packsshb %[ftmp4], %[ftmp3], %[ftmp4] \n\t" \
- "psubsb %[q2], %[q2], %[ftmp4] \n\t" \
- "pxor %[q2], %[q2], %[ftmp7] \n\t" \
- "paddsb %[p2], %[p2], %[ftmp4] \n\t" \
- "pxor %[p2], %[p2], %[ftmp7] \n\t"
- #define PUT_VP8_EPEL4_H6_MMI(src, dst) \
- MMI_ULWC1(%[ftmp1], src, 0x00) \
- "punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t" \
- "pmullh %[ftmp3], %[ftmp2], %[filter2] \n\t" \
- \
- MMI_ULWC1(%[ftmp1], src, -0x01) \
- "punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t" \
- "pmullh %[ftmp2], %[ftmp2], %[filter1] \n\t" \
- "psubsh %[ftmp3], %[ftmp3], %[ftmp2] \n\t" \
- \
- MMI_ULWC1(%[ftmp1], src, -0x02) \
- "punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t" \
- "pmullh %[ftmp2], %[ftmp2], %[filter0] \n\t" \
- "paddsh %[ftmp5], %[ftmp3], %[ftmp2] \n\t" \
- \
- MMI_ULWC1(%[ftmp1], src, 0x01) \
- "punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t" \
- "pmullh %[ftmp3], %[ftmp2], %[filter3] \n\t" \
- \
- MMI_ULWC1(%[ftmp1], src, 0x02) \
- "punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t" \
- "pmullh %[ftmp2], %[ftmp2], %[filter4] \n\t" \
- "psubsh %[ftmp3], %[ftmp3], %[ftmp2] \n\t" \
- \
- MMI_ULWC1(%[ftmp1], src, 0x03) \
- "punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t" \
- "pmullh %[ftmp2], %[ftmp2], %[filter5] \n\t" \
- "paddsh %[ftmp3], %[ftmp3], %[ftmp2] \n\t" \
- \
- "paddsh %[ftmp3], %[ftmp3], %[ftmp5] \n\t" \
- "paddsh %[ftmp3], %[ftmp3], %[ff_pw_64] \n\t" \
- "psrah %[ftmp3], %[ftmp3], %[ftmp4] \n\t" \
- "packushb %[ftmp1], %[ftmp3], %[ftmp0] \n\t" \
- \
- MMI_SWC1(%[ftmp1], dst, 0x00)
- #define PUT_VP8_EPEL4_H4_MMI(src, dst) \
- MMI_ULWC1(%[ftmp1], src, 0x00) \
- "punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t" \
- "pmullh %[ftmp3], %[ftmp2], %[filter2] \n\t" \
- \
- MMI_ULWC1(%[ftmp1], src, -0x01) \
- "punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t" \
- "pmullh %[ftmp2], %[ftmp2], %[filter1] \n\t" \
- "psubsh %[ftmp5], %[ftmp3], %[ftmp2] \n\t" \
- \
- MMI_ULWC1(%[ftmp1], src, 0x01) \
- "punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t" \
- "pmullh %[ftmp3], %[ftmp2], %[filter3] \n\t" \
- \
- MMI_ULWC1(%[ftmp1], src, 0x02) \
- "punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t" \
- "pmullh %[ftmp2], %[ftmp2], %[filter4] \n\t" \
- "psubh %[ftmp3], %[ftmp3], %[ftmp2] \n\t" \
- \
- "paddsh %[ftmp3], %[ftmp3], %[ftmp5] \n\t" \
- \
- "paddsh %[ftmp3], %[ftmp3], %[ff_pw_64] \n\t" \
- "psrah %[ftmp3], %[ftmp3], %[ftmp4] \n\t" \
- \
- "packushb %[ftmp1], %[ftmp3], %[ftmp0] \n\t" \
- MMI_SWC1(%[ftmp1], dst, 0x00)
- #define PUT_VP8_EPEL4_V6_MMI(src, src1, dst, srcstride) \
- MMI_ULWC1(%[ftmp1], src, 0x00) \
- "punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t" \
- "pmullh %[ftmp3], %[ftmp2], %[filter2] \n\t" \
- \
- PTR_SUBU ""#src1", "#src", "#srcstride" \n\t" \
- MMI_ULWC1(%[ftmp1], src1, 0x00) \
- "punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t" \
- "pmullh %[ftmp2], %[ftmp2], %[filter1] \n\t" \
- "psubsh %[ftmp3], %[ftmp3], %[ftmp2] \n\t" \
- \
- PTR_SUBU ""#src1", "#src1", "#srcstride" \n\t" \
- MMI_ULWC1(%[ftmp1], src1, 0x00) \
- "punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t" \
- "pmullh %[ftmp2], %[ftmp2], %[filter0] \n\t" \
- "paddsh %[ftmp5], %[ftmp3], %[ftmp2] \n\t" \
- \
- PTR_ADDU ""#src1", "#src", "#srcstride" \n\t" \
- MMI_ULWC1(%[ftmp1], src1, 0x00) \
- "punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t" \
- "pmullh %[ftmp3], %[ftmp2], %[filter3] \n\t" \
- \
- PTR_ADDU ""#src1", "#src1", "#srcstride" \n\t" \
- MMI_ULWC1(%[ftmp1], src1, 0x00) \
- "punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t" \
- "pmullh %[ftmp2], %[ftmp2], %[filter4] \n\t" \
- "psubsh %[ftmp3], %[ftmp3], %[ftmp2] \n\t" \
- \
- PTR_ADDU ""#src1", "#src1", "#srcstride" \n\t" \
- MMI_ULWC1(%[ftmp1], src1, 0x00) \
- "punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t" \
- "pmullh %[ftmp2], %[ftmp2], %[filter5] \n\t" \
- "paddsh %[ftmp3], %[ftmp3], %[ftmp2] \n\t" \
- \
- "paddsh %[ftmp3], %[ftmp3], %[ftmp5] \n\t" \
- \
- "paddsh %[ftmp3], %[ftmp3], %[ff_pw_64] \n\t" \
- "psrah %[ftmp3], %[ftmp3], %[ftmp4] \n\t" \
- "packushb %[ftmp1], %[ftmp3], %[ftmp0] \n\t" \
- \
- MMI_SWC1(%[ftmp1], dst, 0x00)
- #define PUT_VP8_EPEL4_V4_MMI(src, src1, dst, srcstride) \
- MMI_ULWC1(%[ftmp1], src, 0x00) \
- "punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t" \
- "pmullh %[ftmp3], %[ftmp2], %[filter2] \n\t" \
- \
- PTR_SUBU ""#src1", "#src", "#srcstride" \n\t" \
- MMI_ULWC1(%[ftmp1], src1, 0x00) \
- "punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t" \
- "pmullh %[ftmp2], %[ftmp2], %[filter1] \n\t" \
- "psubsh %[ftmp5], %[ftmp3], %[ftmp2] \n\t" \
- \
- PTR_ADDU ""#src1", "#src", "#srcstride" \n\t" \
- MMI_ULWC1(%[ftmp1], src1, 0x00) \
- "punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t" \
- "pmullh %[ftmp3], %[ftmp2], %[filter3] \n\t" \
- \
- PTR_ADDU ""#src1", "#src1", "#srcstride" \n\t" \
- MMI_ULWC1(%[ftmp1], src1, 0x00) \
- "punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t" \
- "pmullh %[ftmp2], %[ftmp2], %[filter4] \n\t" \
- "psubsh %[ftmp3], %[ftmp3], %[ftmp2] \n\t" \
- \
- "paddsh %[ftmp3], %[ftmp3], %[ftmp5] \n\t" \
- \
- "paddsh %[ftmp3], %[ftmp3], %[ff_pw_64] \n\t" \
- "psrah %[ftmp3], %[ftmp3], %[ftmp4] \n\t" \
- "packushb %[ftmp1], %[ftmp3], %[ftmp0] \n\t" \
- \
- MMI_SWC1(%[ftmp1], dst, 0x00)
- #define PUT_VP8_EPEL8_H6_MMI(src, dst) \
- MMI_ULDC1(%[ftmp1], src, 0x00) \
- "punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t" \
- "punpckhbh %[ftmp3], %[ftmp1], %[ftmp0] \n\t" \
- "pmullh %[ftmp5], %[ftmp2], %[filter2] \n\t" \
- "pmullh %[ftmp6], %[ftmp3], %[filter2] \n\t" \
- \
- MMI_ULDC1(%[ftmp1], src, -0x01) \
- "punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t" \
- "punpckhbh %[ftmp3], %[ftmp1], %[ftmp0] \n\t" \
- "pmullh %[ftmp2], %[ftmp2], %[filter1] \n\t" \
- "pmullh %[ftmp3], %[ftmp3], %[filter1] \n\t" \
- "psubsh %[ftmp5], %[ftmp5], %[ftmp2] \n\t" \
- "psubsh %[ftmp6], %[ftmp6], %[ftmp3] \n\t" \
- \
- MMI_ULDC1(%[ftmp1], src, -0x02) \
- "punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t" \
- "punpckhbh %[ftmp3], %[ftmp1], %[ftmp0] \n\t" \
- "pmullh %[ftmp2], %[ftmp2], %[filter0] \n\t" \
- "pmullh %[ftmp3], %[ftmp3], %[filter0] \n\t" \
- "paddsh %[ftmp7], %[ftmp5], %[ftmp2] \n\t" \
- "paddsh %[ftmp8], %[ftmp6], %[ftmp3] \n\t" \
- \
- MMI_ULDC1(%[ftmp1], src, 0x01) \
- "punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t" \
- "punpckhbh %[ftmp3], %[ftmp1], %[ftmp0] \n\t" \
- "pmullh %[ftmp5], %[ftmp2], %[filter3] \n\t" \
- "pmullh %[ftmp6], %[ftmp3], %[filter3] \n\t" \
- \
- MMI_ULDC1(%[ftmp1], src, 0x02) \
- "punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t" \
- "punpckhbh %[ftmp3], %[ftmp1], %[ftmp0] \n\t" \
- "pmullh %[ftmp2], %[ftmp2], %[filter4] \n\t" \
- "pmullh %[ftmp3], %[ftmp3], %[filter4] \n\t" \
- "psubsh %[ftmp5], %[ftmp5], %[ftmp2] \n\t" \
- "psubsh %[ftmp6], %[ftmp6], %[ftmp3] \n\t" \
- \
- MMI_ULDC1(%[ftmp1], src, 0x03) \
- "punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t" \
- "punpckhbh %[ftmp3], %[ftmp1], %[ftmp0] \n\t" \
- "pmullh %[ftmp2], %[ftmp2], %[filter5] \n\t" \
- "pmullh %[ftmp3], %[ftmp3], %[filter5] \n\t" \
- "paddsh %[ftmp5], %[ftmp5], %[ftmp2] \n\t" \
- "paddsh %[ftmp6], %[ftmp6], %[ftmp3] \n\t" \
- \
- "paddsh %[ftmp5], %[ftmp5], %[ftmp7] \n\t" \
- "paddsh %[ftmp6], %[ftmp6], %[ftmp8] \n\t" \
- \
- "paddsh %[ftmp5], %[ftmp5], %[ff_pw_64] \n\t" \
- "paddsh %[ftmp6], %[ftmp6], %[ff_pw_64] \n\t" \
- "psrah %[ftmp5], %[ftmp5], %[ftmp4] \n\t" \
- "psrah %[ftmp6], %[ftmp6], %[ftmp4] \n\t" \
- "packushb %[ftmp1], %[ftmp5], %[ftmp6] \n\t" \
- \
- MMI_SDC1(%[ftmp1], dst, 0x00)
- #define PUT_VP8_EPEL8_H4_MMI(src, dst) \
- MMI_ULDC1(%[ftmp1], src, 0x00) \
- "punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t" \
- "punpckhbh %[ftmp3], %[ftmp1], %[ftmp0] \n\t" \
- "pmullh %[ftmp5], %[ftmp2], %[filter2] \n\t" \
- "pmullh %[ftmp6], %[ftmp3], %[filter2] \n\t" \
- \
- MMI_ULDC1(%[ftmp1], src, -0x01) \
- "punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t" \
- "punpckhbh %[ftmp3], %[ftmp1], %[ftmp0] \n\t" \
- "pmullh %[ftmp2], %[ftmp2], %[filter1] \n\t" \
- "pmullh %[ftmp3], %[ftmp3], %[filter1] \n\t" \
- "psubsh %[ftmp7], %[ftmp5], %[ftmp2] \n\t" \
- "psubsh %[ftmp8], %[ftmp6], %[ftmp3] \n\t" \
- \
- MMI_ULDC1(%[ftmp1], src, 0x01) \
- "punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t" \
- "punpckhbh %[ftmp3], %[ftmp1], %[ftmp0] \n\t" \
- "pmullh %[ftmp5], %[ftmp2], %[filter3] \n\t" \
- "pmullh %[ftmp6], %[ftmp3], %[filter3] \n\t" \
- \
- MMI_ULDC1(%[ftmp1], src, 0x02) \
- "punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t" \
- "punpckhbh %[ftmp3], %[ftmp1], %[ftmp0] \n\t" \
- "pmullh %[ftmp2], %[ftmp2], %[filter4] \n\t" \
- "pmullh %[ftmp3], %[ftmp3], %[filter4] \n\t" \
- "psubsh %[ftmp5], %[ftmp5], %[ftmp2] \n\t" \
- "psubsh %[ftmp6], %[ftmp6], %[ftmp3] \n\t" \
- \
- "paddsh %[ftmp5], %[ftmp5], %[ftmp7] \n\t" \
- "paddsh %[ftmp6], %[ftmp6], %[ftmp8] \n\t" \
- \
- "paddsh %[ftmp5], %[ftmp5], %[ff_pw_64] \n\t" \
- "paddsh %[ftmp6], %[ftmp6], %[ff_pw_64] \n\t" \
- "psrah %[ftmp5], %[ftmp5], %[ftmp4] \n\t" \
- "psrah %[ftmp6], %[ftmp6], %[ftmp4] \n\t" \
- \
- "packushb %[ftmp1], %[ftmp5], %[ftmp6] \n\t" \
- MMI_SDC1(%[ftmp1], dst, 0x00)
- #define PUT_VP8_EPEL8_V6_MMI(src, src1, dst, srcstride) \
- MMI_ULDC1(%[ftmp1], src, 0x00) \
- "punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t" \
- "punpckhbh %[ftmp3], %[ftmp1], %[ftmp0] \n\t" \
- "pmullh %[ftmp5], %[ftmp2], %[filter2] \n\t" \
- "pmullh %[ftmp6], %[ftmp3], %[filter2] \n\t" \
- \
- PTR_SUBU ""#src1", "#src", "#srcstride" \n\t" \
- MMI_ULDC1(%[ftmp1], src1, 0x00) \
- "punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t" \
- "punpckhbh %[ftmp3], %[ftmp1], %[ftmp0] \n\t" \
- "pmullh %[ftmp2], %[ftmp2], %[filter1] \n\t" \
- "pmullh %[ftmp3], %[ftmp3], %[filter1] \n\t" \
- "psubsh %[ftmp5], %[ftmp5], %[ftmp2] \n\t" \
- "psubsh %[ftmp6], %[ftmp6], %[ftmp3] \n\t" \
- \
- PTR_SUBU ""#src1", "#src1", "#srcstride" \n\t" \
- MMI_ULDC1(%[ftmp1], src1, 0x00) \
- "punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t" \
- "punpckhbh %[ftmp3], %[ftmp1], %[ftmp0] \n\t" \
- "pmullh %[ftmp2], %[ftmp2], %[filter0] \n\t" \
- "pmullh %[ftmp3], %[ftmp3], %[filter0] \n\t" \
- "paddsh %[ftmp7], %[ftmp5], %[ftmp2] \n\t" \
- "paddsh %[ftmp8], %[ftmp6], %[ftmp3] \n\t" \
- \
- PTR_ADDU ""#src1", "#src", "#srcstride" \n\t" \
- MMI_ULDC1(%[ftmp1], src1, 0x00) \
- "punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t" \
- "punpckhbh %[ftmp3], %[ftmp1], %[ftmp0] \n\t" \
- "pmullh %[ftmp5], %[ftmp2], %[filter3] \n\t" \
- "pmullh %[ftmp6], %[ftmp3], %[filter3] \n\t" \
- \
- PTR_ADDU ""#src1", "#src1", "#srcstride" \n\t" \
- MMI_ULDC1(%[ftmp1], src1, 0x00) \
- "punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t" \
- "punpckhbh %[ftmp3], %[ftmp1], %[ftmp0] \n\t" \
- "pmullh %[ftmp2], %[ftmp2], %[filter4] \n\t" \
- "pmullh %[ftmp3], %[ftmp3], %[filter4] \n\t" \
- "psubsh %[ftmp5], %[ftmp5], %[ftmp2] \n\t" \
- "psubsh %[ftmp6], %[ftmp6], %[ftmp3] \n\t" \
- \
- PTR_ADDU ""#src1", "#src1", "#srcstride" \n\t" \
- MMI_ULDC1(%[ftmp1], src1, 0x00) \
- "punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t" \
- "punpckhbh %[ftmp3], %[ftmp1], %[ftmp0] \n\t" \
- "pmullh %[ftmp2], %[ftmp2], %[filter5] \n\t" \
- "pmullh %[ftmp3], %[ftmp3], %[filter5] \n\t" \
- "paddsh %[ftmp5], %[ftmp5], %[ftmp2] \n\t" \
- "paddsh %[ftmp6], %[ftmp6], %[ftmp3] \n\t" \
- \
- "paddsh %[ftmp5], %[ftmp5], %[ftmp7] \n\t" \
- "paddsh %[ftmp6], %[ftmp6], %[ftmp8] \n\t" \
- \
- "paddsh %[ftmp5], %[ftmp5], %[ff_pw_64] \n\t" \
- "paddsh %[ftmp6], %[ftmp6], %[ff_pw_64] \n\t" \
- "psrah %[ftmp5], %[ftmp5], %[ftmp4] \n\t" \
- "psrah %[ftmp6], %[ftmp6], %[ftmp4] \n\t" \
- "packushb %[ftmp1], %[ftmp5], %[ftmp6] \n\t" \
- \
- MMI_SDC1(%[ftmp1], dst, 0x00)
- #define PUT_VP8_EPEL8_V4_MMI(src, src1, dst, srcstride) \
- MMI_ULDC1(%[ftmp1], src, 0x00) \
- "punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t" \
- "punpckhbh %[ftmp3], %[ftmp1], %[ftmp0] \n\t" \
- "pmullh %[ftmp5], %[ftmp2], %[filter2] \n\t" \
- "pmullh %[ftmp6], %[ftmp3], %[filter2] \n\t" \
- \
- PTR_SUBU ""#src1", "#src", "#srcstride" \n\t" \
- MMI_ULDC1(%[ftmp1], src1, 0x00) \
- "punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t" \
- "punpckhbh %[ftmp3], %[ftmp1], %[ftmp0] \n\t" \
- "pmullh %[ftmp2], %[ftmp2], %[filter1] \n\t" \
- "pmullh %[ftmp3], %[ftmp3], %[filter1] \n\t" \
- "psubsh %[ftmp7], %[ftmp5], %[ftmp2] \n\t" \
- "psubsh %[ftmp8], %[ftmp6], %[ftmp3] \n\t" \
- \
- PTR_ADDU ""#src1", "#src", "#srcstride" \n\t" \
- MMI_ULDC1(%[ftmp1], src1, 0x00) \
- "punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t" \
- "punpckhbh %[ftmp3], %[ftmp1], %[ftmp0] \n\t" \
- "pmullh %[ftmp5], %[ftmp2], %[filter3] \n\t" \
- "pmullh %[ftmp6], %[ftmp3], %[filter3] \n\t" \
- \
- PTR_ADDU ""#src1", "#src1", "#srcstride" \n\t" \
- MMI_ULDC1(%[ftmp1], src1, 0x00) \
- "punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t" \
- "punpckhbh %[ftmp3], %[ftmp1], %[ftmp0] \n\t" \
- "pmullh %[ftmp2], %[ftmp2], %[filter4] \n\t" \
- "pmullh %[ftmp3], %[ftmp3], %[filter4] \n\t" \
- "psubsh %[ftmp5], %[ftmp5], %[ftmp2] \n\t" \
- "psubsh %[ftmp6], %[ftmp6], %[ftmp3] \n\t" \
- \
- "paddsh %[ftmp5], %[ftmp5], %[ftmp7] \n\t" \
- "paddsh %[ftmp6], %[ftmp6], %[ftmp8] \n\t" \
- \
- "paddsh %[ftmp5], %[ftmp5], %[ff_pw_64] \n\t" \
- "paddsh %[ftmp6], %[ftmp6], %[ff_pw_64] \n\t" \
- "psrah %[ftmp5], %[ftmp5], %[ftmp4] \n\t" \
- "psrah %[ftmp6], %[ftmp6], %[ftmp4] \n\t" \
- "packushb %[ftmp1], %[ftmp5], %[ftmp6] \n\t" \
- \
- MMI_SDC1(%[ftmp1], dst, 0x00)
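- /*
-  * Bilinear MC building blocks: out = (a*src[x] + b*src[x+1] + 4) >> 3
-  * horizontally, and the same with the next row (weights c/d) vertically.
-  * The weights are splatted 16-bit vectors; ff_pw_4 is the rounding bias
-  * and ftmp4 is expected to hold the shift count 3.
-  */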
- #define PUT_VP8_BILINEAR8_H_MMI(src, dst) \
- MMI_ULDC1(%[ftmp1], src, 0x00) \
- "punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t" \
- "punpckhbh %[ftmp3], %[ftmp1], %[ftmp0] \n\t" \
- "pmullh %[ftmp5], %[ftmp2], %[a] \n\t" \
- "pmullh %[ftmp6], %[ftmp3], %[a] \n\t" \
- \
- MMI_ULDC1(%[ftmp1], src, 0x01) \
- "punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t" \
- "punpckhbh %[ftmp3], %[ftmp1], %[ftmp0] \n\t" \
- "pmullh %[ftmp2], %[ftmp2], %[b] \n\t" \
- "pmullh %[ftmp3], %[ftmp3], %[b] \n\t" \
- "paddsh %[ftmp5], %[ftmp5], %[ftmp2] \n\t" \
- "paddsh %[ftmp6], %[ftmp6], %[ftmp3] \n\t" \
- \
- "paddsh %[ftmp5], %[ftmp5], %[ff_pw_4] \n\t" \
- "paddsh %[ftmp6], %[ftmp6], %[ff_pw_4] \n\t" \
- "psrah %[ftmp5], %[ftmp5], %[ftmp4] \n\t" \
- "psrah %[ftmp6], %[ftmp6], %[ftmp4] \n\t" \
- \
- "packushb %[ftmp1], %[ftmp5], %[ftmp6] \n\t" \
- MMI_SDC1(%[ftmp1], dst, 0x00)
- #define PUT_VP8_BILINEAR4_H_MMI(src, dst) \
- MMI_ULWC1(%[ftmp1], src, 0x00) \
- "punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t" \
- "pmullh %[ftmp3], %[ftmp2], %[a] \n\t" \
- \
- MMI_ULWC1(%[ftmp1], src, 0x01) \
- "punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t" \
- "pmullh %[ftmp2], %[ftmp2], %[b] \n\t" \
- "paddsh %[ftmp3], %[ftmp3], %[ftmp2] \n\t" \
- \
- "paddsh %[ftmp3], %[ftmp3], %[ff_pw_4] \n\t" \
- "psrah %[ftmp3], %[ftmp3], %[ftmp4] \n\t" \
- \
- "packushb %[ftmp1], %[ftmp3], %[ftmp0] \n\t" \
- MMI_SWC1(%[ftmp1], dst, 0x00)
- #define PUT_VP8_BILINEAR8_V_MMI(src, src1, dst, sstride) \
- MMI_ULDC1(%[ftmp1], src, 0x00) \
- "punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t" \
- "punpckhbh %[ftmp3], %[ftmp1], %[ftmp0] \n\t" \
- "pmullh %[ftmp5], %[ftmp2], %[c] \n\t" \
- "pmullh %[ftmp6], %[ftmp3], %[c] \n\t" \
- \
- PTR_ADDU ""#src1", "#src", "#sstride" \n\t" \
- MMI_ULDC1(%[ftmp1], src1, 0x00) \
- "punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t" \
- "punpckhbh %[ftmp3], %[ftmp1], %[ftmp0] \n\t" \
- "pmullh %[ftmp2], %[ftmp2], %[d] \n\t" \
- "pmullh %[ftmp3], %[ftmp3], %[d] \n\t" \
- "paddsh %[ftmp5], %[ftmp5], %[ftmp2] \n\t" \
- "paddsh %[ftmp6], %[ftmp6], %[ftmp3] \n\t" \
- \
- "paddsh %[ftmp5], %[ftmp5], %[ff_pw_4] \n\t" \
- "paddsh %[ftmp6], %[ftmp6], %[ff_pw_4] \n\t" \
- "psrah %[ftmp5], %[ftmp5], %[ftmp4] \n\t" \
- "psrah %[ftmp6], %[ftmp6], %[ftmp4] \n\t" \
- \
- "packushb %[ftmp1], %[ftmp5], %[ftmp6] \n\t" \
- MMI_SDC1(%[ftmp1], dst, 0x00)
- #define PUT_VP8_BILINEAR4_V_MMI(src, src1, dst, sstride) \
- MMI_ULWC1(%[ftmp1], src, 0x00) \
- "punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t" \
- "pmullh %[ftmp3], %[ftmp2], %[c] \n\t" \
- \
- PTR_ADDU ""#src1", "#src", "#sstride" \n\t" \
- MMI_ULWC1(%[ftmp1], src1, 0x00) \
- "punpcklbh %[ftmp2], %[ftmp1], %[ftmp0] \n\t" \
- "pmullh %[ftmp2], %[ftmp2], %[d] \n\t" \
- "paddsh %[ftmp3], %[ftmp3], %[ftmp2] \n\t" \
- \
- "paddsh %[ftmp3], %[ftmp3], %[ff_pw_4] \n\t" \
- "psrah %[ftmp3], %[ftmp3], %[ftmp4] \n\t" \
- \
- "packushb %[ftmp1], %[ftmp3], %[ftmp0] \n\t" \
- MMI_SWC1(%[ftmp1], dst, 0x00)
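- /*
-  * subpel_filters[] from the VP8 spec with each 8-bit coefficient replicated
-  * into four 16-bit lanes so it can feed pmullh directly, e.g. the center
-  * tap 123 becomes 0x007b007b007b007b. Row order matches the 1/8-pel
-  * positions 1..7; the negative taps are applied with psubsh rather than
-  * being stored negated.
-  */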
- DECLARE_ALIGNED(8, static const uint64_t, fourtap_subpel_filters[7][6]) = {
- {0x0000000000000000, 0x0006000600060006, 0x007b007b007b007b,
- 0x000c000c000c000c, 0x0001000100010001, 0x0000000000000000},
- {0x0002000200020002, 0x000b000b000b000b, 0x006c006c006c006c,
- 0x0024002400240024, 0x0008000800080008, 0x0001000100010001},
- {0x0000000000000000, 0x0009000900090009, 0x005d005d005d005d,
- 0x0032003200320032, 0x0006000600060006, 0x0000000000000000},
- {0x0003000300030003, 0x0010001000100010, 0x004d004d004d004d,
- 0x004d004d004d004d, 0x0010001000100010, 0x0003000300030003},
- {0x0000000000000000, 0x0006000600060006, 0x0032003200320032,
- 0x005d005d005d005d, 0x0009000900090009, 0x0000000000000000},
- {0x0001000100010001, 0x0008000800080008, 0x0024002400240024,
- 0x006c006c006c006c, 0x000b000b000b000b, 0x0002000200020002},
- {0x0000000000000000, 0x0001000100010001, 0x000c000c000c000c,
- 0x007b007b007b007b, 0x0006000600060006, 0x0000000000000000}
- };
- #if 0
- #define FILTER_6TAP(src, F, stride) \
- cm[(F[2] * src[x + 0 * stride] - F[1] * src[x - 1 * stride] + \
- F[0] * src[x - 2 * stride] + F[3] * src[x + 1 * stride] - \
- F[4] * src[x + 2 * stride] + F[5] * src[x + 3 * stride] + 64) >> 7]
- #define FILTER_4TAP(src, F, stride) \
- cm[(F[2] * src[x + 0 * stride] - F[1] * src[x - 1 * stride] + \
- F[3] * src[x + 1 * stride] - F[4] * src[x + 2 * stride] + 64) >> 7]
- static const uint8_t subpel_filters[7][6] = {
- { 0, 6, 123, 12, 1, 0 },
- { 2, 11, 108, 36, 8, 1 },
- { 0, 9, 93, 50, 6, 0 },
- { 3, 16, 77, 77, 16, 3 },
- { 0, 6, 50, 93, 9, 0 },
- { 1, 8, 36, 108, 11, 2 },
- { 0, 1, 12, 123, 6, 0 },
- };
- #define MUL_20091(a) ((((a) * 20091) >> 16) + (a))
- #define MUL_35468(a) (((a) * 35468) >> 16)
- #endif
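- /* clip_int8(n) clamps n to [-128, 127] by indexing the unsigned crop
-  * table at a +0x80 offset and shifting the result back down. */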
- #define clip_int8(n) (cm[(n) + 0x80] - 0x80)
- static av_always_inline void vp8_filter_common_is4tap(uint8_t *p,
- ptrdiff_t stride)
- {
- av_unused int p1 = p[-2 * stride];
- av_unused int p0 = p[-1 * stride];
- av_unused int q0 = p[ 0 * stride];
- av_unused int q1 = p[ 1 * stride];
- int a, f1, f2;
- const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP;
- a = 3 * (q0 - p0);
- a += clip_int8(p1 - q1);
- a = clip_int8(a);
- // We deviate from the spec here with c(a+3) >> 3
- // since that's what libvpx does.
- f1 = FFMIN(a + 4, 127) >> 3;
- f2 = FFMIN(a + 3, 127) >> 3;
- // Despite what the spec says, we do need to clamp here to
- // be bitexact with libvpx.
- p[-1 * stride] = cm[p0 + f2];
- p[ 0 * stride] = cm[q0 - f1];
- }
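- /* Same as above, but for the non-4tap (inner-edge, no-hev) case: the
-  * clip(p1 - q1) term is dropped and p1/q1 are also nudged by (f1 + 1) >> 1. */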
- static av_always_inline void vp8_filter_common_isnot4tap(uint8_t *p,
- ptrdiff_t stride)
- {
- av_unused int p1 = p[-2 * stride];
- av_unused int p0 = p[-1 * stride];
- av_unused int q0 = p[ 0 * stride];
- av_unused int q1 = p[ 1 * stride];
- int a, f1, f2;
- const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP;
- a = 3 * (q0 - p0);
- a = clip_int8(a);
- // We deviate from the spec here with c(a+3) >> 3
- // since that's what libvpx does.
- f1 = FFMIN(a + 4, 127) >> 3;
- f2 = FFMIN(a + 3, 127) >> 3;
- // Despite what the spec says, we do need to clamp here to
- // be bitexact with libvpx.
- p[-1 * stride] = cm[p0 + f2];
- p[ 0 * stride] = cm[q0 - f1];
- a = (f1 + 1) >> 1;
- p[-2 * stride] = cm[p1 + a];
- p[ 1 * stride] = cm[q1 - a];
- }
- static av_always_inline int vp8_simple_limit(uint8_t *p, ptrdiff_t stride,
- int flim)
- {
- av_unused int p1 = p[-2 * stride];
- av_unused int p0 = p[-1 * stride];
- av_unused int q0 = p[ 0 * stride];
- av_unused int q1 = p[ 1 * stride];
- return 2 * FFABS(p0 - q0) + (FFABS(p1 - q1) >> 1) <= flim;
- }
- static av_always_inline int hev(uint8_t *p, ptrdiff_t stride, int thresh)
- {
- av_unused int p1 = p[-2 * stride];
- av_unused int p0 = p[-1 * stride];
- av_unused int q0 = p[ 0 * stride];
- av_unused int q1 = p[ 1 * stride];
- return FFABS(p1 - p0) > thresh || FFABS(q1 - q0) > thresh;
- }
- static av_always_inline void filter_mbedge(uint8_t *p, ptrdiff_t stride)
- {
- int a0, a1, a2, w;
- const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP;
- av_unused int p2 = p[-3 * stride];
- av_unused int p1 = p[-2 * stride];
- av_unused int p0 = p[-1 * stride];
- av_unused int q0 = p[ 0 * stride];
- av_unused int q1 = p[ 1 * stride];
- av_unused int q2 = p[ 2 * stride];
- w = clip_int8(p1 - q1);
- w = clip_int8(w + 3 * (q0 - p0));
- a0 = (27 * w + 63) >> 7;
- a1 = (18 * w + 63) >> 7;
- a2 = (9 * w + 63) >> 7;
- p[-3 * stride] = cm[p2 + a2];
- p[-2 * stride] = cm[p1 + a1];
- p[-1 * stride] = cm[p0 + a0];
- p[ 0 * stride] = cm[q0 - a0];
- p[ 1 * stride] = cm[q1 - a1];
- p[ 2 * stride] = cm[q2 - a2];
- }
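- /* Normal filter limit check: the simple-filter edge test against E plus
-  * all six interior first differences bounded by I. */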
- static av_always_inline int vp8_normal_limit(uint8_t *p, ptrdiff_t stride,
- int E, int I)
- {
- av_unused int p3 = p[-4 * stride];
- av_unused int p2 = p[-3 * stride];
- av_unused int p1 = p[-2 * stride];
- av_unused int p0 = p[-1 * stride];
- av_unused int q0 = p[ 0 * stride];
- av_unused int q1 = p[ 1 * stride];
- av_unused int q2 = p[ 2 * stride];
- av_unused int q3 = p[ 3 * stride];
- return vp8_simple_limit(p, stride, E) &&
- FFABS(p3 - p2) <= I && FFABS(p2 - p1) <= I &&
- FFABS(p1 - p0) <= I && FFABS(q3 - q2) <= I &&
- FFABS(q2 - q1) <= I && FFABS(q1 - q0) <= I;
- }
- static av_always_inline void vp8_v_loop_filter8_mmi(uint8_t *dst,
- ptrdiff_t stride, int flim_E, int flim_I, int hev_thresh)
- {
- double ftmp[18];
- uint32_t tmp[1];
- DECLARE_DOUBLE_1;
- DECLARE_DOUBLE_2;
- DECLARE_UINT32_T;
- DECLARE_VAR_ALL64;
- __asm__ volatile(
- /* Get data from dst */
- MMI_ULDC1(%[q0], %[dst], 0x0)
- PTR_SUBU "%[tmp0], %[dst], %[stride] \n\t"
- MMI_ULDC1(%[p0], %[tmp0], 0x0)
- PTR_SUBU "%[tmp0], %[tmp0], %[stride] \n\t"
- MMI_ULDC1(%[p1], %[tmp0], 0x0)
- PTR_SUBU "%[tmp0], %[tmp0], %[stride] \n\t"
- MMI_ULDC1(%[p2], %[tmp0], 0x0)
- PTR_SUBU "%[tmp0], %[tmp0], %[stride] \n\t"
- MMI_ULDC1(%[p3], %[tmp0], 0x0)
- PTR_ADDU "%[tmp0], %[dst], %[stride] \n\t"
- MMI_ULDC1(%[q1], %[tmp0], 0x0)
- PTR_ADDU "%[tmp0], %[tmp0], %[stride] \n\t"
- MMI_ULDC1(%[q2], %[tmp0], 0x0)
- PTR_ADDU "%[tmp0], %[tmp0], %[stride] \n\t"
- MMI_ULDC1(%[q3], %[tmp0], 0x0)
- MMI_VP8_LOOP_FILTER
- /* Move to dst */
- MMI_USDC1(%[q0], %[dst], 0x0)
- PTR_SUBU "%[tmp0], %[dst], %[stride] \n\t"
- MMI_USDC1(%[p0], %[tmp0], 0x0)
- PTR_SUBU "%[tmp0], %[tmp0], %[stride] \n\t"
- MMI_USDC1(%[p1], %[tmp0], 0x0)
- PTR_SUBU "%[tmp0], %[tmp0], %[stride] \n\t"
- MMI_USDC1(%[p2], %[tmp0], 0x0)
- PTR_ADDU "%[tmp0], %[dst], %[stride] \n\t"
- MMI_USDC1(%[q1], %[tmp0], 0x0)
- PTR_ADDU "%[tmp0], %[tmp0], %[stride] \n\t"
- MMI_USDC1(%[q2], %[tmp0], 0x0)
- : RESTRICT_ASM_ALL64
- [p3]"=&f"(ftmp[0]), [p2]"=&f"(ftmp[1]),
- [p1]"=&f"(ftmp[2]), [p0]"=&f"(ftmp[3]),
- [q0]"=&f"(ftmp[4]), [q1]"=&f"(ftmp[5]),
- [q2]"=&f"(ftmp[6]), [q3]"=&f"(ftmp[7]),
- [ftmp0]"=&f"(ftmp[8]), [ftmp1]"=&f"(ftmp[9]),
- [ftmp2]"=&f"(ftmp[10]), [ftmp3]"=&f"(ftmp[11]),
- [hev]"=&f"(ftmp[12]), [mask]"=&f"(ftmp[13]),
- [ftmp4]"=&f"(ftmp[14]), [ftmp5]"=&f"(ftmp[15]),
- [ftmp6]"=&f"(ftmp[16]), [ftmp7]"=&f"(ftmp[17]),
- [dst]"+&r"(dst), [tmp0]"=&r"(tmp[0]),
- RESTRICT_ASM_DOUBLE_1, RESTRICT_ASM_DOUBLE_2,
- RESTRICT_ASM_UINT32_T
- : [e]"r"((mips_reg)flim_E), [thresh]"r"((mips_reg)hev_thresh),
- [i]"r"((mips_reg)flim_I), [stride]"r"((mips_reg)stride)
- : "memory"
- );
- }
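- /* The inner-edge variants here fall back to the scalar per-pixel filters
-  * above; only the macroblock-edge filters use the MMI_VP8_LOOP_FILTER
-  * kernel. */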
- static av_always_inline void vp8_v_loop_filter8_inner_mmi(uint8_t *dst,
- ptrdiff_t stride, int flim_E, int flim_I, int hev_thresh)
- {
- int i;
- for (i = 0; i < 8; i++)
- if (vp8_normal_limit(dst + i * 1, stride, flim_E, flim_I)) {
- int hv = hev(dst + i * 1, stride, hev_thresh);
- if (hv)
- vp8_filter_common_is4tap(dst + i * 1, stride);
- else
- vp8_filter_common_isnot4tap(dst + i * 1, stride);
- }
- }
- static av_always_inline void vp8_h_loop_filter8_mmi(uint8_t *dst,
- ptrdiff_t stride, int flim_E, int flim_I, int hev_thresh)
- {
- double ftmp[18];
- uint32_t tmp[1];
- DECLARE_DOUBLE_1;
- DECLARE_DOUBLE_2;
- DECLARE_UINT32_T;
- DECLARE_VAR_ALL64;
- __asm__ volatile(
- /* Get data from dst */
- MMI_ULDC1(%[p3], %[dst], -0x04)
- PTR_ADDU "%[tmp0], %[dst], %[stride] \n\t"
- MMI_ULDC1(%[p2], %[tmp0], -0x04)
- PTR_ADDU "%[tmp0], %[tmp0], %[stride] \n\t"
- MMI_ULDC1(%[p1], %[tmp0], -0x04)
- PTR_ADDU "%[tmp0], %[tmp0], %[stride] \n\t"
- MMI_ULDC1(%[p0], %[tmp0], -0x04)
- PTR_ADDU "%[tmp0], %[tmp0], %[stride] \n\t"
- MMI_ULDC1(%[q0], %[tmp0], -0x04)
- PTR_ADDU "%[tmp0], %[tmp0], %[stride] \n\t"
- MMI_ULDC1(%[q1], %[tmp0], -0x04)
- PTR_ADDU "%[tmp0], %[tmp0], %[stride] \n\t"
- MMI_ULDC1(%[q2], %[tmp0], -0x04)
- PTR_ADDU "%[tmp0], %[tmp0], %[stride] \n\t"
- MMI_ULDC1(%[q3], %[tmp0], -0x04)
- /* Matrix transpose */
- TRANSPOSE_8B(%[p3], %[p2], %[p1], %[p0],
- %[q0], %[q1], %[q2], %[q3],
- %[ftmp1], %[ftmp2], %[ftmp3], %[ftmp4])
- MMI_VP8_LOOP_FILTER
- /* Matrix transpose */
- TRANSPOSE_8B(%[p3], %[p2], %[p1], %[p0],
- %[q0], %[q1], %[q2], %[q3],
- %[ftmp1], %[ftmp2], %[ftmp3], %[ftmp4])
- /* Move to dst */
- MMI_USDC1(%[p3], %[dst], -0x04)
- PTR_ADDU "%[dst], %[dst], %[stride] \n\t"
- MMI_USDC1(%[p2], %[dst], -0x04)
- PTR_ADDU "%[dst], %[dst], %[stride] \n\t"
- MMI_USDC1(%[p1], %[dst], -0x04)
- PTR_ADDU "%[dst], %[dst], %[stride] \n\t"
- MMI_USDC1(%[p0], %[dst], -0x04)
- PTR_ADDU "%[dst], %[dst], %[stride] \n\t"
- MMI_USDC1(%[q0], %[dst], -0x04)
- PTR_ADDU "%[dst], %[dst], %[stride] \n\t"
- MMI_USDC1(%[q1], %[dst], -0x04)
- PTR_ADDU "%[dst], %[dst], %[stride] \n\t"
- MMI_USDC1(%[q2], %[dst], -0x04)
- PTR_ADDU "%[dst], %[dst], %[stride] \n\t"
- MMI_USDC1(%[q3], %[dst], -0x04)
- : RESTRICT_ASM_ALL64
- [p3]"=&f"(ftmp[0]), [p2]"=&f"(ftmp[1]),
- [p1]"=&f"(ftmp[2]), [p0]"=&f"(ftmp[3]),
- [q0]"=&f"(ftmp[4]), [q1]"=&f"(ftmp[5]),
- [q2]"=&f"(ftmp[6]), [q3]"=&f"(ftmp[7]),
- [ftmp0]"=&f"(ftmp[8]), [ftmp1]"=&f"(ftmp[9]),
- [ftmp2]"=&f"(ftmp[10]), [ftmp3]"=&f"(ftmp[11]),
- [hev]"=&f"(ftmp[12]), [mask]"=&f"(ftmp[13]),
- [ftmp4]"=&f"(ftmp[14]), [ftmp5]"=&f"(ftmp[15]),
- [ftmp6]"=&f"(ftmp[16]), [ftmp7]"=&f"(ftmp[17]),
- [dst]"+&r"(dst), [tmp0]"=&r"(tmp[0]),
- RESTRICT_ASM_DOUBLE_1, RESTRICT_ASM_DOUBLE_2,
- RESTRICT_ASM_UINT32_T
- : [e]"r"((mips_reg)flim_E), [thresh]"r"((mips_reg)hev_thresh),
- [i]"r"((mips_reg)flim_I), [stride]"r"((mips_reg)stride)
- : "memory"
- );
- }
- static av_always_inline void vp8_h_loop_filter8_inner_mmi(uint8_t *dst,
- ptrdiff_t stride, int flim_E, int flim_I, int hev_thresh)
- {
- int i;
- for (i = 0; i < 8; i++)
- if (vp8_normal_limit(dst + i * stride, 1, flim_E, flim_I)) {
- int hv = hev(dst + i * stride, 1, hev_thresh);
- if (hv)
- vp8_filter_common_is4tap(dst + i * stride, 1);
- else
- vp8_filter_common_isnot4tap(dst + i * stride, 1);
- }
- }
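- /*
-  * Inverse Walsh-Hadamard transform of the 16 luma DC coefficients. The MMI
-  * block performs the first (vertical) butterfly pass on dc[] in place; the
-  * scalar statements then do the horizontal pass fused with the (x + 3) >> 3
-  * rounding, scattering each result into block[i][j][0], before dc[] is
-  * zeroed.
-  */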
- void ff_vp8_luma_dc_wht_mmi(int16_t block[4][4][16], int16_t dc[16])
- {
- #if 1
- double ftmp[8];
- DECLARE_VAR_ALL64;
- __asm__ volatile (
- MMI_LDC1(%[ftmp0], %[dc], 0x00)
- MMI_LDC1(%[ftmp1], %[dc], 0x08)
- MMI_LDC1(%[ftmp2], %[dc], 0x10)
- MMI_LDC1(%[ftmp3], %[dc], 0x18)
- "paddsh %[ftmp4], %[ftmp0], %[ftmp3] \n\t"
- "psubsh %[ftmp5], %[ftmp0], %[ftmp3] \n\t"
- "paddsh %[ftmp6], %[ftmp1], %[ftmp2] \n\t"
- "psubsh %[ftmp7], %[ftmp1], %[ftmp2] \n\t"
- "paddsh %[ftmp0], %[ftmp4], %[ftmp6] \n\t"
- "paddsh %[ftmp1], %[ftmp5], %[ftmp7] \n\t"
- "psubsh %[ftmp2], %[ftmp4], %[ftmp6] \n\t"
- "psubsh %[ftmp3], %[ftmp5], %[ftmp7] \n\t"
- MMI_SDC1(%[ftmp0], %[dc], 0x00)
- MMI_SDC1(%[ftmp1], %[dc], 0x08)
- MMI_SDC1(%[ftmp2], %[dc], 0x10)
- MMI_SDC1(%[ftmp3], %[dc], 0x18)
- : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]),
- [ftmp2]"=&f"(ftmp[2]), [ftmp3]"=&f"(ftmp[3]),
- [ftmp4]"=&f"(ftmp[4]), [ftmp5]"=&f"(ftmp[5]),
- [ftmp6]"=&f"(ftmp[6]),
- RESTRICT_ASM_ALL64
- [ftmp7]"=&f"(ftmp[7])
- : [dc]"r"((uint8_t*)dc)
- : "memory"
- );
- block[0][0][0] = (dc[0] + dc[3] + 3 + dc[1] + dc[2]) >> 3;
- block[0][1][0] = (dc[0] - dc[3] + 3 + dc[1] - dc[2]) >> 3;
- block[0][2][0] = (dc[0] + dc[3] + 3 - dc[1] - dc[2]) >> 3;
- block[0][3][0] = (dc[0] - dc[3] + 3 - dc[1] + dc[2]) >> 3;
- block[1][0][0] = (dc[4] + dc[7] + 3 + dc[5] + dc[6]) >> 3;
- block[1][1][0] = (dc[4] - dc[7] + 3 + dc[5] - dc[6]) >> 3;
- block[1][2][0] = (dc[4] + dc[7] + 3 - dc[5] - dc[6]) >> 3;
- block[1][3][0] = (dc[4] - dc[7] + 3 - dc[5] + dc[6]) >> 3;
- block[2][0][0] = (dc[8] + dc[11] + 3 + dc[9] + dc[10]) >> 3;
- block[2][1][0] = (dc[8] - dc[11] + 3 + dc[9] - dc[10]) >> 3;
- block[2][2][0] = (dc[8] + dc[11] + 3 - dc[9] - dc[10]) >> 3;
- block[2][3][0] = (dc[8] - dc[11] + 3 - dc[9] + dc[10]) >> 3;
- block[3][0][0] = (dc[12] + dc[15] + 3 + dc[13] + dc[14]) >> 3;
- block[3][1][0] = (dc[12] - dc[15] + 3 + dc[13] - dc[14]) >> 3;
- block[3][2][0] = (dc[12] + dc[15] + 3 - dc[13] - dc[14]) >> 3;
- block[3][3][0] = (dc[12] - dc[15] + 3 - dc[13] + dc[14]) >> 3;
- __asm__ volatile (
- "pxor %[ftmp0], %[ftmp0], %[ftmp0] \n\t"
- MMI_SDC1(%[ftmp0], %[dc], 0x00)
- MMI_SDC1(%[ftmp0], %[dc], 0x08)
- MMI_SDC1(%[ftmp0], %[dc], 0x10)
- MMI_SDC1(%[ftmp0], %[dc], 0x18)
- : RESTRICT_ASM_ALL64
- [ftmp0]"=&f"(ftmp[0])
- : [dc]"r"((uint8_t *)dc)
- : "memory"
- );
- #else
- int t00, t01, t02, t03, t10, t11, t12, t13, t20, t21, t22, t23, t30, t31, t32, t33;
- t00 = dc[0] + dc[12];
- t10 = dc[1] + dc[13];
- t20 = dc[2] + dc[14];
- t30 = dc[3] + dc[15];
- t03 = dc[0] - dc[12];
- t13 = dc[1] - dc[13];
- t23 = dc[2] - dc[14];
- t33 = dc[3] - dc[15];
- t01 = dc[4] + dc[ 8];
- t11 = dc[5] + dc[ 9];
- t21 = dc[6] + dc[10];
- t31 = dc[7] + dc[11];
- t02 = dc[4] - dc[ 8];
- t12 = dc[5] - dc[ 9];
- t22 = dc[6] - dc[10];
- t32 = dc[7] - dc[11];
- dc[ 0] = t00 + t01;
- dc[ 1] = t10 + t11;
- dc[ 2] = t20 + t21;
- dc[ 3] = t30 + t31;
- dc[ 4] = t03 + t02;
- dc[ 5] = t13 + t12;
- dc[ 6] = t23 + t22;
- dc[ 7] = t33 + t32;
- dc[ 8] = t00 - t01;
- dc[ 9] = t10 - t11;
- dc[10] = t20 - t21;
- dc[11] = t30 - t31;
- dc[12] = t03 - t02;
- dc[13] = t13 - t12;
- dc[14] = t23 - t22;
- dc[15] = t33 - t32;
- block[0][0][0] = (dc[0] + dc[3] + 3 + dc[1] + dc[2]) >> 3;
- block[0][1][0] = (dc[0] - dc[3] + 3 + dc[1] - dc[2]) >> 3;
- block[0][2][0] = (dc[0] + dc[3] + 3 - dc[1] - dc[2]) >> 3;
- block[0][3][0] = (dc[0] - dc[3] + 3 - dc[1] + dc[2]) >> 3;
- block[1][0][0] = (dc[4] + dc[7] + 3 + dc[5] + dc[6]) >> 3;
- block[1][1][0] = (dc[4] - dc[7] + 3 + dc[5] - dc[6]) >> 3;
- block[1][2][0] = (dc[4] + dc[7] + 3 - dc[5] - dc[6]) >> 3;
- block[1][3][0] = (dc[4] - dc[7] + 3 - dc[5] + dc[6]) >> 3;
- block[2][0][0] = (dc[8] + dc[11] + 3 + dc[9] + dc[10]) >> 3;
- block[2][1][0] = (dc[8] - dc[11] + 3 + dc[9] - dc[10]) >> 3;
- block[2][2][0] = (dc[8] + dc[11] + 3 - dc[9] - dc[10]) >> 3;
- block[2][3][0] = (dc[8] - dc[11] + 3 - dc[9] + dc[10]) >> 3;
- block[3][0][0] = (dc[12] + dc[15] + 3 + dc[13] + dc[14]) >> 3;
- block[3][1][0] = (dc[12] - dc[15] + 3 + dc[13] - dc[14]) >> 3;
- block[3][2][0] = (dc[12] + dc[15] + 3 - dc[13] - dc[14]) >> 3;
- block[3][3][0] = (dc[12] - dc[15] + 3 - dc[13] + dc[14]) >> 3;
- AV_ZERO64(dc + 0);
- AV_ZERO64(dc + 4);
- AV_ZERO64(dc + 8);
- AV_ZERO64(dc + 12);
- #endif
- }
- void ff_vp8_luma_dc_wht_dc_mmi(int16_t block[4][4][16], int16_t dc[16])
- {
- int val = (dc[0] + 3) >> 3;
- dc[0] = 0;
- block[0][0][0] = val;
- block[0][1][0] = val;
- block[0][2][0] = val;
- block[0][3][0] = val;
- block[1][0][0] = val;
- block[1][1][0] = val;
- block[1][2][0] = val;
- block[1][3][0] = val;
- block[2][0][0] = val;
- block[2][1][0] = val;
- block[2][2][0] = val;
- block[2][3][0] = val;
- block[3][0][0] = val;
- block[3][1][0] = val;
- block[3][2][0] = val;
- block[3][3][0] = val;
- }
- void ff_vp8_idct_add_mmi(uint8_t *dst, int16_t block[16], ptrdiff_t stride)
- {
- #if 1
- double ftmp[12];
- uint32_t tmp[1];
- union av_intfloat64 ff_ph_4e7b_u;
- union av_intfloat64 ff_ph_22a3_u;
- DECLARE_VAR_LOW32;
- DECLARE_VAR_ALL64;
- ff_ph_4e7b_u.i = 0x4e7b4e7b4e7b4e7bULL;
- ff_ph_22a3_u.i = 0x22a322a322a322a3ULL;
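- /* pmulhh keeps the high 16 bits of a signed 16x16 multiply, so with
-  * 0x4e7b = 20091: MUL_20091(x) = ((x * 20091) >> 16) + x, and with
-  * 0x22a3 = 8867:  MUL_35468(x) = ((x << 2) * 8867) >> 16, since
-  * 4 * 8867 = 35468 (x << 2 is assumed not to overflow 16 bits here). */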
- __asm__ volatile (
- "pxor %[ftmp0], %[ftmp0], %[ftmp0] \n\t"
- MMI_LDC1(%[ftmp1], %[block], 0x00)
- MMI_LDC1(%[ftmp2], %[block], 0x08)
- MMI_LDC1(%[ftmp3], %[block], 0x10)
- MMI_LDC1(%[ftmp4], %[block], 0x18)
- "li %[tmp0], 0x02 \n\t"
- "mtc1 %[tmp0], %[ftmp11] \n\t"
- // block[0...3] + block[8...11]
- "paddh %[ftmp5], %[ftmp1], %[ftmp3] \n\t"
- // block[0...3] - block[8...11]
- "psubh %[ftmp6], %[ftmp1], %[ftmp3] \n\t"
- // MUL_35468(block[12...15])
- "psllh %[ftmp9], %[ftmp4], %[ftmp11] \n\t"
- "pmulhh %[ftmp7], %[ftmp9], %[ff_ph_22a3] \n\t"
- // MUL_35468(block[4...7])
- "psllh %[ftmp9], %[ftmp2], %[ftmp11] \n\t"
- "pmulhh %[ftmp8], %[ftmp9], %[ff_ph_22a3] \n\t"
- // MUL_20091(block[4...7])
- "pmulhh %[ftmp9], %[ftmp2], %[ff_ph_4e7b] \n\t"
- "paddh %[ftmp9], %[ftmp9], %[ftmp2] \n\t"
- // MUL_20091(block[12...15])
- "pmulhh %[ftmp10], %[ftmp4], %[ff_ph_4e7b] \n\t"
- "paddh %[ftmp10], %[ftmp10], %[ftmp4] \n\t"
- // tmp[0 4 8 12]
- "paddh %[ftmp1], %[ftmp5], %[ftmp7] \n\t"
- "paddh %[ftmp1], %[ftmp1], %[ftmp9] \n\t"
- // tmp[1 5 9 13]
- "paddh %[ftmp2], %[ftmp6], %[ftmp8] \n\t"
- "psubh %[ftmp2], %[ftmp2], %[ftmp10] \n\t"
- // tmp[2 6 10 14]
- "psubh %[ftmp3], %[ftmp6], %[ftmp8] \n\t"
- "paddh %[ftmp3], %[ftmp3], %[ftmp10] \n\t"
- // tmp[3 7 11 15]
- "psubh %[ftmp4], %[ftmp5], %[ftmp7] \n\t"
- "psubh %[ftmp4], %[ftmp4], %[ftmp9] \n\t"
- MMI_SDC1(%[ftmp0], %[block], 0x00)
- MMI_SDC1(%[ftmp0], %[block], 0x08)
- MMI_SDC1(%[ftmp0], %[block], 0x10)
- MMI_SDC1(%[ftmp0], %[block], 0x18)
- TRANSPOSE_4H(%[ftmp1], %[ftmp2], %[ftmp3], %[ftmp4],
- %[ftmp5], %[ftmp6], %[ftmp7], %[ftmp8])
- // t[0 4 8 12]
- "paddh %[ftmp5], %[ftmp1], %[ftmp3] \n\t"
- // t[1 5 9 13]
- "psubh %[ftmp6], %[ftmp1], %[ftmp3] \n\t"
- // t[2 6 10 14]
- "psllh %[ftmp9], %[ftmp2], %[ftmp11] \n\t"
- "pmulhh %[ftmp9], %[ftmp9], %[ff_ph_22a3] \n\t"
- "psubh %[ftmp7], %[ftmp9], %[ftmp4] \n\t"
- "pmulhh %[ftmp10], %[ftmp4], %[ff_ph_4e7b] \n\t"
- "psubh %[ftmp7], %[ftmp7], %[ftmp10] \n\t"
- // t[3 7 11 15]
- "psllh %[ftmp9], %[ftmp4], %[ftmp11] \n\t"
- "pmulhh %[ftmp9], %[ftmp9], %[ff_ph_22a3] \n\t"
- "paddh %[ftmp8], %[ftmp9], %[ftmp2] \n\t"
- "pmulhh %[ftmp10], %[ftmp2], %[ff_ph_4e7b] \n\t"
- "paddh %[ftmp8], %[ftmp8], %[ftmp10] \n\t"
- "li %[tmp0], 0x03 \n\t"
- "mtc1 %[tmp0], %[ftmp11] \n\t"
- "paddh %[ftmp1], %[ftmp5], %[ftmp8] \n\t"
- "paddh %[ftmp1], %[ftmp1], %[ff_pw_4] \n\t"
- "psrah %[ftmp1], %[ftmp1], %[ftmp11] \n\t"
- "paddh %[ftmp2], %[ftmp6], %[ftmp7] \n\t"
- "paddh %[ftmp2], %[ftmp2], %[ff_pw_4] \n\t"
- "psrah %[ftmp2], %[ftmp2], %[ftmp11] \n\t"
- "psubh %[ftmp3], %[ftmp6], %[ftmp7] \n\t"
- "paddh %[ftmp3], %[ftmp3], %[ff_pw_4] \n\t"
- "psrah %[ftmp3], %[ftmp3], %[ftmp11] \n\t"
- "psubh %[ftmp4], %[ftmp5], %[ftmp8] \n\t"
- "paddh %[ftmp4], %[ftmp4], %[ff_pw_4] \n\t"
- "psrah %[ftmp4], %[ftmp4], %[ftmp11] \n\t"
- TRANSPOSE_4H(%[ftmp1], %[ftmp2], %[ftmp3], %[ftmp4],
- %[ftmp5], %[ftmp6], %[ftmp7], %[ftmp8])
- MMI_LWC1(%[ftmp5], %[dst0], 0x00)
- MMI_LWC1(%[ftmp6], %[dst1], 0x00)
- MMI_LWC1(%[ftmp7], %[dst2], 0x00)
- MMI_LWC1(%[ftmp8], %[dst3], 0x00)
- "punpcklbh %[ftmp5], %[ftmp5], %[ftmp0] \n\t"
- "punpcklbh %[ftmp6], %[ftmp6], %[ftmp0] \n\t"
- "punpcklbh %[ftmp7], %[ftmp7], %[ftmp0] \n\t"
- "punpcklbh %[ftmp8], %[ftmp8], %[ftmp0] \n\t"
- "paddh %[ftmp1], %[ftmp1], %[ftmp5] \n\t"
- "paddh %[ftmp2], %[ftmp2], %[ftmp6] \n\t"
- "paddh %[ftmp3], %[ftmp3], %[ftmp7] \n\t"
- "paddh %[ftmp4], %[ftmp4], %[ftmp8] \n\t"
- "packushb %[ftmp1], %[ftmp1], %[ftmp0] \n\t"
- "packushb %[ftmp2], %[ftmp2], %[ftmp0] \n\t"
- "packushb %[ftmp3], %[ftmp3], %[ftmp0] \n\t"
- "packushb %[ftmp4], %[ftmp4], %[ftmp0] \n\t"
- MMI_SWC1(%[ftmp1], %[dst0], 0x00)
- MMI_SWC1(%[ftmp2], %[dst1], 0x00)
- MMI_SWC1(%[ftmp3], %[dst2], 0x00)
- MMI_SWC1(%[ftmp4], %[dst3], 0x00)
- : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]),
- [ftmp2]"=&f"(ftmp[2]), [ftmp3]"=&f"(ftmp[3]),
- [ftmp4]"=&f"(ftmp[4]), [ftmp5]"=&f"(ftmp[5]),
- [ftmp6]"=&f"(ftmp[6]), [ftmp7]"=&f"(ftmp[7]),
- [ftmp8]"=&f"(ftmp[8]), [ftmp9]"=&f"(ftmp[9]),
- [ftmp10]"=&f"(ftmp[10]), [ftmp11]"=&f"(ftmp[11]),
- RESTRICT_ASM_LOW32
- RESTRICT_ASM_ALL64
- [tmp0]"=&r"(tmp[0])
- : [dst0]"r"(dst), [dst1]"r"(dst+stride),
- [dst2]"r"(dst+2*stride), [dst3]"r"(dst+3*stride),
- [block]"r"(block), [ff_pw_4]"f"(ff_pw_4.f),
- [ff_ph_4e7b]"f"(ff_ph_4e7b_u.f), [ff_ph_22a3]"f"(ff_ph_22a3_u.f)
- : "memory"
- );
- #else
- int i, t0, t1, t2, t3;
- int16_t tmp[16];
- for (i = 0; i < 4; i++) {
- t0 = block[0 + i] + block[8 + i];
- t1 = block[0 + i] - block[8 + i];
- t2 = MUL_35468(block[4 + i]) - MUL_20091(block[12 + i]);
- t3 = MUL_20091(block[4 + i]) + MUL_35468(block[12 + i]);
- block[ 0 + i] = 0;
- block[ 4 + i] = 0;
- block[ 8 + i] = 0;
- block[12 + i] = 0;
- tmp[i * 4 + 0] = t0 + t3;
- tmp[i * 4 + 1] = t1 + t2;
- tmp[i * 4 + 2] = t1 - t2;
- tmp[i * 4 + 3] = t0 - t3;
- }
- for (i = 0; i < 4; i++) {
- t0 = tmp[0 + i] + tmp[8 + i];
- t1 = tmp[0 + i] - tmp[8 + i];
- t2 = MUL_35468(tmp[4 + i]) - MUL_20091(tmp[12 + i]);
- t3 = MUL_20091(tmp[4 + i]) + MUL_35468(tmp[12 + i]);
- dst[0] = av_clip_uint8(dst[0] + ((t0 + t3 + 4) >> 3));
- dst[1] = av_clip_uint8(dst[1] + ((t1 + t2 + 4) >> 3));
- dst[2] = av_clip_uint8(dst[2] + ((t1 - t2 + 4) >> 3));
- dst[3] = av_clip_uint8(dst[3] + ((t0 - t3 + 4) >> 3));
- dst += stride;
- }
- #endif
- }
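- /* Annotation (a reading aid, not from the original source): the VP8 inverse
-  * transform is defined with the fixed-point helpers
-  *     MUL_35468(x) = (x * 35468) >> 16
-  *     MUL_20091(x) = ((x * 20091) >> 16) + x
-  * pmulhh is a signed 16x16 multiply that keeps the high halfword, and 35468
-  * does not fit in a signed 16-bit lane.  The asm above therefore shifts the
-  * operand left by 2 (ftmp11 holds 2 at that point) and multiplies by
-  * ff_ph_22a3 (0x22a3 = 8867, and 8867 * 4 = 35468), while 20091 = 0x4e7b
-  * fits and is applied directly, the extra paddh/psubh of the operand itself
-  * supplying the "+ x" term of MUL_20091.  Per lane, roughly:
-  *     t2 = ((a << 2) * 8867 >> 16) - b - (b * 20091 >> 16);  // MUL_35468(a) - MUL_20091(b)
-  *     t3 = ((b << 2) * 8867 >> 16) + a + (a * 20091 >> 16);  // MUL_20091(a) + MUL_35468(b)
-  */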
- void ff_vp8_idct_dc_add_mmi(uint8_t *dst, int16_t block[16], ptrdiff_t stride)
- {
- #if 1
- int dc = (block[0] + 4) >> 3;
- double ftmp[6];
- DECLARE_VAR_LOW32;
- block[0] = 0;
- __asm__ volatile (
- "pxor %[ftmp0], %[ftmp0], %[ftmp0] \n\t"
- "mtc1 %[dc], %[ftmp5] \n\t"
- MMI_LWC1(%[ftmp1], %[dst0], 0x00)
- MMI_LWC1(%[ftmp2], %[dst1], 0x00)
- MMI_LWC1(%[ftmp3], %[dst2], 0x00)
- MMI_LWC1(%[ftmp4], %[dst3], 0x00)
- "pshufh %[ftmp5], %[ftmp5], %[ftmp0] \n\t"
- "punpcklbh %[ftmp1], %[ftmp1], %[ftmp0] \n\t"
- "punpcklbh %[ftmp2], %[ftmp2], %[ftmp0] \n\t"
- "punpcklbh %[ftmp3], %[ftmp3], %[ftmp0] \n\t"
- "punpcklbh %[ftmp4], %[ftmp4], %[ftmp0] \n\t"
- "paddsh %[ftmp1], %[ftmp1], %[ftmp5] \n\t"
- "paddsh %[ftmp2], %[ftmp2], %[ftmp5] \n\t"
- "paddsh %[ftmp3], %[ftmp3], %[ftmp5] \n\t"
- "paddsh %[ftmp4], %[ftmp4], %[ftmp5] \n\t"
- "packushb %[ftmp1], %[ftmp1], %[ftmp0] \n\t"
- "packushb %[ftmp2], %[ftmp2], %[ftmp0] \n\t"
- "packushb %[ftmp3], %[ftmp3], %[ftmp0] \n\t"
- "packushb %[ftmp4], %[ftmp4], %[ftmp0] \n\t"
- MMI_SWC1(%[ftmp1], %[dst0], 0x00)
- MMI_SWC1(%[ftmp2], %[dst1], 0x00)
- MMI_SWC1(%[ftmp3], %[dst2], 0x00)
- MMI_SWC1(%[ftmp4], %[dst3], 0x00)
- : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]),
- [ftmp2]"=&f"(ftmp[2]), [ftmp3]"=&f"(ftmp[3]),
- [ftmp4]"=&f"(ftmp[4]),
- RESTRICT_ASM_LOW32
- [ftmp5]"=&f"(ftmp[5])
- : [dst0]"r"(dst), [dst1]"r"(dst+stride),
- [dst2]"r"(dst+2*stride), [dst3]"r"(dst+3*stride),
- [dc]"r"(dc)
- : "memory"
- );
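- /* Annotation: with ftmp0 zeroed, "pshufh ftmp5, ftmp5, ftmp0" broadcasts the
-  * 16-bit dc value into all four halfword lanes; punpcklbh against ftmp0
-  * widens four pixels to halfwords, paddsh adds dc with signed saturation and
-  * packushb packs back with unsigned saturation, which together matches the
-  * av_clip_uint8(dst[i] + dc) loop in the #else branch below. */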
- #else
- int i, dc = (block[0] + 4) >> 3;
- block[0] = 0;
- for (i = 0; i < 4; i++) {
- dst[0] = av_clip_uint8(dst[0] + dc);
- dst[1] = av_clip_uint8(dst[1] + dc);
- dst[2] = av_clip_uint8(dst[2] + dc);
- dst[3] = av_clip_uint8(dst[3] + dc);
- dst += stride;
- }
- #endif
- }
- void ff_vp8_idct_dc_add4y_mmi(uint8_t *dst, int16_t block[4][16],
- ptrdiff_t stride)
- {
- ff_vp8_idct_dc_add_mmi(dst + 0, block[0], stride);
- ff_vp8_idct_dc_add_mmi(dst + 4, block[1], stride);
- ff_vp8_idct_dc_add_mmi(dst + 8, block[2], stride);
- ff_vp8_idct_dc_add_mmi(dst + 12, block[3], stride);
- }
- void ff_vp8_idct_dc_add4uv_mmi(uint8_t *dst, int16_t block[4][16],
- ptrdiff_t stride)
- {
- ff_vp8_idct_dc_add_mmi(dst + stride * 0 + 0, block[0], stride);
- ff_vp8_idct_dc_add_mmi(dst + stride * 0 + 4, block[1], stride);
- ff_vp8_idct_dc_add_mmi(dst + stride * 4 + 0, block[2], stride);
- ff_vp8_idct_dc_add_mmi(dst + stride * 4 + 4, block[3], stride);
- }
- // loop filter applied to edges between macroblocks
- void ff_vp8_v_loop_filter16_mmi(uint8_t *dst, ptrdiff_t stride, int flim_E,
- int flim_I, int hev_thresh)
- {
- vp8_v_loop_filter8_mmi(dst, stride, flim_E, flim_I, hev_thresh);
- vp8_v_loop_filter8_mmi(dst + 8, stride, flim_E, flim_I, hev_thresh);
- }
- void ff_vp8_h_loop_filter16_mmi(uint8_t *dst, ptrdiff_t stride, int flim_E,
- int flim_I, int hev_thresh)
- {
- vp8_h_loop_filter8_mmi(dst, stride, flim_E, flim_I, hev_thresh);
- vp8_h_loop_filter8_mmi(dst + 8 * stride, stride, flim_E, flim_I,
- hev_thresh);
- }
- void ff_vp8_v_loop_filter8uv_mmi(uint8_t *dstU, uint8_t *dstV, ptrdiff_t stride,
- int flim_E, int flim_I, int hev_thresh)
- {
- vp8_v_loop_filter8_mmi(dstU, stride, flim_E, flim_I, hev_thresh);
- vp8_v_loop_filter8_mmi(dstV, stride, flim_E, flim_I, hev_thresh);
- }
- void ff_vp8_h_loop_filter8uv_mmi(uint8_t *dstU, uint8_t *dstV, ptrdiff_t stride,
- int flim_E, int flim_I, int hev_thresh)
- {
- vp8_h_loop_filter8_mmi(dstU, stride, flim_E, flim_I, hev_thresh);
- vp8_h_loop_filter8_mmi(dstV, stride, flim_E, flim_I, hev_thresh);
- }
- // loop filter applied to inner macroblock edges
- void ff_vp8_v_loop_filter16_inner_mmi(uint8_t *dst, ptrdiff_t stride,
- int flim_E, int flim_I, int hev_thresh)
- {
- int i;
- for (i = 0; i < 16; i++)
- if (vp8_normal_limit(dst + i * 1, stride, flim_E, flim_I)) {
- int hv = hev(dst + i * 1, stride, hev_thresh);
- if (hv)
- vp8_filter_common_is4tap(dst + i * 1, stride);
- else
- vp8_filter_common_isnot4tap(dst + i * 1, stride);
- }
- }
- void ff_vp8_h_loop_filter16_inner_mmi(uint8_t *dst, ptrdiff_t stride,
- int flim_E, int flim_I, int hev_thresh)
- {
- int i;
- for (i = 0; i < 16; i++)
- if (vp8_normal_limit(dst + i * stride, 1, flim_E, flim_I)) {
- int hv = hev(dst + i * stride, 1, hev_thresh);
- if (hv)
- vp8_filter_common_is4tap(dst + i * stride, 1);
- else
- vp8_filter_common_isnot4tap(dst + i * stride, 1);
- }
- }
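- /* Annotation: hev() ("high edge variance") picks between the two forms of
-  * the common filter.  On a sharp edge the 4-tap form only adjusts p0/q0 and
-  * includes the p1 - q1 term; otherwise the non-4-tap form drops that term
-  * and additionally nudges p1/q1 by half the delta, smoothing more. */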
- void ff_vp8_v_loop_filter8uv_inner_mmi(uint8_t *dstU, uint8_t *dstV,
- ptrdiff_t stride, int flim_E, int flim_I, int hev_thresh)
- {
- vp8_v_loop_filter8_inner_mmi(dstU, stride, flim_E, flim_I, hev_thresh);
- vp8_v_loop_filter8_inner_mmi(dstV, stride, flim_E, flim_I, hev_thresh);
- }
- void ff_vp8_h_loop_filter8uv_inner_mmi(uint8_t *dstU, uint8_t *dstV,
- ptrdiff_t stride, int flim_E, int flim_I, int hev_thresh)
- {
- vp8_h_loop_filter8_inner_mmi(dstU, stride, flim_E, flim_I, hev_thresh);
- vp8_h_loop_filter8_inner_mmi(dstV, stride, flim_E, flim_I, hev_thresh);
- }
- void ff_vp8_v_loop_filter_simple_mmi(uint8_t *dst, ptrdiff_t stride, int flim)
- {
- int i;
- for (i = 0; i < 16; i++)
- if (vp8_simple_limit(dst + i, stride, flim))
- vp8_filter_common_is4tap(dst + i, stride);
- }
- void ff_vp8_h_loop_filter_simple_mmi(uint8_t *dst, ptrdiff_t stride, int flim)
- {
- int i;
- for (i = 0; i < 16; i++)
- if (vp8_simple_limit(dst + i * stride, 1, flim))
- vp8_filter_common_is4tap(dst + i * stride, 1);
- }
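- /* Annotation: the simple filter's limit test is
-  *     2 * |p0 - q0| + |p1 - q1| / 2 <= flim
-  * and only the 4-tap common filter is applied on success, so p0/q0 change
-  * while p1/q1 are left untouched. */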
- void ff_put_vp8_pixels16_mmi(uint8_t *dst, ptrdiff_t dststride, uint8_t *src,
- ptrdiff_t srcstride, int h, int x, int y)
- {
- #if 1
- double ftmp[2];
- uint64_t tmp[2];
- mips_reg addr[2];
- DECLARE_VAR_ALL64;
- __asm__ volatile (
- "1: \n\t"
- PTR_ADDU "%[addr0], %[src], %[srcstride] \n\t"
- MMI_ULDC1(%[ftmp0], %[src], 0x00)
- "ldl %[tmp0], 0x0f(%[src]) \n\t"
- "ldr %[tmp0], 0x08(%[src]) \n\t"
- MMI_ULDC1(%[ftmp1], %[addr0], 0x00)
- "ldl %[tmp1], 0x0f(%[addr0]) \n\t"
- "ldr %[tmp1], 0x08(%[addr0]) \n\t"
- PTR_ADDU "%[addr1], %[dst], %[dststride] \n\t"
- MMI_SDC1(%[ftmp0], %[dst], 0x00)
- "sdl %[tmp0], 0x0f(%[dst]) \n\t"
- "sdr %[tmp0], 0x08(%[dst]) \n\t"
- "addiu %[h], %[h], -0x02 \n\t"
- MMI_SDC1(%[ftmp1], %[addr1], 0x00)
- PTR_ADDU "%[src], %[addr0], %[srcstride] \n\t"
- "sdl %[tmp1], 0x0f(%[addr1]) \n\t"
- "sdr %[tmp1], 0x08(%[addr1]) \n\t"
- PTR_ADDU "%[dst], %[addr1], %[dststride] \n\t"
- "bnez %[h], 1b \n\t"
- : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]),
- [tmp0]"=&r"(tmp[0]), [tmp1]"=&r"(tmp[1]),
- RESTRICT_ASM_ALL64
- [addr0]"=&r"(addr[0]), [addr1]"=&r"(addr[1]),
- [dst]"+&r"(dst), [src]"+&r"(src),
- [h]"+&r"(h)
- : [dststride]"r"((mips_reg)dststride),
- [srcstride]"r"((mips_reg)srcstride)
- : "memory"
- );
- #else
- int i;
- for (i = 0; i < h; i++, dst += dststride, src += srcstride)
- memcpy(dst, src, 16);
- #endif
- }
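- /* Annotation: two rows are copied per iteration (h is even for every VP8
-  * block size).  MMI_ULDC1 is an unaligned 8-byte load into an FP register;
-  * the ldl/ldr (load-left/load-right) pair is the classic MIPS idiom for an
-  * unaligned 64-bit GPR load and covers bytes 8..15 of each 16-byte row.
-  * The 8- and 4-pixel variants below reuse the pattern with one ldl/ldr or
-  * lwl/lwr pair for the second row. */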
- void ff_put_vp8_pixels8_mmi(uint8_t *dst, ptrdiff_t dststride, uint8_t *src,
- ptrdiff_t srcstride, int h, int x, int y)
- {
- #if 1
- double ftmp[1];
- uint64_t tmp[1];
- mips_reg addr[2];
- DECLARE_VAR_ALL64;
- __asm__ volatile (
- "1: \n\t"
- PTR_ADDU "%[addr0], %[src], %[srcstride] \n\t"
- MMI_ULDC1(%[ftmp0], %[src], 0x00)
- "ldl %[tmp0], 0x07(%[addr0]) \n\t"
- "ldr %[tmp0], 0x00(%[addr0]) \n\t"
- PTR_ADDU "%[addr1], %[dst], %[dststride] \n\t"
- MMI_SDC1(%[ftmp0], %[dst], 0x00)
- "addiu %[h], %[h], -0x02 \n\t"
- "sdl %[tmp0], 0x07(%[addr1]) \n\t"
- "sdr %[tmp0], 0x00(%[addr1]) \n\t"
- PTR_ADDU "%[src], %[addr0], %[srcstride] \n\t"
- PTR_ADDU "%[dst], %[addr1], %[dststride] \n\t"
- "bnez %[h], 1b \n\t"
- : [ftmp0]"=&f"(ftmp[0]), [tmp0]"=&r"(tmp[0]),
- RESTRICT_ASM_ALL64
- [addr0]"=&r"(addr[0]), [addr1]"=&r"(addr[1]),
- [dst]"+&r"(dst), [src]"+&r"(src),
- [h]"+&r"(h)
- : [dststride]"r"((mips_reg)dststride),
- [srcstride]"r"((mips_reg)srcstride)
- : "memory"
- );
- #else
- int i;
- for (i = 0; i < h; i++, dst += dststride, src += srcstride)
- memcpy(dst, src, 8);
- #endif
- }
- void ff_put_vp8_pixels4_mmi(uint8_t *dst, ptrdiff_t dststride, uint8_t *src,
- ptrdiff_t srcstride, int h, int x, int y)
- {
- #if 1
- double ftmp[1];
- uint64_t tmp[1];
- mips_reg addr[2];
- DECLARE_VAR_LOW32;
- __asm__ volatile (
- "1: \n\t"
- PTR_ADDU "%[addr0], %[src], %[srcstride] \n\t"
- MMI_LWC1(%[ftmp0], %[src], 0x00)
- "lwl %[tmp0], 0x03(%[addr0]) \n\t"
- "lwr %[tmp0], 0x00(%[addr0]) \n\t"
- PTR_ADDU "%[addr1], %[dst], %[dststride] \n\t"
- MMI_SWC1(%[ftmp0], %[dst], 0x00)
- "addiu %[h], %[h], -0x02 \n\t"
- "swl %[tmp0], 0x03(%[addr1]) \n\t"
- "swr %[tmp0], 0x00(%[addr1]) \n\t"
- PTR_ADDU "%[src], %[addr0], %[srcstride] \n\t"
- PTR_ADDU "%[dst], %[addr1], %[dststride] \n\t"
- "bnez %[h], 1b \n\t"
- : [ftmp0]"=&f"(ftmp[0]), [tmp0]"=&r"(tmp[0]),
- RESTRICT_ASM_LOW32
- [addr0]"=&r"(addr[0]), [addr1]"=&r"(addr[1]),
- [dst]"+&r"(dst), [src]"+&r"(src),
- [h]"+&r"(h)
- : [dststride]"r"((mips_reg)dststride),
- [srcstride]"r"((mips_reg)srcstride)
- : "memory"
- );
- #else
- int i;
- for (i = 0; i < h; i++, dst += dststride, src += srcstride)
- memcpy(dst, src, 4);
- #endif
- }
- void ff_put_vp8_epel16_h4_mmi(uint8_t *dst, ptrdiff_t dststride, uint8_t *src,
- ptrdiff_t srcstride, int h, int mx, int my)
- {
- #if 1
- const uint64_t *filter = fourtap_subpel_filters[mx - 1];
- double ftmp[9];
- uint32_t tmp[1];
- union av_intfloat64 filter1;
- union av_intfloat64 filter2;
- union av_intfloat64 filter3;
- union av_intfloat64 filter4;
- mips_reg src1, dst1;
- DECLARE_VAR_ALL64;
- filter1.i = filter[1];
- filter2.i = filter[2];
- filter3.i = filter[3];
- filter4.i = filter[4];
- /*
- dst[0] = cm[(filter[2] * src[0] - filter[1] * src[-1] + filter[3] * src[1] - filter[4] * src[2] + 64) >> 7];
- dst[1] = cm[(filter[2] * src[1] - filter[1] * src[ 0] + filter[3] * src[2] - filter[4] * src[3] + 64) >> 7];
- dst[2] = cm[(filter[2] * src[2] - filter[1] * src[ 1] + filter[3] * src[3] - filter[4] * src[4] + 64) >> 7];
- dst[3] = cm[(filter[2] * src[3] - filter[1] * src[ 2] + filter[3] * src[4] - filter[4] * src[5] + 64) >> 7];
- dst[4] = cm[(filter[2] * src[4] - filter[1] * src[ 3] + filter[3] * src[5] - filter[4] * src[6] + 64) >> 7];
- dst[5] = cm[(filter[2] * src[5] - filter[1] * src[ 4] + filter[3] * src[6] - filter[4] * src[7] + 64) >> 7];
- dst[6] = cm[(filter[2] * src[6] - filter[1] * src[ 5] + filter[3] * src[7] - filter[4] * src[8] + 64) >> 7];
- dst[7] = cm[(filter[2] * src[7] - filter[1] * src[ 6] + filter[3] * src[8] - filter[4] * src[9] + 64) >> 7];
- dst[ 8] = cm[(filter[2] * src[ 8] - filter[1] * src[ 7] + filter[3] * src[ 9] - filter[4] * src[10] + 64) >> 7];
- dst[ 9] = cm[(filter[2] * src[ 9] - filter[1] * src[ 8] + filter[3] * src[10] - filter[4] * src[11] + 64) >> 7];
- dst[10] = cm[(filter[2] * src[10] - filter[1] * src[ 9] + filter[3] * src[11] - filter[4] * src[12] + 64) >> 7];
- dst[11] = cm[(filter[2] * src[11] - filter[1] * src[10] + filter[3] * src[12] - filter[4] * src[13] + 64) >> 7];
- dst[12] = cm[(filter[2] * src[12] - filter[1] * src[11] + filter[3] * src[13] - filter[4] * src[14] + 64) >> 7];
- dst[13] = cm[(filter[2] * src[13] - filter[1] * src[12] + filter[3] * src[14] - filter[4] * src[15] + 64) >> 7];
- dst[14] = cm[(filter[2] * src[14] - filter[1] * src[13] + filter[3] * src[15] - filter[4] * src[16] + 64) >> 7];
- dst[15] = cm[(filter[2] * src[15] - filter[1] * src[14] + filter[3] * src[16] - filter[4] * src[17] + 64) >> 7];
- */
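- /* Annotation: filter[1] and filter[4] are stored as magnitudes and entered
-  * with a minus sign, exactly as in the reference expressions above.  Despite
-  * its name, fourtap_subpel_filters holds all seven subpel filter sets, each
-  * 16-bit coefficient replicated across the four halfword lanes of a uint64_t
-  * so a single FP register can feed the packed multiplies. */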
- __asm__ volatile (
- "pxor %[ftmp0], %[ftmp0], %[ftmp0] \n\t"
- "li %[tmp0], 0x07 \n\t"
- "mtc1 %[tmp0], %[ftmp4] \n\t"
- "1: \n\t"
- // 0 - 7
- PUT_VP8_EPEL8_H4_MMI(%[src], %[dst])
- PTR_ADDIU "%[src1], %[src], 0x08 \n\t"
- PTR_ADDIU "%[dst1], %[dst], 0x08 \n\t"
- // 8 - 15
- PUT_VP8_EPEL8_H4_MMI(%[src1], %[dst1])
- "addiu %[h], %[h], -0x01 \n\t"
- PTR_ADDU "%[src], %[src], %[srcstride] \n\t"
- PTR_ADDU "%[dst], %[dst], %[dststride] \n\t"
- "bnez %[h], 1b \n\t"
- : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]),
- [ftmp2]"=&f"(ftmp[2]), [ftmp3]"=&f"(ftmp[3]),
- [ftmp4]"=&f"(ftmp[4]), [ftmp5]"=&f"(ftmp[5]),
- [ftmp6]"=&f"(ftmp[6]), [ftmp7]"=&f"(ftmp[7]),
- [ftmp8]"=&f"(ftmp[8]),
- [tmp0]"=&r"(tmp[0]),
- RESTRICT_ASM_ALL64
- [dst1]"=&r"(dst1), [src1]"=&r"(src1),
- [h]"+&r"(h),
- [dst]"+&r"(dst), [src]"+&r"(src)
- : [ff_pw_64]"f"(ff_pw_64.f),
- [srcstride]"r"((mips_reg)srcstride),
- [dststride]"r"((mips_reg)dststride),
- [filter1]"f"(filter1.f), [filter2]"f"(filter2.f),
- [filter3]"f"(filter3.f), [filter4]"f"(filter4.f)
- : "memory"
- );
- #else
- const uint8_t *filter = subpel_filters[mx - 1];
- const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP;
- int x, y;
- for (y = 0; y < h; y++) {
- for (x = 0; x < 16; x++)
- dst[x] = FILTER_4TAP(src, filter, 1);
- dst += dststride;
- src += srcstride;
- }
- #endif
- }
- void ff_put_vp8_epel8_h4_mmi(uint8_t *dst, ptrdiff_t dststride, uint8_t *src,
- ptrdiff_t srcstride, int h, int mx, int my)
- {
- #if 1
- const uint64_t *filter = fourtap_subpel_filters[mx - 1];
- double ftmp[9];
- uint32_t tmp[1];
- union av_intfloat64 filter1;
- union av_intfloat64 filter2;
- union av_intfloat64 filter3;
- union av_intfloat64 filter4;
- DECLARE_VAR_ALL64;
- filter1.i = filter[1];
- filter2.i = filter[2];
- filter3.i = filter[3];
- filter4.i = filter[4];
- /*
- dst[0] = cm[(filter[2] * src[0] - filter[1] * src[-1] + filter[3] * src[1] - filter[4] * src[2] + 64) >> 7];
- dst[1] = cm[(filter[2] * src[1] - filter[1] * src[ 0] + filter[3] * src[2] - filter[4] * src[3] + 64) >> 7];
- dst[2] = cm[(filter[2] * src[2] - filter[1] * src[ 1] + filter[3] * src[3] - filter[4] * src[4] + 64) >> 7];
- dst[3] = cm[(filter[2] * src[3] - filter[1] * src[ 2] + filter[3] * src[4] - filter[4] * src[5] + 64) >> 7];
- dst[4] = cm[(filter[2] * src[4] - filter[1] * src[ 3] + filter[3] * src[5] - filter[4] * src[6] + 64) >> 7];
- dst[5] = cm[(filter[2] * src[5] - filter[1] * src[ 4] + filter[3] * src[6] - filter[4] * src[7] + 64) >> 7];
- dst[6] = cm[(filter[2] * src[6] - filter[1] * src[ 5] + filter[3] * src[7] - filter[4] * src[8] + 64) >> 7];
- dst[7] = cm[(filter[2] * src[7] - filter[1] * src[ 6] + filter[3] * src[8] - filter[4] * src[9] + 64) >> 7];
- */
- __asm__ volatile (
- "pxor %[ftmp0], %[ftmp0], %[ftmp0] \n\t"
- "li %[tmp0], 0x07 \n\t"
- "mtc1 %[tmp0], %[ftmp4] \n\t"
- "1: \n\t"
- PUT_VP8_EPEL8_H4_MMI(%[src], %[dst])
- "addiu %[h], %[h], -0x01 \n\t"
- PTR_ADDU "%[src], %[src], %[srcstride] \n\t"
- PTR_ADDU "%[dst], %[dst], %[dststride] \n\t"
- "bnez %[h], 1b \n\t"
- : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]),
- [ftmp2]"=&f"(ftmp[2]), [ftmp3]"=&f"(ftmp[3]),
- [ftmp4]"=&f"(ftmp[4]), [ftmp5]"=&f"(ftmp[5]),
- [ftmp6]"=&f"(ftmp[6]), [ftmp7]"=&f"(ftmp[7]),
- [ftmp8]"=&f"(ftmp[8]),
- [tmp0]"=&r"(tmp[0]),
- RESTRICT_ASM_ALL64
- [h]"+&r"(h),
- [dst]"+&r"(dst), [src]"+&r"(src)
- : [ff_pw_64]"f"(ff_pw_64.f),
- [srcstride]"r"((mips_reg)srcstride),
- [dststride]"r"((mips_reg)dststride),
- [filter1]"f"(filter1.f), [filter2]"f"(filter2.f),
- [filter3]"f"(filter3.f), [filter4]"f"(filter4.f)
- : "memory"
- );
- #else
- const uint8_t *filter = subpel_filters[mx - 1];
- const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP;
- int x, y;
- for (y = 0; y < h; y++) {
- for (x = 0; x < 8; x++)
- dst[x] = FILTER_4TAP(src, filter, 1);
- dst += dststride;
- src += srcstride;
- }
- #endif
- }
- void ff_put_vp8_epel4_h4_mmi(uint8_t *dst, ptrdiff_t dststride, uint8_t *src,
- ptrdiff_t srcstride, int h, int mx, int my)
- {
- #if 1
- const uint64_t *filter = fourtap_subpel_filters[mx - 1];
- double ftmp[6];
- uint32_t tmp[1];
- union av_intfloat64 filter1;
- union av_intfloat64 filter2;
- union av_intfloat64 filter3;
- union av_intfloat64 filter4;
- DECLARE_VAR_LOW32;
- filter1.i = filter[1];
- filter2.i = filter[2];
- filter3.i = filter[3];
- filter4.i = filter[4];
- /*
- dst[0] = cm[(filter[2] * src[0] - filter[1] * src[-1] + filter[3] * src[1] - filter[4] * src[2] + 64) >> 7];
- dst[1] = cm[(filter[2] * src[1] - filter[1] * src[ 0] + filter[3] * src[2] - filter[4] * src[3] + 64) >> 7];
- dst[2] = cm[(filter[2] * src[2] - filter[1] * src[ 1] + filter[3] * src[3] - filter[4] * src[4] + 64) >> 7];
- dst[3] = cm[(filter[2] * src[3] - filter[1] * src[ 2] + filter[3] * src[4] - filter[4] * src[5] + 64) >> 7];
- */
- __asm__ volatile (
- "pxor %[ftmp0], %[ftmp0], %[ftmp0] \n\t"
- "li %[tmp0], 0x07 \n\t"
- "mtc1 %[tmp0], %[ftmp4] \n\t"
- "1: \n\t"
- PUT_VP8_EPEL4_H4_MMI(%[src], %[dst])
- "addiu %[h], %[h], -0x01 \n\t"
- PTR_ADDU "%[src], %[src], %[srcstride] \n\t"
- PTR_ADDU "%[dst], %[dst], %[dststride] \n\t"
- "bnez %[h], 1b \n\t"
- : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]),
- [ftmp2]"=&f"(ftmp[2]), [ftmp3]"=&f"(ftmp[3]),
- [ftmp4]"=&f"(ftmp[4]), [ftmp5]"=&f"(ftmp[5]),
- [tmp0]"=&r"(tmp[0]),
- RESTRICT_ASM_LOW32
- [h]"+&r"(h),
- [dst]"+&r"(dst), [src]"+&r"(src)
- : [ff_pw_64]"f"(ff_pw_64.f),
- [srcstride]"r"((mips_reg)srcstride),
- [dststride]"r"((mips_reg)dststride),
- [filter1]"f"(filter1.f), [filter2]"f"(filter2.f),
- [filter3]"f"(filter3.f), [filter4]"f"(filter4.f)
- : "memory"
- );
- #else
- const uint8_t *filter = subpel_filters[mx - 1];
- const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP;
- int x, y;
- for (y = 0; y < h; y++) {
- for (x = 0; x < 4; x++)
- dst[x] = FILTER_4TAP(src, filter, 1);
- dst += dststride;
- src += srcstride;
- }
- #endif
- }
- void ff_put_vp8_epel16_h6_mmi(uint8_t *dst, ptrdiff_t dststride, uint8_t *src,
- ptrdiff_t srcstride, int h, int mx, int my)
- {
- #if 1
- const uint64_t *filter = fourtap_subpel_filters[mx - 1];
- double ftmp[9];
- uint32_t tmp[1];
- mips_reg src1, dst1;
- union av_intfloat64 filter0;
- union av_intfloat64 filter1;
- union av_intfloat64 filter2;
- union av_intfloat64 filter3;
- union av_intfloat64 filter4;
- union av_intfloat64 filter5;
- DECLARE_VAR_ALL64;
- filter0.i = filter[0];
- filter1.i = filter[1];
- filter2.i = filter[2];
- filter3.i = filter[3];
- filter4.i = filter[4];
- filter5.i = filter[5];
- /*
- dst[ 0] = cm[(filter[2]*src[ 0] - filter[1]*src[-1] + filter[0]*src[-2] + filter[3]*src[ 1] - filter[4]*src[ 2] + filter[5]*src[ 3] + 64) >> 7];
- dst[ 1] = cm[(filter[2]*src[ 1] - filter[1]*src[ 0] + filter[0]*src[-1] + filter[3]*src[ 2] - filter[4]*src[ 3] + filter[5]*src[ 4] + 64) >> 7];
- dst[ 2] = cm[(filter[2]*src[ 2] - filter[1]*src[ 1] + filter[0]*src[ 0] + filter[3]*src[ 3] - filter[4]*src[ 4] + filter[5]*src[ 5] + 64) >> 7];
- dst[ 3] = cm[(filter[2]*src[ 3] - filter[1]*src[ 2] + filter[0]*src[ 1] + filter[3]*src[ 4] - filter[4]*src[ 5] + filter[5]*src[ 6] + 64) >> 7];
- dst[ 4] = cm[(filter[2]*src[ 4] - filter[1]*src[ 3] + filter[0]*src[ 2] + filter[3]*src[ 5] - filter[4]*src[ 6] + filter[5]*src[ 7] + 64) >> 7];
- dst[ 5] = cm[(filter[2]*src[ 5] - filter[1]*src[ 4] + filter[0]*src[ 3] + filter[3]*src[ 6] - filter[4]*src[ 7] + filter[5]*src[ 8] + 64) >> 7];
- dst[ 6] = cm[(filter[2]*src[ 6] - filter[1]*src[ 5] + filter[0]*src[ 4] + filter[3]*src[ 7] - filter[4]*src[ 8] + filter[5]*src[ 9] + 64) >> 7];
- dst[ 7] = cm[(filter[2]*src[ 7] - filter[1]*src[ 6] + filter[0]*src[ 5] + filter[3]*src[ 8] - filter[4]*src[ 9] + filter[5]*src[10] + 64) >> 7];
- dst[ 8] = cm[(filter[2]*src[ 8] - filter[1]*src[ 7] + filter[0]*src[ 6] + filter[3]*src[ 9] - filter[4]*src[10] + filter[5]*src[11] + 64) >> 7];
- dst[ 9] = cm[(filter[2]*src[ 9] - filter[1]*src[ 8] + filter[0]*src[ 7] + filter[3]*src[10] - filter[4]*src[11] + filter[5]*src[12] + 64) >> 7];
- dst[10] = cm[(filter[2]*src[10] - filter[1]*src[ 9] + filter[0]*src[ 8] + filter[3]*src[11] - filter[4]*src[12] + filter[5]*src[13] + 64) >> 7];
- dst[11] = cm[(filter[2]*src[11] - filter[1]*src[10] + filter[0]*src[ 9] + filter[3]*src[12] - filter[4]*src[13] + filter[5]*src[14] + 64) >> 7];
- dst[12] = cm[(filter[2]*src[12] - filter[1]*src[11] + filter[0]*src[10] + filter[3]*src[13] - filter[4]*src[14] + filter[5]*src[15] + 64) >> 7];
- dst[13] = cm[(filter[2]*src[13] - filter[1]*src[12] + filter[0]*src[11] + filter[3]*src[14] - filter[4]*src[15] + filter[5]*src[16] + 64) >> 7];
- dst[14] = cm[(filter[2]*src[14] - filter[1]*src[13] + filter[0]*src[12] + filter[3]*src[15] - filter[4]*src[16] + filter[5]*src[17] + 64) >> 7];
- dst[15] = cm[(filter[2]*src[15] - filter[1]*src[14] + filter[0]*src[13] + filter[3]*src[16] - filter[4]*src[17] + filter[5]*src[18] + 64) >> 7];
- */
- __asm__ volatile (
- "pxor %[ftmp0], %[ftmp0], %[ftmp0] \n\t"
- "li %[tmp0], 0x07 \n\t"
- "mtc1 %[tmp0], %[ftmp4] \n\t"
- "1: \n\t"
- // 0 - 7
- PUT_VP8_EPEL8_H6_MMI(%[src], %[dst])
- PTR_ADDIU "%[src1], %[src], 0x08 \n\t"
- PTR_ADDIU "%[dst1], %[dst], 0x08 \n\t"
- // 8 - 15
- PUT_VP8_EPEL8_H6_MMI(%[src1], %[dst1])
- "addiu %[h], %[h], -0x01 \n\t"
- PTR_ADDU "%[src], %[src], %[srcstride] \n\t"
- PTR_ADDU "%[dst], %[dst], %[dststride] \n\t"
- "bnez %[h], 1b \n\t"
- : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]),
- [ftmp2]"=&f"(ftmp[2]), [ftmp3]"=&f"(ftmp[3]),
- [ftmp4]"=&f"(ftmp[4]), [ftmp5]"=&f"(ftmp[5]),
- [ftmp6]"=&f"(ftmp[6]), [ftmp7]"=&f"(ftmp[7]),
- [ftmp8]"=&f"(ftmp[8]),
- [tmp0]"=&r"(tmp[0]),
- RESTRICT_ASM_ALL64
- [dst1]"=&r"(dst1), [src1]"=&r"(src1),
- [h]"+&r"(h),
- [dst]"+&r"(dst), [src]"+&r"(src)
- : [ff_pw_64]"f"(ff_pw_64.f),
- [srcstride]"r"((mips_reg)srcstride),
- [dststride]"r"((mips_reg)dststride),
- [filter0]"f"(filter0.f), [filter1]"f"(filter1.f),
- [filter2]"f"(filter2.f), [filter3]"f"(filter3.f),
- [filter4]"f"(filter4.f), [filter5]"f"(filter5.f)
- : "memory"
- );
- #else
- const uint8_t *filter = subpel_filters[mx - 1];
- const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP;
- int x, y;
- for (y = 0; y < h; y++) {
- for (x = 0; x < 16; x++)
- dst[x] = FILTER_6TAP(src, filter, 1);
- dst += dststride;
- src += srcstride;
- }
- #endif
- }
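- /* Annotation: the 6-tap path only adds the two outer taps; per the reference
-  * expressions the sign pattern is +f0 -f1 +f2 +f3 -f4 +f5 over src[-2..3],
-  * so filter0/filter5 contribute positively and the loop structure is
-  * otherwise identical to the 4-tap version. */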
- void ff_put_vp8_epel8_h6_mmi(uint8_t *dst, ptrdiff_t dststride, uint8_t *src,
- ptrdiff_t srcstride, int h, int mx, int my)
- {
- #if 1
- const uint64_t *filter = fourtap_subpel_filters[mx - 1];
- double ftmp[9];
- uint32_t tmp[1];
- union av_intfloat64 filter0;
- union av_intfloat64 filter1;
- union av_intfloat64 filter2;
- union av_intfloat64 filter3;
- union av_intfloat64 filter4;
- union av_intfloat64 filter5;
- DECLARE_VAR_ALL64;
- filter0.i = filter[0];
- filter1.i = filter[1];
- filter2.i = filter[2];
- filter3.i = filter[3];
- filter4.i = filter[4];
- filter5.i = filter[5];
- /*
- dst[0] = cm[(filter[2]*src[0] - filter[1]*src[-1] + filter[0]*src[-2] + filter[3]*src[1] - filter[4]*src[2] + filter[5]*src[ 3] + 64) >> 7];
- dst[1] = cm[(filter[2]*src[1] - filter[1]*src[ 0] + filter[0]*src[-1] + filter[3]*src[2] - filter[4]*src[3] + filter[5]*src[ 4] + 64) >> 7];
- dst[2] = cm[(filter[2]*src[2] - filter[1]*src[ 1] + filter[0]*src[ 0] + filter[3]*src[3] - filter[4]*src[4] + filter[5]*src[ 5] + 64) >> 7];
- dst[3] = cm[(filter[2]*src[3] - filter[1]*src[ 2] + filter[0]*src[ 1] + filter[3]*src[4] - filter[4]*src[5] + filter[5]*src[ 6] + 64) >> 7];
- dst[4] = cm[(filter[2]*src[4] - filter[1]*src[ 3] + filter[0]*src[ 2] + filter[3]*src[5] - filter[4]*src[6] + filter[5]*src[ 7] + 64) >> 7];
- dst[5] = cm[(filter[2]*src[5] - filter[1]*src[ 4] + filter[0]*src[ 3] + filter[3]*src[6] - filter[4]*src[7] + filter[5]*src[ 8] + 64) >> 7];
- dst[6] = cm[(filter[2]*src[6] - filter[1]*src[ 5] + filter[0]*src[ 4] + filter[3]*src[7] - filter[4]*src[8] + filter[5]*src[ 9] + 64) >> 7];
- dst[7] = cm[(filter[2]*src[7] - filter[1]*src[ 6] + filter[0]*src[ 5] + filter[3]*src[8] - filter[4]*src[9] + filter[5]*src[10] + 64) >> 7];
- */
- __asm__ volatile (
- "pxor %[ftmp0], %[ftmp0], %[ftmp0] \n\t"
- "li %[tmp0], 0x07 \n\t"
- "mtc1 %[tmp0], %[ftmp4] \n\t"
- "1: \n\t"
- PUT_VP8_EPEL8_H6_MMI(%[src], %[dst])
- "addiu %[h], %[h], -0x01 \n\t"
- PTR_ADDU "%[src], %[src], %[srcstride] \n\t"
- PTR_ADDU "%[dst], %[dst], %[dststride] \n\t"
- "bnez %[h], 1b \n\t"
- : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]),
- [ftmp2]"=&f"(ftmp[2]), [ftmp3]"=&f"(ftmp[3]),
- [ftmp4]"=&f"(ftmp[4]), [ftmp5]"=&f"(ftmp[5]),
- [ftmp6]"=&f"(ftmp[6]), [ftmp7]"=&f"(ftmp[7]),
- [ftmp8]"=&f"(ftmp[8]),
- [tmp0]"=&r"(tmp[0]),
- RESTRICT_ASM_ALL64
- [h]"+&r"(h),
- [dst]"+&r"(dst), [src]"+&r"(src)
- : [ff_pw_64]"f"(ff_pw_64.f),
- [srcstride]"r"((mips_reg)srcstride),
- [dststride]"r"((mips_reg)dststride),
- [filter0]"f"(filter0.f), [filter1]"f"(filter1.f),
- [filter2]"f"(filter2.f), [filter3]"f"(filter3.f),
- [filter4]"f"(filter4.f), [filter5]"f"(filter5.f)
- : "memory"
- );
- #else
- const uint8_t *filter = subpel_filters[mx - 1];
- const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP;
- int x, y;
- for (y = 0; y < h; y++) {
- for (x = 0; x < 8; x++)
- dst[x] = FILTER_6TAP(src, filter, 1);
- dst += dststride;
- src += srcstride;
- }
- #endif
- }
- void ff_put_vp8_epel4_h6_mmi(uint8_t *dst, ptrdiff_t dststride, uint8_t *src,
- ptrdiff_t srcstride, int h, int mx, int my)
- {
- #if 1
- const uint64_t *filter = fourtap_subpel_filters[mx - 1];
- double ftmp[6];
- uint32_t tmp[1];
- union av_intfloat64 filter0;
- union av_intfloat64 filter1;
- union av_intfloat64 filter2;
- union av_intfloat64 filter3;
- union av_intfloat64 filter4;
- union av_intfloat64 filter5;
- DECLARE_VAR_LOW32;
- filter0.i = filter[0];
- filter1.i = filter[1];
- filter2.i = filter[2];
- filter3.i = filter[3];
- filter4.i = filter[4];
- filter5.i = filter[5];
- /*
- dst[0] = cm[(filter[2]*src[0] - filter[1]*src[-1] + filter[0]*src[-2] + filter[3]*src[1] - filter[4]*src[2] + filter[5]*src[ 3] + 64) >> 7];
- dst[1] = cm[(filter[2]*src[1] - filter[1]*src[ 0] + filter[0]*src[-1] + filter[3]*src[2] - filter[4]*src[3] + filter[5]*src[ 4] + 64) >> 7];
- dst[2] = cm[(filter[2]*src[2] - filter[1]*src[ 1] + filter[0]*src[ 0] + filter[3]*src[3] - filter[4]*src[4] + filter[5]*src[ 5] + 64) >> 7];
- dst[3] = cm[(filter[2]*src[3] - filter[1]*src[ 2] + filter[0]*src[ 1] + filter[3]*src[4] - filter[4]*src[5] + filter[5]*src[ 6] + 64) >> 7];
- */
- __asm__ volatile (
- "pxor %[ftmp0], %[ftmp0], %[ftmp0] \n\t"
- "li %[tmp0], 0x07 \n\t"
- "mtc1 %[tmp0], %[ftmp4] \n\t"
- "1: \n\t"
- PUT_VP8_EPEL4_H6_MMI(%[src], %[dst])
- "addiu %[h], %[h], -0x01 \n\t"
- PTR_ADDU "%[src], %[src], %[srcstride] \n\t"
- PTR_ADDU "%[dst], %[dst], %[dststride] \n\t"
- "bnez %[h], 1b \n\t"
- : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]),
- [ftmp2]"=&f"(ftmp[2]), [ftmp3]"=&f"(ftmp[3]),
- [ftmp4]"=&f"(ftmp[4]), [ftmp5]"=&f"(ftmp[5]),
- [tmp0]"=&r"(tmp[0]),
- RESTRICT_ASM_LOW32
- [h]"+&r"(h),
- [dst]"+&r"(dst), [src]"+&r"(src)
- : [ff_pw_64]"f"(ff_pw_64.f),
- [srcstride]"r"((mips_reg)srcstride),
- [dststride]"r"((mips_reg)dststride),
- [filter0]"f"(filter0.f), [filter1]"f"(filter1.f),
- [filter2]"f"(filter2.f), [filter3]"f"(filter3.f),
- [filter4]"f"(filter4.f), [filter5]"f"(filter5.f)
- : "memory"
- );
- #else
- const uint8_t *filter = subpel_filters[mx - 1];
- const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP;
- int x, y;
- for (y = 0; y < h; y++) {
- for (x = 0; x < 4; x++)
- dst[x] = FILTER_6TAP(src, filter, 1);
- dst += dststride;
- src += srcstride;
- }
- #endif
- }
- void ff_put_vp8_epel16_v4_mmi(uint8_t *dst, ptrdiff_t dststride, uint8_t *src,
- ptrdiff_t srcstride, int h, int mx, int my)
- {
- #if 1
- const uint64_t *filter = fourtap_subpel_filters[my - 1];
- double ftmp[9];
- uint32_t tmp[1];
- mips_reg src0, src1, dst0;
- union av_intfloat64 filter1;
- union av_intfloat64 filter2;
- union av_intfloat64 filter3;
- union av_intfloat64 filter4;
- DECLARE_VAR_ALL64;
- filter1.i = filter[1];
- filter2.i = filter[2];
- filter3.i = filter[3];
- filter4.i = filter[4];
- /*
- dst[0] = cm[(filter[2] * src[0] - filter[1] * src[ -srcstride] + filter[3] * src[ srcstride] - filter[4] * src[ 2*srcstride] + 64) >> 7];
- dst[1] = cm[(filter[2] * src[1] - filter[1] * src[1-srcstride] + filter[3] * src[1+srcstride] - filter[4] * src[1+2*srcstride] + 64) >> 7];
- dst[2] = cm[(filter[2] * src[2] - filter[1] * src[2-srcstride] + filter[3] * src[2+srcstride] - filter[4] * src[2+2*srcstride] + 64) >> 7];
- dst[3] = cm[(filter[2] * src[3] - filter[1] * src[3-srcstride] + filter[3] * src[3+srcstride] - filter[4] * src[3+2*srcstride] + 64) >> 7];
- dst[4] = cm[(filter[2] * src[4] - filter[1] * src[4-srcstride] + filter[3] * src[4+srcstride] - filter[4] * src[4+2*srcstride] + 64) >> 7];
- dst[5] = cm[(filter[2] * src[5] - filter[1] * src[5-srcstride] + filter[3] * src[5+srcstride] - filter[4] * src[5+2*srcstride] + 64) >> 7];
- dst[6] = cm[(filter[2] * src[6] - filter[1] * src[6-srcstride] + filter[3] * src[6+srcstride] - filter[4] * src[6+2*srcstride] + 64) >> 7];
- dst[7] = cm[(filter[2] * src[7] - filter[1] * src[7-srcstride] + filter[3] * src[7+srcstride] - filter[4] * src[7+2*srcstride] + 64) >> 7];
- dst[ 8] = cm[(filter[2] * src[ 8] - filter[1] * src[ 8-srcstride] + filter[3] * src[ 8+srcstride] - filter[4] * src[ 8+2*srcstride] + 64) >> 7];
- dst[ 9] = cm[(filter[2] * src[ 9] - filter[1] * src[ 9-srcstride] + filter[3] * src[ 9+srcstride] - filter[4] * src[ 9+2*srcstride] + 64) >> 7];
- dst[10] = cm[(filter[2] * src[10] - filter[1] * src[10-srcstride] + filter[3] * src[10+srcstride] - filter[4] * src[10+2*srcstride] + 64) >> 7];
- dst[11] = cm[(filter[2] * src[11] - filter[1] * src[11-srcstride] + filter[3] * src[11+srcstride] - filter[4] * src[11+2*srcstride] + 64) >> 7];
- dst[12] = cm[(filter[2] * src[12] - filter[1] * src[12-srcstride] + filter[3] * src[12+srcstride] - filter[4] * src[12+2*srcstride] + 64) >> 7];
- dst[13] = cm[(filter[2] * src[13] - filter[1] * src[13-srcstride] + filter[3] * src[13+srcstride] - filter[4] * src[13+2*srcstride] + 64) >> 7];
- dst[14] = cm[(filter[2] * src[14] - filter[1] * src[14-srcstride] + filter[3] * src[14+srcstride] - filter[4] * src[14+2*srcstride] + 64) >> 7];
- dst[15] = cm[(filter[2] * src[15] - filter[1] * src[15-srcstride] + filter[3] * src[15+srcstride] - filter[4] * src[15+2*srcstride] + 64) >> 7];
- */
- __asm__ volatile (
- "pxor %[ftmp0], %[ftmp0], %[ftmp0] \n\t"
- "li %[tmp0], 0x07 \n\t"
- "mtc1 %[tmp0], %[ftmp4] \n\t"
- "1: \n\t"
- // 0 - 7
- PUT_VP8_EPEL8_V4_MMI(%[src], %[src1], %[dst], %[srcstride])
- PTR_ADDIU "%[src0], %[src], 0x08 \n\t"
- PTR_ADDIU "%[dst0], %[dst], 0x08 \n\t"
- // 8 - 15
- PUT_VP8_EPEL8_V4_MMI(%[src0], %[src1], %[dst0], %[srcstride])
- "addiu %[h], %[h], -0x01 \n\t"
- PTR_ADDU "%[src], %[src], %[srcstride] \n\t"
- PTR_ADDU "%[dst], %[dst], %[dststride] \n\t"
- "bnez %[h], 1b \n\t"
- : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]),
- [ftmp2]"=&f"(ftmp[2]), [ftmp3]"=&f"(ftmp[3]),
- [ftmp4]"=&f"(ftmp[4]), [ftmp5]"=&f"(ftmp[5]),
- [ftmp6]"=&f"(ftmp[6]), [ftmp7]"=&f"(ftmp[7]),
- [ftmp8]"=&f"(ftmp[8]),
- [tmp0]"=&r"(tmp[0]),
- RESTRICT_ASM_ALL64
- [src0]"=&r"(src0), [dst0]"=&r"(dst0),
- [src1]"=&r"(src1),
- [h]"+&r"(h),
- [dst]"+&r"(dst), [src]"+&r"(src)
- : [ff_pw_64]"f"(ff_pw_64.f),
- [srcstride]"r"((mips_reg)srcstride),
- [dststride]"r"((mips_reg)dststride),
- [filter1]"f"(filter1.f), [filter2]"f"(filter2.f),
- [filter3]"f"(filter3.f), [filter4]"f"(filter4.f)
- : "memory"
- );
- #else
- const uint8_t *filter = subpel_filters[my - 1];
- const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP;
- int x, y;
- for (y = 0; y < h; y++) {
- for (x = 0; x < 16; x++)
- dst[x] = FILTER_4TAP(src, filter, srcstride);
- dst += dststride;
- src += srcstride;
- }
- #endif
- }
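- /* Annotation: the vertical filters apply the same tap arithmetic to
-  * stride-spaced samples; src1 is scratch that the PUT_VP8_EPEL8_V4_MMI
-  * macro presumably uses to address the rows from src - srcstride through
-  * src + 2 * srcstride. */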
- void ff_put_vp8_epel8_v4_mmi(uint8_t *dst, ptrdiff_t dststride, uint8_t *src,
- ptrdiff_t srcstride, int h, int mx, int my)
- {
- #if 1
- const uint64_t *filter = fourtap_subpel_filters[my - 1];
- double ftmp[9];
- uint32_t tmp[1];
- mips_reg src1;
- union av_intfloat64 filter1;
- union av_intfloat64 filter2;
- union av_intfloat64 filter3;
- union av_intfloat64 filter4;
- DECLARE_VAR_ALL64;
- filter1.i = filter[1];
- filter2.i = filter[2];
- filter3.i = filter[3];
- filter4.i = filter[4];
- /*
- dst[0] = cm[(filter[2] * src[0] - filter[1] * src[ -srcstride] + filter[3] * src[ srcstride] - filter[4] * src[ 2*srcstride] + 64) >> 7];
- dst[1] = cm[(filter[2] * src[1] - filter[1] * src[1-srcstride] + filter[3] * src[1+srcstride] - filter[4] * src[1+2*srcstride] + 64) >> 7];
- dst[2] = cm[(filter[2] * src[2] - filter[1] * src[2-srcstride] + filter[3] * src[2+srcstride] - filter[4] * src[2+2*srcstride] + 64) >> 7];
- dst[3] = cm[(filter[2] * src[3] - filter[1] * src[3-srcstride] + filter[3] * src[3+srcstride] - filter[4] * src[3+2*srcstride] + 64) >> 7];
- dst[4] = cm[(filter[2] * src[4] - filter[1] * src[4-srcstride] + filter[3] * src[4+srcstride] - filter[4] * src[4+2*srcstride] + 64) >> 7];
- dst[5] = cm[(filter[2] * src[5] - filter[1] * src[5-srcstride] + filter[3] * src[5+srcstride] - filter[4] * src[5+2*srcstride] + 64) >> 7];
- dst[6] = cm[(filter[2] * src[6] - filter[1] * src[6-srcstride] + filter[3] * src[6+srcstride] - filter[4] * src[6+2*srcstride] + 64) >> 7];
- dst[7] = cm[(filter[2] * src[7] - filter[1] * src[7-srcstride] + filter[3] * src[7+srcstride] - filter[4] * src[7+2*srcstride] + 64) >> 7];
- */
- __asm__ volatile (
- "pxor %[ftmp0], %[ftmp0], %[ftmp0] \n\t"
- "li %[tmp0], 0x07 \n\t"
- "mtc1 %[tmp0], %[ftmp4] \n\t"
- "1: \n\t"
- PUT_VP8_EPEL8_V4_MMI(%[src], %[src1], %[dst], %[srcstride])
- "addiu %[h], %[h], -0x01 \n\t"
- PTR_ADDU "%[src], %[src], %[srcstride] \n\t"
- PTR_ADDU "%[dst], %[dst], %[dststride] \n\t"
- "bnez %[h], 1b \n\t"
- : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]),
- [ftmp2]"=&f"(ftmp[2]), [ftmp3]"=&f"(ftmp[3]),
- [ftmp4]"=&f"(ftmp[4]), [ftmp5]"=&f"(ftmp[5]),
- [ftmp6]"=&f"(ftmp[6]), [ftmp7]"=&f"(ftmp[7]),
- [ftmp8]"=&f"(ftmp[8]),
- [tmp0]"=&r"(tmp[0]),
- RESTRICT_ASM_ALL64
- [src1]"=&r"(src1),
- [h]"+&r"(h),
- [dst]"+&r"(dst), [src]"+&r"(src)
- : [ff_pw_64]"f"(ff_pw_64.f),
- [srcstride]"r"((mips_reg)srcstride),
- [dststride]"r"((mips_reg)dststride),
- [filter1]"f"(filter1.f), [filter2]"f"(filter2.f),
- [filter3]"f"(filter3.f), [filter4]"f"(filter4.f)
- : "memory"
- );
- #else
- const uint8_t *filter = subpel_filters[my - 1];
- const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP;
- int x, y;
- for (y = 0; y < h; y++) {
- for (x = 0; x < 8; x++)
- dst[x] = FILTER_4TAP(src, filter, srcstride);
- dst += dststride;
- src += srcstride;
- }
- #endif
- }
- void ff_put_vp8_epel4_v4_mmi(uint8_t *dst, ptrdiff_t dststride, uint8_t *src,
- ptrdiff_t srcstride, int h, int mx, int my)
- {
- #if 1
- const uint64_t *filter = fourtap_subpel_filters[my - 1];
- double ftmp[6];
- uint32_t tmp[1];
- mips_reg src1;
- union av_intfloat64 filter1;
- union av_intfloat64 filter2;
- union av_intfloat64 filter3;
- union av_intfloat64 filter4;
- DECLARE_VAR_LOW32;
- filter1.i = filter[1];
- filter2.i = filter[2];
- filter3.i = filter[3];
- filter4.i = filter[4];
- /*
- dst[0] = cm[(filter[2] * src[0] - filter[1] * src[ -srcstride] + filter[3] * src[ srcstride] - filter[4] * src[ 2*srcstride] + 64) >> 7];
- dst[1] = cm[(filter[2] * src[1] - filter[1] * src[1-srcstride] + filter[3] * src[1+srcstride] - filter[4] * src[1+2*srcstride] + 64) >> 7];
- dst[2] = cm[(filter[2] * src[2] - filter[1] * src[2-srcstride] + filter[3] * src[2+srcstride] - filter[4] * src[2+2*srcstride] + 64) >> 7];
- dst[3] = cm[(filter[2] * src[3] - filter[1] * src[3-srcstride] + filter[3] * src[3+srcstride] - filter[4] * src[3+2*srcstride] + 64) >> 7];
- */
- __asm__ volatile (
- "pxor %[ftmp0], %[ftmp0], %[ftmp0] \n\t"
- "li %[tmp0], 0x07 \n\t"
- "mtc1 %[tmp0], %[ftmp4] \n\t"
- "1: \n\t"
- PUT_VP8_EPEL4_V4_MMI(%[src], %[src1], %[dst], %[srcstride])
- "addiu %[h], %[h], -0x01 \n\t"
- PTR_ADDU "%[src], %[src], %[srcstride] \n\t"
- PTR_ADDU "%[dst], %[dst], %[dststride] \n\t"
- "bnez %[h], 1b \n\t"
- : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]),
- [ftmp2]"=&f"(ftmp[2]), [ftmp3]"=&f"(ftmp[3]),
- [ftmp4]"=&f"(ftmp[4]), [ftmp5]"=&f"(ftmp[5]),
- [tmp0]"=&r"(tmp[0]),
- RESTRICT_ASM_LOW32
- [src1]"=&r"(src1),
- [h]"+&r"(h),
- [dst]"+&r"(dst), [src]"+&r"(src)
- : [ff_pw_64]"f"(ff_pw_64.f),
- [srcstride]"r"((mips_reg)srcstride),
- [dststride]"r"((mips_reg)dststride),
- [filter1]"f"(filter1.f), [filter2]"f"(filter2.f),
- [filter3]"f"(filter3.f), [filter4]"f"(filter4.f)
- : "memory"
- );
- #else
- const uint8_t *filter = subpel_filters[my - 1];
- const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP;
- int x, y;
- for (y = 0; y < h; y++) {
- for (x = 0; x < 4; x++)
- dst[x] = FILTER_4TAP(src, filter, srcstride);
- dst += dststride;
- src += srcstride;
- }
- #endif
- }
- void ff_put_vp8_epel16_v6_mmi(uint8_t *dst, ptrdiff_t dststride, uint8_t *src,
- ptrdiff_t srcstride, int h, int mx, int my)
- {
- #if 1
- const uint64_t *filter = fourtap_subpel_filters[my - 1];
- double ftmp[9];
- uint32_t tmp[1];
- mips_reg src0, src1, dst0;
- union av_intfloat64 filter0;
- union av_intfloat64 filter1;
- union av_intfloat64 filter2;
- union av_intfloat64 filter3;
- union av_intfloat64 filter4;
- union av_intfloat64 filter5;
- DECLARE_VAR_ALL64;
- filter0.i = filter[0];
- filter1.i = filter[1];
- filter2.i = filter[2];
- filter3.i = filter[3];
- filter4.i = filter[4];
- filter5.i = filter[5];
- /*
- dst[0] = cm[(filter[2]*src[0] - filter[1]*src[0-srcstride] + filter[0]*src[0-2*srcstride] + filter[3]*src[0+srcstride] - filter[4]*src[0+2*srcstride] + filter[5]*src[0+3*srcstride] + 64) >> 7];
- dst[1] = cm[(filter[2]*src[1] - filter[1]*src[1-srcstride] + filter[0]*src[1-2*srcstride] + filter[3]*src[1+srcstride] - filter[4]*src[1+2*srcstride] + filter[5]*src[1+3*srcstride] + 64) >> 7];
- dst[2] = cm[(filter[2]*src[2] - filter[1]*src[2-srcstride] + filter[0]*src[2-2*srcstride] + filter[3]*src[2+srcstride] - filter[4]*src[2+2*srcstride] + filter[5]*src[2+3*srcstride] + 64) >> 7];
- dst[3] = cm[(filter[2]*src[3] - filter[1]*src[3-srcstride] + filter[0]*src[3-2*srcstride] + filter[3]*src[3+srcstride] - filter[4]*src[3+2*srcstride] + filter[5]*src[3+3*srcstride] + 64) >> 7];
- dst[4] = cm[(filter[2]*src[4] - filter[1]*src[4-srcstride] + filter[0]*src[4-2*srcstride] + filter[3]*src[4+srcstride] - filter[4]*src[4+2*srcstride] + filter[5]*src[4+3*srcstride] + 64) >> 7];
- dst[5] = cm[(filter[2]*src[5] - filter[1]*src[5-srcstride] + filter[0]*src[5-2*srcstride] + filter[3]*src[5+srcstride] - filter[4]*src[5+2*srcstride] + filter[5]*src[5+3*srcstride] + 64) >> 7];
- dst[6] = cm[(filter[2]*src[6] - filter[1]*src[6-srcstride] + filter[0]*src[6-2*srcstride] + filter[3]*src[6+srcstride] - filter[4]*src[6+2*srcstride] + filter[5]*src[6+3*srcstride] + 64) >> 7];
- dst[7] = cm[(filter[2]*src[7] - filter[1]*src[7-srcstride] + filter[0]*src[7-2*srcstride] + filter[3]*src[7+srcstride] - filter[4]*src[7+2*srcstride] + filter[5]*src[7+3*srcstride] + 64) >> 7];
- dst[ 8] = cm[(filter[2]*src[ 8] - filter[1]*src[ 8-srcstride] + filter[0]*src[ 8-2*srcstride] + filter[3]*src[ 8+srcstride] - filter[4]*src[ 8+2*srcstride] + filter[5]*src[ 8+3*srcstride] + 64) >> 7];
- dst[ 9] = cm[(filter[2]*src[ 9] - filter[1]*src[ 9-srcstride] + filter[0]*src[ 9-2*srcstride] + filter[3]*src[ 9+srcstride] - filter[4]*src[ 9+2*srcstride] + filter[5]*src[ 9+3*srcstride] + 64) >> 7];
- dst[10] = cm[(filter[2]*src[10] - filter[1]*src[10-srcstride] + filter[0]*src[10-2*srcstride] + filter[3]*src[10+srcstride] - filter[4]*src[10+2*srcstride] + filter[5]*src[10+3*srcstride] + 64) >> 7];
- dst[11] = cm[(filter[2]*src[11] - filter[1]*src[11-srcstride] + filter[0]*src[11-2*srcstride] + filter[3]*src[11+srcstride] - filter[4]*src[11+2*srcstride] + filter[5]*src[11+3*srcstride] + 64) >> 7];
- dst[12] = cm[(filter[2]*src[12] - filter[1]*src[12-srcstride] + filter[0]*src[12-2*srcstride] + filter[3]*src[12+srcstride] - filter[4]*src[12+2*srcstride] + filter[5]*src[12+3*srcstride] + 64) >> 7];
- dst[13] = cm[(filter[2]*src[13] - filter[1]*src[13-srcstride] + filter[0]*src[13-2*srcstride] + filter[3]*src[13+srcstride] - filter[4]*src[13+2*srcstride] + filter[5]*src[13+3*srcstride] + 64) >> 7];
- dst[14] = cm[(filter[2]*src[14] - filter[1]*src[14-srcstride] + filter[0]*src[14-2*srcstride] + filter[3]*src[14+srcstride] - filter[4]*src[14+2*srcstride] + filter[5]*src[14+3*srcstride] + 64) >> 7];
- dst[15] = cm[(filter[2]*src[15] - filter[1]*src[15-srcstride] + filter[0]*src[15-2*srcstride] + filter[3]*src[15+srcstride] - filter[4]*src[15+2*srcstride] + filter[5]*src[15+3*srcstride] + 64) >> 7];
- */
- __asm__ volatile (
- "pxor %[ftmp0], %[ftmp0], %[ftmp0] \n\t"
- "li %[tmp0], 0x07 \n\t"
- "mtc1 %[tmp0], %[ftmp4] \n\t"
- "1: \n\t"
- // 0 - 7
- PUT_VP8_EPEL8_V6_MMI(%[src], %[src1], %[dst], %[srcstride])
- PTR_ADDIU "%[src0], %[src], 0x08 \n\t"
- PTR_ADDIU "%[dst0], %[dst], 0x08 \n\t"
- // 8 - 15
- PUT_VP8_EPEL8_V6_MMI(%[src0], %[src1], %[dst0], %[srcstride])
- "addiu %[h], %[h], -0x01 \n\t"
- PTR_ADDU "%[src], %[src], %[srcstride] \n\t"
- PTR_ADDU "%[dst], %[dst], %[dststride] \n\t"
- "bnez %[h], 1b \n\t"
- : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]),
- [ftmp2]"=&f"(ftmp[2]), [ftmp3]"=&f"(ftmp[3]),
- [ftmp4]"=&f"(ftmp[4]), [ftmp5]"=&f"(ftmp[5]),
- [ftmp6]"=&f"(ftmp[6]), [ftmp7]"=&f"(ftmp[7]),
- [ftmp8]"=&f"(ftmp[8]),
- [tmp0]"=&r"(tmp[0]),
- RESTRICT_ASM_ALL64
- [src0]"=&r"(src0), [dst0]"=&r"(dst0),
- [src1]"=&r"(src1),
- [h]"+&r"(h),
- [dst]"+&r"(dst), [src]"+&r"(src)
- : [ff_pw_64]"f"(ff_pw_64.f),
- [srcstride]"r"((mips_reg)srcstride),
- [dststride]"r"((mips_reg)dststride),
- [filter0]"f"(filter0.f), [filter1]"f"(filter1.f),
- [filter2]"f"(filter2.f), [filter3]"f"(filter3.f),
- [filter4]"f"(filter4.f), [filter5]"f"(filter5.f)
- : "memory"
- );
- #else
- const uint8_t *filter = subpel_filters[my - 1];
- const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP;
- int x, y;
- for (y = 0; y < h; y++) {
- for (x = 0; x < 16; x++)
- dst[x] = FILTER_6TAP(src, filter, srcstride);
- dst += dststride;
- src += srcstride;
- }
- #endif
- }
- void ff_put_vp8_epel8_v6_mmi(uint8_t *dst, ptrdiff_t dststride, uint8_t *src,
- ptrdiff_t srcstride, int h, int mx, int my)
- {
- #if 1
- const uint64_t *filter = fourtap_subpel_filters[my - 1];
- double ftmp[9];
- uint32_t tmp[1];
- mips_reg src1;
- union av_intfloat64 filter0;
- union av_intfloat64 filter1;
- union av_intfloat64 filter2;
- union av_intfloat64 filter3;
- union av_intfloat64 filter4;
- union av_intfloat64 filter5;
- DECLARE_VAR_ALL64;
- filter0.i = filter[0];
- filter1.i = filter[1];
- filter2.i = filter[2];
- filter3.i = filter[3];
- filter4.i = filter[4];
- filter5.i = filter[5];
- /*
- dst[0] = cm[(filter[2]*src[0] - filter[1]*src[0-srcstride] + filter[0]*src[0-2*srcstride] + filter[3]*src[0+srcstride] - filter[4]*src[0+2*srcstride] + filter[5]*src[0+3*srcstride] + 64) >> 7];
- dst[1] = cm[(filter[2]*src[1] - filter[1]*src[1-srcstride] + filter[0]*src[1-2*srcstride] + filter[3]*src[1+srcstride] - filter[4]*src[1+2*srcstride] + filter[5]*src[1+3*srcstride] + 64) >> 7];
- dst[2] = cm[(filter[2]*src[2] - filter[1]*src[2-srcstride] + filter[0]*src[2-2*srcstride] + filter[3]*src[2+srcstride] - filter[4]*src[2+2*srcstride] + filter[5]*src[2+3*srcstride] + 64) >> 7];
- dst[3] = cm[(filter[2]*src[3] - filter[1]*src[3-srcstride] + filter[0]*src[3-2*srcstride] + filter[3]*src[3+srcstride] - filter[4]*src[3+2*srcstride] + filter[5]*src[3+3*srcstride] + 64) >> 7];
- dst[4] = cm[(filter[2]*src[4] - filter[1]*src[4-srcstride] + filter[0]*src[4-2*srcstride] + filter[3]*src[4+srcstride] - filter[4]*src[4+2*srcstride] + filter[5]*src[4+3*srcstride] + 64) >> 7];
- dst[5] = cm[(filter[2]*src[5] - filter[1]*src[5-srcstride] + filter[0]*src[5-2*srcstride] + filter[3]*src[5+srcstride] - filter[4]*src[5+2*srcstride] + filter[5]*src[5+3*srcstride] + 64) >> 7];
- dst[6] = cm[(filter[2]*src[6] - filter[1]*src[6-srcstride] + filter[0]*src[6-2*srcstride] + filter[3]*src[6+srcstride] - filter[4]*src[6+2*srcstride] + filter[5]*src[6+3*srcstride] + 64) >> 7];
- dst[7] = cm[(filter[2]*src[7] - filter[1]*src[7-srcstride] + filter[0]*src[7-2*srcstride] + filter[3]*src[7+srcstride] - filter[4]*src[7+2*srcstride] + filter[5]*src[7+3*srcstride] + 64) >> 7];
- */
- __asm__ volatile (
- "pxor %[ftmp0], %[ftmp0], %[ftmp0] \n\t"
- "li %[tmp0], 0x07 \n\t"
- "mtc1 %[tmp0], %[ftmp4] \n\t"
- "1: \n\t"
- PUT_VP8_EPEL8_V6_MMI(%[src], %[src1], %[dst], %[srcstride])
- "addiu %[h], %[h], -0x01 \n\t"
- PTR_ADDU "%[src], %[src], %[srcstride] \n\t"
- PTR_ADDU "%[dst], %[dst], %[dststride] \n\t"
- "bnez %[h], 1b \n\t"
- : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]),
- [ftmp2]"=&f"(ftmp[2]), [ftmp3]"=&f"(ftmp[3]),
- [ftmp4]"=&f"(ftmp[4]), [ftmp5]"=&f"(ftmp[5]),
- [ftmp6]"=&f"(ftmp[6]), [ftmp7]"=&f"(ftmp[7]),
- [ftmp8]"=&f"(ftmp[8]),
- [tmp0]"=&r"(tmp[0]),
- RESTRICT_ASM_ALL64
- [src1]"=&r"(src1),
- [h]"+&r"(h),
- [dst]"+&r"(dst), [src]"+&r"(src)
- : [ff_pw_64]"f"(ff_pw_64.f),
- [srcstride]"r"((mips_reg)srcstride),
- [dststride]"r"((mips_reg)dststride),
- [filter0]"f"(filter0.f), [filter1]"f"(filter1.f),
- [filter2]"f"(filter2.f), [filter3]"f"(filter3.f),
- [filter4]"f"(filter4.f), [filter5]"f"(filter5.f)
- : "memory"
- );
- #else
- const uint8_t *filter = subpel_filters[my - 1];
- const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP;
- int x, y;
- for (y = 0; y < h; y++) {
- for (x = 0; x < 8; x++)
- dst[x] = FILTER_6TAP(src, filter, srcstride);
- dst += dststride;
- src += srcstride;
- }
- #endif
- }
- void ff_put_vp8_epel4_v6_mmi(uint8_t *dst, ptrdiff_t dststride, uint8_t *src,
- ptrdiff_t srcstride, int h, int mx, int my)
- {
- #if 1
- const uint64_t *filter = fourtap_subpel_filters[my - 1];
- double ftmp[6];
- uint32_t tmp[1];
- mips_reg src1;
- union av_intfloat64 filter0;
- union av_intfloat64 filter1;
- union av_intfloat64 filter2;
- union av_intfloat64 filter3;
- union av_intfloat64 filter4;
- union av_intfloat64 filter5;
- DECLARE_VAR_LOW32;
- filter0.i = filter[0];
- filter1.i = filter[1];
- filter2.i = filter[2];
- filter3.i = filter[3];
- filter4.i = filter[4];
- filter5.i = filter[5];
- /*
- dst[0] = cm[(filter[2]*src[0] - filter[1]*src[0-srcstride] + filter[0]*src[0-2*srcstride] + filter[3]*src[0+srcstride] - filter[4]*src[0+2*srcstride] + filter[5]*src[0+3*srcstride] + 64) >> 7];
- dst[1] = cm[(filter[2]*src[1] - filter[1]*src[1-srcstride] + filter[0]*src[1-2*srcstride] + filter[3]*src[1+srcstride] - filter[4]*src[1+2*srcstride] + filter[5]*src[1+3*srcstride] + 64) >> 7];
- dst[2] = cm[(filter[2]*src[2] - filter[1]*src[2-srcstride] + filter[0]*src[2-2*srcstride] + filter[3]*src[2+srcstride] - filter[4]*src[2+2*srcstride] + filter[5]*src[2+3*srcstride] + 64) >> 7];
- dst[3] = cm[(filter[2]*src[3] - filter[1]*src[3-srcstride] + filter[0]*src[3-2*srcstride] + filter[3]*src[3+srcstride] - filter[4]*src[3+2*srcstride] + filter[5]*src[3+3*srcstride] + 64) >> 7];
- */
- __asm__ volatile (
- "pxor %[ftmp0], %[ftmp0], %[ftmp0] \n\t"
- "li %[tmp0], 0x07 \n\t"
- "mtc1 %[tmp0], %[ftmp4] \n\t"
- "1: \n\t"
- PUT_VP8_EPEL4_V6_MMI(%[src], %[src1], %[dst], %[srcstride])
- "addiu %[h], %[h], -0x01 \n\t"
- PTR_ADDU "%[src], %[src], %[srcstride] \n\t"
- PTR_ADDU "%[dst], %[dst], %[dststride] \n\t"
- "bnez %[h], 1b \n\t"
- : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]),
- [ftmp2]"=&f"(ftmp[2]), [ftmp3]"=&f"(ftmp[3]),
- [ftmp4]"=&f"(ftmp[4]), [ftmp5]"=&f"(ftmp[5]),
- [tmp0]"=&r"(tmp[0]),
- RESTRICT_ASM_LOW32
- [src1]"=&r"(src1),
- [h]"+&r"(h),
- [dst]"+&r"(dst), [src]"+&r"(src)
- : [ff_pw_64]"f"(ff_pw_64.f),
- [srcstride]"r"((mips_reg)srcstride),
- [dststride]"r"((mips_reg)dststride),
- [filter0]"f"(filter0.f), [filter1]"f"(filter1.f),
- [filter2]"f"(filter2.f), [filter3]"f"(filter3.f),
- [filter4]"f"(filter4.f), [filter5]"f"(filter5.f)
- : "memory"
- );
- #else
- const uint8_t *filter = subpel_filters[my - 1];
- const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP;
- int x, y;
- for (y = 0; y < h; y++) {
- for (x = 0; x < 4; x++)
- dst[x] = FILTER_6TAP(src, filter, srcstride);
- dst += dststride;
- src += srcstride;
- }
- #endif
- }
- void ff_put_vp8_epel16_h4v4_mmi(uint8_t *dst, ptrdiff_t dststride, uint8_t *src,
- ptrdiff_t srcstride, int h, int mx, int my)
- {
- #if 1
- DECLARE_ALIGNED(8, uint8_t, tmp_array[560]);
- uint8_t *tmp = tmp_array;
- src -= srcstride;
- ff_put_vp8_epel16_h4_mmi(tmp, 16, src, srcstride, h + 3, mx, my);
- tmp = tmp_array + 16;
- ff_put_vp8_epel16_v4_mmi(dst, dststride, tmp, 16, h, mx, my);
- #else
- const uint8_t *filter = subpel_filters[mx - 1];
- const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP;
- int x, y;
- uint8_t tmp_array[560];
- uint8_t *tmp = tmp_array;
- src -= srcstride;
- for (y = 0; y < h + 3; y++) {
- for (x = 0; x < 16; x++)
- tmp[x] = FILTER_4TAP(src, filter, 1);
- tmp += 16;
- src += srcstride;
- }
- tmp = tmp_array + 16;
- filter = subpel_filters[my - 1];
- for (y = 0; y < h; y++) {
- for (x = 0; x < 16; x++)
- dst[x] = FILTER_4TAP(tmp, filter, 16);
- dst += dststride;
- tmp += 16;
- }
- #endif
- }
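- /* Annotation: the two-pass (hNvM) functions filter horizontally into an
-  * on-stack buffer, then filter that buffer vertically.  A 4-tap vertical
-  * pass needs one row above and two below, so the horizontal pass starts at
-  * src - srcstride and produces h + 3 rows, and the vertical pass reads from
-  * tmp_array + 16, i.e. from row 1 of the 16-wide buffer.  The *v6 variants
-  * extend this to src - 2 * srcstride, h + 5 rows and a two-row read offset. */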
- void ff_put_vp8_epel8_h4v4_mmi(uint8_t *dst, ptrdiff_t dststride, uint8_t *src,
- ptrdiff_t srcstride, int h, int mx, int my)
- {
- #if 1
- DECLARE_ALIGNED(8, uint8_t, tmp_array[152]);
- uint8_t *tmp = tmp_array;
- src -= srcstride;
- ff_put_vp8_epel8_h4_mmi(tmp, 8, src, srcstride, h + 3, mx, my);
- tmp = tmp_array + 8;
- ff_put_vp8_epel8_v4_mmi(dst, dststride, tmp, 8, h, mx, my);
- #else
- const uint8_t *filter = subpel_filters[mx - 1];
- const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP;
- int x, y;
- uint8_t tmp_array[152];
- uint8_t *tmp = tmp_array;
- src -= srcstride;
- for (y = 0; y < h + 3; y++) {
- for (x = 0; x < 8; x++)
- tmp[x] = FILTER_4TAP(src, filter, 1);
- tmp += 8;
- src += srcstride;
- }
- tmp = tmp_array + 8;
- filter = subpel_filters[my - 1];
- for (y = 0; y < h; y++) {
- for (x = 0; x < 8; x++)
- dst[x] = FILTER_4TAP(tmp, filter, 8);
- dst += dststride;
- tmp += 8;
- }
- #endif
- }
- void ff_put_vp8_epel4_h4v4_mmi(uint8_t *dst, ptrdiff_t dststride, uint8_t *src,
- ptrdiff_t srcstride, int h, int mx, int my)
- {
- #if 1
- DECLARE_ALIGNED(4, uint8_t, tmp_array[44]);
- uint8_t *tmp = tmp_array;
- src -= srcstride;
- ff_put_vp8_epel4_h4_mmi(tmp, 4, src, srcstride, h + 3, mx, my);
- tmp = tmp_array + 4;
- ff_put_vp8_epel4_v4_mmi(dst, dststride, tmp, 4, h, mx, my);
- #else
- const uint8_t *filter = subpel_filters[mx - 1];
- const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP;
- int x, y;
- uint8_t tmp_array[44];
- uint8_t *tmp = tmp_array;
- src -= srcstride;
- for (y = 0; y < h + 3; y++) {
- for (x = 0; x < 4; x++)
- tmp[x] = FILTER_4TAP(src, filter, 1);
- tmp += 4;
- src += srcstride;
- }
- tmp = tmp_array + 4;
- filter = subpel_filters[my - 1];
- for (y = 0; y < h; y++) {
- for (x = 0; x < 4; x++)
- dst[x] = FILTER_4TAP(tmp, filter, 4);
- dst += dststride;
- tmp += 4;
- }
- #endif
- }
- void ff_put_vp8_epel16_h4v6_mmi(uint8_t *dst, ptrdiff_t dststride, uint8_t *src,
- ptrdiff_t srcstride, int h, int mx, int my)
- {
- #if 1
- DECLARE_ALIGNED(8, uint8_t, tmp_array[592]);
- uint8_t *tmp = tmp_array;
- src -= 2 * srcstride;
- ff_put_vp8_epel16_h4_mmi(tmp, 16, src, srcstride, h + 5, mx, my);
- tmp = tmp_array + 32;
- ff_put_vp8_epel16_v6_mmi(dst, dststride, tmp, 16, h, mx, my);
- #else
- const uint8_t *filter = subpel_filters[mx - 1];
- const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP;
- int x, y;
- uint8_t tmp_array[592];
- uint8_t *tmp = tmp_array;
- src -= 2 * srcstride;
- for (y = 0; y < h + 5; y++) {
- for (x = 0; x < 16; x++)
- tmp[x] = FILTER_4TAP(src, filter, 1);
- tmp += 16;
- src += srcstride;
- }
- tmp = tmp_array + 32;
- filter = subpel_filters[my - 1];
- for (y = 0; y < h; y++) {
- for (x = 0; x < 16; x++)
- dst[x] = FILTER_6TAP(tmp, filter, 16);
- dst += dststride;
- tmp += 16;
- }
- #endif
- }
- void ff_put_vp8_epel8_h4v6_mmi(uint8_t *dst, ptrdiff_t dststride, uint8_t *src,
- ptrdiff_t srcstride, int h, int mx, int my)
- {
- #if 1
- DECLARE_ALIGNED(8, uint8_t, tmp_array[168]);
- uint8_t *tmp = tmp_array;
- src -= 2 * srcstride;
- ff_put_vp8_epel8_h4_mmi(tmp, 8, src, srcstride, h + 5, mx, my);
- tmp = tmp_array + 16;
- ff_put_vp8_epel8_v6_mmi(dst, dststride, tmp, 8, h, mx, my);
- #else
- const uint8_t *filter = subpel_filters[mx - 1];
- const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP;
- int x, y;
- uint8_t tmp_array[168];
- uint8_t *tmp = tmp_array;
- src -= 2 * srcstride;
- for (y = 0; y < h + 5; y++) {
- for (x = 0; x < 8; x++)
- tmp[x] = FILTER_4TAP(src, filter, 1);
- tmp += 8;
- src += srcstride;
- }
- tmp = tmp_array + 16;
- filter = subpel_filters[my - 1];
- for (y = 0; y < h; y++) {
- for (x = 0; x < 8; x++)
- dst[x] = FILTER_6TAP(tmp, filter, 8);
- dst += dststride;
- tmp += 8;
- }
- #endif
- }
- void ff_put_vp8_epel4_h4v6_mmi(uint8_t *dst, ptrdiff_t dststride, uint8_t *src,
- ptrdiff_t srcstride, int h, int mx, int my)
- {
- #if 1
- DECLARE_ALIGNED(4, uint8_t, tmp_array[52]);
- uint8_t *tmp = tmp_array;
- src -= 2 * srcstride;
- ff_put_vp8_epel4_h4_mmi(tmp, 4, src, srcstride, h + 5, mx, my);
- tmp = tmp_array + 8;
- ff_put_vp8_epel4_v6_mmi(dst, dststride, tmp, 4, h, mx, my);
- #else
- const uint8_t *filter = subpel_filters[mx - 1];
- const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP;
- int x, y;
- uint8_t tmp_array[52];
- uint8_t *tmp = tmp_array;
- src -= 2 * srcstride;
- for (y = 0; y < h + 5; y++) {
- for (x = 0; x < 4; x++)
- tmp[x] = FILTER_4TAP(src, filter, 1);
- tmp += 4;
- src += srcstride;
- }
- tmp = tmp_array + 8;
- filter = subpel_filters[my - 1];
- for (y = 0; y < h; y++) {
- for (x = 0; x < 4; x++)
- dst[x] = FILTER_6TAP(tmp, filter, 4);
- dst += dststride;
- tmp += 4;
- }
- #endif
- }
- void ff_put_vp8_epel16_h6v4_mmi(uint8_t *dst, ptrdiff_t dststride, uint8_t *src,
- ptrdiff_t srcstride, int h, int mx, int my)
- {
- #if 1
- DECLARE_ALIGNED(8, uint8_t, tmp_array[560]);
- uint8_t *tmp = tmp_array;
- src -= srcstride;
- ff_put_vp8_epel16_h6_mmi(tmp, 16, src, srcstride, h + 3, mx, my);
- tmp = tmp_array + 16;
- ff_put_vp8_epel16_v4_mmi(dst, dststride, tmp, 16, h, mx, my);
- #else
- const uint8_t *filter = subpel_filters[mx - 1];
- const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP;
- int x, y;
- uint8_t tmp_array[560];
- uint8_t *tmp = tmp_array;
- src -= srcstride;
- for (y = 0; y < h + 3; y++) {
- for (x = 0; x < 16; x++)
- tmp[x] = FILTER_6TAP(src, filter, 1);
- tmp += 16;
- src += srcstride;
- }
- tmp = tmp_array + 16;
- filter = subpel_filters[my - 1];
- for (y = 0; y < h; y++) {
- for (x = 0; x < 16; x++)
- dst[x] = FILTER_4TAP(tmp, filter, 16);
- dst += dststride;
- tmp += 16;
- }
- #endif
- }
- void ff_put_vp8_epel8_h6v4_mmi(uint8_t *dst, ptrdiff_t dststride, uint8_t *src,
- ptrdiff_t srcstride, int h, int mx, int my)
- {
- #if 1
- DECLARE_ALIGNED(8, uint8_t, tmp_array[152]);
- uint8_t *tmp = tmp_array;
- src -= srcstride;
- ff_put_vp8_epel8_h6_mmi(tmp, 8, src, srcstride, h + 3, mx, my);
- tmp = tmp_array + 8;
- ff_put_vp8_epel8_v4_mmi(dst, dststride, tmp, 8, h, mx, my);
- #else
- const uint8_t *filter = subpel_filters[mx - 1];
- const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP;
- int x, y;
- uint8_t tmp_array[152];
- uint8_t *tmp = tmp_array;
- src -= srcstride;
- for (y = 0; y < h + 3; y++) {
- for (x = 0; x < 8; x++)
- tmp[x] = FILTER_6TAP(src, filter, 1);
- tmp += 8;
- src += srcstride;
- }
- tmp = tmp_array + 8;
- filter = subpel_filters[my - 1];
- for (y = 0; y < h; y++) {
- for (x = 0; x < 8; x++)
- dst[x] = FILTER_4TAP(tmp, filter, 8);
- dst += dststride;
- tmp += 8;
- }
- #endif
- }
- void ff_put_vp8_epel4_h6v4_mmi(uint8_t *dst, ptrdiff_t dststride, uint8_t *src,
- ptrdiff_t srcstride, int h, int mx, int my)
- {
- #if 1
- DECLARE_ALIGNED(4, uint8_t, tmp_array[44]);
- uint8_t *tmp = tmp_array;
- src -= srcstride;
- ff_put_vp8_epel4_h6_mmi(tmp, 4, src, srcstride, h + 3, mx, my);
- tmp = tmp_array + 4;
- ff_put_vp8_epel4_v4_mmi(dst, dststride, tmp, 4, h, mx, my);
- #else
- const uint8_t *filter = subpel_filters[mx - 1];
- const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP;
- int x, y;
- uint8_t tmp_array[44];
- uint8_t *tmp = tmp_array;
- src -= srcstride;
- for (y = 0; y < h + 3; y++) {
- for (x = 0; x < 4; x++)
- tmp[x] = FILTER_6TAP(src, filter, 1);
- tmp += 4;
- src += srcstride;
- }
- tmp = tmp_array + 4;
- filter = subpel_filters[my - 1];
- for (y = 0; y < h; y++) {
- for (x = 0; x < 4; x++)
- dst[x] = FILTER_4TAP(tmp, filter, 4);
- dst += dststride;
- tmp += 4;
- }
- #endif
- }
- void ff_put_vp8_epel16_h6v6_mmi(uint8_t *dst, ptrdiff_t dststride, uint8_t *src,
- ptrdiff_t srcstride, int h, int mx, int my)
- {
- #if 1
- DECLARE_ALIGNED(8, uint8_t, tmp_array[592]);
- uint8_t *tmp = tmp_array;
- src -= 2 * srcstride;
- ff_put_vp8_epel16_h6_mmi(tmp, 16, src, srcstride, h + 5, mx, my);
- tmp = tmp_array + 32;
- ff_put_vp8_epel16_v6_mmi(dst, dststride, tmp, 16, h, mx, my);
- #else
- const uint8_t *filter = subpel_filters[mx - 1];
- const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP;
- int x, y;
- uint8_t tmp_array[592];
- uint8_t *tmp = tmp_array;
- src -= 2 * srcstride;
- for (y = 0; y < h + 5; y++) {
- for (x = 0; x < 16; x++)
- tmp[x] = FILTER_6TAP(src, filter, 1);
- tmp += 16;
- src += srcstride;
- }
- tmp = tmp_array + 32;
- filter = subpel_filters[my - 1];
- for (y = 0; y < h; y++) {
- for (x = 0; x < 16; x++)
- dst[x] = FILTER_6TAP(tmp, filter, 16);
- dst += dststride;
- tmp += 16;
- }
- #endif
- }
void ff_put_vp8_epel8_h6v6_mmi(uint8_t *dst, ptrdiff_t dststride, uint8_t *src,
                               ptrdiff_t srcstride, int h, int mx, int my)
{
#if 1
    DECLARE_ALIGNED(8, uint8_t, tmp_array[168]);
    uint8_t *tmp = tmp_array;

    src -= 2 * srcstride;
    ff_put_vp8_epel8_h6_mmi(tmp, 8, src, srcstride, h + 5, mx, my);
    tmp = tmp_array + 16;
    ff_put_vp8_epel8_v6_mmi(dst, dststride, tmp, 8, h, mx, my);
#else
    const uint8_t *filter = subpel_filters[mx - 1];
    const uint8_t *cm     = ff_crop_tab + MAX_NEG_CROP;
    int x, y;
    uint8_t tmp_array[168];
    uint8_t *tmp = tmp_array;

    src -= 2 * srcstride;

    for (y = 0; y < h + 5; y++) {
        for (x = 0; x < 8; x++)
            tmp[x] = FILTER_6TAP(src, filter, 1);
        tmp += 8;
        src += srcstride;
    }

    tmp    = tmp_array + 16;
    filter = subpel_filters[my - 1];

    for (y = 0; y < h; y++) {
        for (x = 0; x < 8; x++)
            dst[x] = FILTER_6TAP(tmp, filter, 8);
        dst += dststride;
        tmp += 8;
    }
#endif
}
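
/*
 * As in the h6v4 case, the temporary buffers here are sized for the
 * largest caller: 168 = 8 * (16 + 5) above, and 52 = 4 * (8 + 5) in the
 * 4-pixel variant below.
 */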
void ff_put_vp8_epel4_h6v6_mmi(uint8_t *dst, ptrdiff_t dststride, uint8_t *src,
                               ptrdiff_t srcstride, int h, int mx, int my)
{
#if 1
    DECLARE_ALIGNED(4, uint8_t, tmp_array[52]);
    uint8_t *tmp = tmp_array;

    src -= 2 * srcstride;
    ff_put_vp8_epel4_h6_mmi(tmp, 4, src, srcstride, h + 5, mx, my);
    tmp = tmp_array + 8;
    ff_put_vp8_epel4_v6_mmi(dst, dststride, tmp, 4, h, mx, my);
#else
    const uint8_t *filter = subpel_filters[mx - 1];
    const uint8_t *cm     = ff_crop_tab + MAX_NEG_CROP;
    int x, y;
    uint8_t tmp_array[52];
    uint8_t *tmp = tmp_array;

    src -= 2 * srcstride;

    for (y = 0; y < h + 5; y++) {
        for (x = 0; x < 4; x++)
            tmp[x] = FILTER_6TAP(src, filter, 1);
        tmp += 4;
        src += srcstride;
    }

    tmp    = tmp_array + 8;
    filter = subpel_filters[my - 1];

    for (y = 0; y < h; y++) {
        for (x = 0; x < 4; x++)
            dst[x] = FILTER_6TAP(tmp, filter, 4);
        dst += dststride;
        tmp += 4;
    }
#endif
}
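
/*
 * All six h6v{4,6} wrappers are instances of one pattern. A hedged,
 * untested sketch of the common shape (the epel_fn typedef and the
 * put_epel_hv helper are hypothetical, not part of this file):
 *
 *     typedef void (*epel_fn)(uint8_t *, ptrdiff_t, uint8_t *, ptrdiff_t,
 *                             int, int, int);
 *
 *     static void put_epel_hv(uint8_t *dst, ptrdiff_t dststride,
 *                             uint8_t *src, ptrdiff_t srcstride,
 *                             int w, int h, int mx, int my,
 *                             int vtaps, epel_fn hfn, epel_fn vfn)
 *     {
 *         DECLARE_ALIGNED(8, uint8_t, tmp[16 * (16 + 5)]); // worst case
 *         int top = vtaps / 2 - 1;     // rows of context above the block
 *
 *         hfn(tmp, w, src - top * srcstride, srcstride,
 *             h + vtaps - 1, mx, my);
 *         vfn(dst, dststride, tmp + top * w, w, h, mx, my);
 *     }
 *
 * With vtaps = 4 this reproduces the h + 3 / one-row-offset variants, and
 * with vtaps = 6 the h + 5 / two-row-offset ones.
 */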
void ff_put_vp8_bilinear16_h_mmi(uint8_t *dst, ptrdiff_t dstride, uint8_t *src,
                                 ptrdiff_t sstride, int h, int mx, int my)
{
#if 1
    union mmi_intfloat64 a, b;
    double ftmp[7];
    uint32_t tmp[1];
    mips_reg dst0, src0;
    DECLARE_VAR_ALL64;

    a.i = 8 - mx;
    b.i = mx;

    /*
    dst[0]  = (a * src[0]  + b * src[1]  + 4) >> 3;
    dst[1]  = (a * src[1]  + b * src[2]  + 4) >> 3;
    dst[2]  = (a * src[2]  + b * src[3]  + 4) >> 3;
    dst[3]  = (a * src[3]  + b * src[4]  + 4) >> 3;
    dst[4]  = (a * src[4]  + b * src[5]  + 4) >> 3;
    dst[5]  = (a * src[5]  + b * src[6]  + 4) >> 3;
    dst[6]  = (a * src[6]  + b * src[7]  + 4) >> 3;
    dst[7]  = (a * src[7]  + b * src[8]  + 4) >> 3;
    dst[ 8] = (a * src[ 8] + b * src[ 9] + 4) >> 3;
    dst[ 9] = (a * src[ 9] + b * src[10] + 4) >> 3;
    dst[10] = (a * src[10] + b * src[11] + 4) >> 3;
    dst[11] = (a * src[11] + b * src[12] + 4) >> 3;
    dst[12] = (a * src[12] + b * src[13] + 4) >> 3;
    dst[13] = (a * src[13] + b * src[14] + 4) >> 3;
    dst[14] = (a * src[14] + b * src[15] + 4) >> 3;
    dst[15] = (a * src[15] + b * src[16] + 4) >> 3;
    */
    __asm__ volatile (
        "pxor       %[ftmp0],   %[ftmp0],   %[ftmp0]    \n\t"
        "li         %[tmp0],    0x03                    \n\t"
        "mtc1       %[tmp0],    %[ftmp4]                \n\t"
        "pshufh     %[a],       %[a],       %[ftmp0]    \n\t"
        "pshufh     %[b],       %[b],       %[ftmp0]    \n\t"

        "1:                                             \n\t"
        // 0 - 7
        PUT_VP8_BILINEAR8_H_MMI(%[src], %[dst])
        PTR_ADDIU  "%[src0],    %[src],     0x08        \n\t"
        PTR_ADDIU  "%[dst0],    %[dst],     0x08        \n\t"
        // 8 - 15
        PUT_VP8_BILINEAR8_H_MMI(%[src0], %[dst0])

        "addiu      %[h],       %[h],       -0x01       \n\t"
        PTR_ADDU   "%[src],     %[src],     %[sstride]  \n\t"
        PTR_ADDU   "%[dst],     %[dst],     %[dstride]  \n\t"
        "bnez       %[h],       1b                      \n\t"
        : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]),
          [ftmp2]"=&f"(ftmp[2]), [ftmp3]"=&f"(ftmp[3]),
          [ftmp4]"=&f"(ftmp[4]), [ftmp5]"=&f"(ftmp[5]),
          [ftmp6]"=&f"(ftmp[6]),
          [tmp0]"=&r"(tmp[0]),
          RESTRICT_ASM_ALL64
          [dst0]"=&r"(dst0), [src0]"=&r"(src0),
          [h]"+&r"(h),
          [dst]"+&r"(dst), [src]"+&r"(src),
          [a]"+&f"(a.f), [b]"+&f"(b.f)
        : [sstride]"r"((mips_reg)sstride),
          [dstride]"r"((mips_reg)dstride),
          [ff_pw_4]"f"(ff_pw_4.f)
        : "memory"
    );
#else
    int a = 8 - mx, b = mx;
    int x, y;

    for (y = 0; y < h; y++) {
        for (x = 0; x < 16; x++)
            dst[x] = (a * src[x] + b * src[x + 1] + 4) >> 3;
        dst += dstride;
        src += sstride;
    }
#endif
}
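
/*
 * Setup notes for the bilinear horizontal kernels: pshufh with a zero
 * selector broadcasts the low halfword of a and b across all four
 * halfwords, and the 0x03 moved into ftmp4 is presumably consumed inside
 * PUT_VP8_BILINEAR8_H_MMI as the shift count for the final ">> 3", with
 * ff_pw_4 supplying the "+ 4" rounding bias. The 16-pixel version simply
 * runs the 8-pixel macro twice per row, once on src/dst and once on
 * src0/dst0 offset by 8 bytes.
 */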
void ff_put_vp8_bilinear16_v_mmi(uint8_t *dst, ptrdiff_t dstride, uint8_t *src,
                                 ptrdiff_t sstride, int h, int mx, int my)
{
#if 1
    union mmi_intfloat64 c, d;
    double ftmp[7];
    uint32_t tmp[1];
    mips_reg src0, src1, dst0;
    DECLARE_VAR_ALL64;

    c.i = 8 - my;
    d.i = my;

    /*
    dst[0] = (c * src[0] + d * src[    sstride] + 4) >> 3;
    dst[1] = (c * src[1] + d * src[1 + sstride] + 4) >> 3;
    dst[2] = (c * src[2] + d * src[2 + sstride] + 4) >> 3;
    dst[3] = (c * src[3] + d * src[3 + sstride] + 4) >> 3;
    dst[4] = (c * src[4] + d * src[4 + sstride] + 4) >> 3;
    dst[5] = (c * src[5] + d * src[5 + sstride] + 4) >> 3;
    dst[6] = (c * src[6] + d * src[6 + sstride] + 4) >> 3;
    dst[7] = (c * src[7] + d * src[7 + sstride] + 4) >> 3;
    */
    __asm__ volatile (
        "pxor       %[ftmp0],   %[ftmp0],   %[ftmp0]    \n\t"
        "li         %[tmp0],    0x03                    \n\t"
        "mtc1       %[tmp0],    %[ftmp4]                \n\t"
        "pshufh     %[c],       %[c],       %[ftmp0]    \n\t"
        "pshufh     %[d],       %[d],       %[ftmp0]    \n\t"

        "1:                                             \n\t"
        // 0 - 7
        PUT_VP8_BILINEAR8_V_MMI(%[src], %[src1], %[dst], %[sstride])
        PTR_ADDIU  "%[src0],    %[src],     0x08        \n\t"
        PTR_ADDIU  "%[dst0],    %[dst],     0x08        \n\t"
        // 8 - 15
        PUT_VP8_BILINEAR8_V_MMI(%[src0], %[src1], %[dst0], %[sstride])

        "addiu      %[h],       %[h],       -0x01       \n\t"
        PTR_ADDU   "%[src],     %[src],     %[sstride]  \n\t"
        PTR_ADDU   "%[dst],     %[dst],     %[dstride]  \n\t"
        "bnez       %[h],       1b                      \n\t"
        : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]),
          [ftmp2]"=&f"(ftmp[2]), [ftmp3]"=&f"(ftmp[3]),
          [ftmp4]"=&f"(ftmp[4]), [ftmp5]"=&f"(ftmp[5]),
          [ftmp6]"=&f"(ftmp[6]),
          [tmp0]"=&r"(tmp[0]),
          RESTRICT_ASM_ALL64
          [src0]"=&r"(src0), [dst0]"=&r"(dst0),
          [src1]"=&r"(src1),
          [h]"+&r"(h),
          [dst]"+&r"(dst), [src]"+&r"(src),
          [c]"+&f"(c.f), [d]"+&f"(d.f)
        : [sstride]"r"((mips_reg)sstride),
          [dstride]"r"((mips_reg)dstride),
          [ff_pw_4]"f"(ff_pw_4.f)
        : "memory"
    );
#else
    int c = 8 - my, d = my;
    int x, y;

    for (y = 0; y < h; y++) {
        for (x = 0; x < 16; x++)
            dst[x] = (c * src[x] + d * src[x + sstride] + 4) >> 3;
        dst += dstride;
        src += sstride;
    }
#endif
}
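
/*
 * The vertical version is structured identically; src1 is scratch for the
 * row below the current one (presumably set to src + sstride inside
 * PUT_VP8_BILINEAR8_V_MMI), matching the scalar fallback's
 * src[x + sstride] term.
 */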
void ff_put_vp8_bilinear16_hv_mmi(uint8_t *dst, ptrdiff_t dstride, uint8_t *src,
                                  ptrdiff_t sstride, int h, int mx, int my)
{
#if 1
    DECLARE_ALIGNED(8, uint8_t, tmp_array[528]);
    uint8_t *tmp = tmp_array;

    ff_put_vp8_bilinear16_h_mmi(tmp, 16, src, sstride, h + 1, mx, my);
    ff_put_vp8_bilinear16_v_mmi(dst, dstride, tmp, 16, h, mx, my);
#else
    int a = 8 - mx, b = mx;
    int c = 8 - my, d = my;
    int x, y;
    uint8_t tmp_array[528];
    uint8_t *tmp = tmp_array;

    for (y = 0; y < h + 1; y++) {
        for (x = 0; x < 16; x++)
            tmp[x] = (a * src[x] + b * src[x + 1] + 4) >> 3;
        tmp += 16;
        src += sstride;
    }

    tmp = tmp_array;

    for (y = 0; y < h; y++) {
        for (x = 0; x < 16; x++)
            dst[x] = (c * tmp[x] + d * tmp[x + 16] + 4) >> 3;
        dst += dstride;
        tmp += 16;
    }
#endif
}
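
/*
 * The hv case chains the two passes through a temporary buffer, exactly
 * like the epel wrappers: the horizontal pass must produce h + 1 rows
 * because the vertical pass reads tmp[x] and tmp[x + 16] (one row below).
 * No source offset is needed since bilinear taps never reach above the
 * block. The 528-byte buffer comfortably covers the 16 * (h + 1) bytes an
 * h = 16 call writes.
 */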
void ff_put_vp8_bilinear8_h_mmi(uint8_t *dst, ptrdiff_t dstride, uint8_t *src,
                                ptrdiff_t sstride, int h, int mx, int my)
{
#if 1
    union mmi_intfloat64 a, b;
    double ftmp[7];
    uint32_t tmp[1];
    DECLARE_VAR_ALL64;

    a.i = 8 - mx;
    b.i = mx;

    /*
    dst[0] = (a * src[0] + b * src[1] + 4) >> 3;
    dst[1] = (a * src[1] + b * src[2] + 4) >> 3;
    dst[2] = (a * src[2] + b * src[3] + 4) >> 3;
    dst[3] = (a * src[3] + b * src[4] + 4) >> 3;
    dst[4] = (a * src[4] + b * src[5] + 4) >> 3;
    dst[5] = (a * src[5] + b * src[6] + 4) >> 3;
    dst[6] = (a * src[6] + b * src[7] + 4) >> 3;
    dst[7] = (a * src[7] + b * src[8] + 4) >> 3;
    */
    __asm__ volatile (
        "pxor       %[ftmp0],   %[ftmp0],   %[ftmp0]    \n\t"
        "li         %[tmp0],    0x03                    \n\t"
        "mtc1       %[tmp0],    %[ftmp4]                \n\t"
        "pshufh     %[a],       %[a],       %[ftmp0]    \n\t"
        "pshufh     %[b],       %[b],       %[ftmp0]    \n\t"

        "1:                                             \n\t"
        PUT_VP8_BILINEAR8_H_MMI(%[src], %[dst])

        "addiu      %[h],       %[h],       -0x01       \n\t"
        PTR_ADDU   "%[src],     %[src],     %[sstride]  \n\t"
        PTR_ADDU   "%[dst],     %[dst],     %[dstride]  \n\t"
        "bnez       %[h],       1b                      \n\t"
        : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]),
          [ftmp2]"=&f"(ftmp[2]), [ftmp3]"=&f"(ftmp[3]),
          [ftmp4]"=&f"(ftmp[4]), [ftmp5]"=&f"(ftmp[5]),
          [ftmp6]"=&f"(ftmp[6]),
          [tmp0]"=&r"(tmp[0]),
          RESTRICT_ASM_ALL64
          [h]"+&r"(h),
          [dst]"+&r"(dst), [src]"+&r"(src),
          [a]"+&f"(a.f), [b]"+&f"(b.f)
        : [sstride]"r"((mips_reg)sstride),
          [dstride]"r"((mips_reg)dstride),
          [ff_pw_4]"f"(ff_pw_4.f)
        : "memory"
    );
#else
    int a = 8 - mx, b = mx;
    int x, y;

    for (y = 0; y < h; y++) {
        for (x = 0; x < 8; x++)
            dst[x] = (a * src[x] + b * src[x + 1] + 4) >> 3;
        dst += dstride;
        src += sstride;
    }
#endif
}
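
/*
 * The 8-pixel variant is the 16-pixel loop minus the second macro
 * invocation, so the src0/dst0 scratch registers disappear from the
 * constraint list.
 */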
void ff_put_vp8_bilinear8_v_mmi(uint8_t *dst, ptrdiff_t dstride, uint8_t *src,
                                ptrdiff_t sstride, int h, int mx, int my)
{
#if 1
    union mmi_intfloat64 c, d;
    double ftmp[7];
    uint32_t tmp[1];
    mips_reg src1;
    DECLARE_VAR_ALL64;

    c.i = 8 - my;
    d.i = my;

    /*
    dst[0] = (c * src[0] + d * src[    sstride] + 4) >> 3;
    dst[1] = (c * src[1] + d * src[1 + sstride] + 4) >> 3;
    dst[2] = (c * src[2] + d * src[2 + sstride] + 4) >> 3;
    dst[3] = (c * src[3] + d * src[3 + sstride] + 4) >> 3;
    dst[4] = (c * src[4] + d * src[4 + sstride] + 4) >> 3;
    dst[5] = (c * src[5] + d * src[5 + sstride] + 4) >> 3;
    dst[6] = (c * src[6] + d * src[6 + sstride] + 4) >> 3;
    dst[7] = (c * src[7] + d * src[7 + sstride] + 4) >> 3;
    */
    __asm__ volatile (
        "pxor       %[ftmp0],   %[ftmp0],   %[ftmp0]    \n\t"
        "li         %[tmp0],    0x03                    \n\t"
        "mtc1       %[tmp0],    %[ftmp4]                \n\t"
        "pshufh     %[c],       %[c],       %[ftmp0]    \n\t"
        "pshufh     %[d],       %[d],       %[ftmp0]    \n\t"

        "1:                                             \n\t"
        PUT_VP8_BILINEAR8_V_MMI(%[src], %[src1], %[dst], %[sstride])

        "addiu      %[h],       %[h],       -0x01       \n\t"
        PTR_ADDU   "%[src],     %[src],     %[sstride]  \n\t"
        PTR_ADDU   "%[dst],     %[dst],     %[dstride]  \n\t"
        "bnez       %[h],       1b                      \n\t"
        : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]),
          [ftmp2]"=&f"(ftmp[2]), [ftmp3]"=&f"(ftmp[3]),
          [ftmp4]"=&f"(ftmp[4]), [ftmp5]"=&f"(ftmp[5]),
          [ftmp6]"=&f"(ftmp[6]),
          [tmp0]"=&r"(tmp[0]),
          RESTRICT_ASM_ALL64
          [src1]"=&r"(src1),
          [h]"+&r"(h),
          [dst]"+&r"(dst), [src]"+&r"(src),
          [c]"+&f"(c.f), [d]"+&f"(d.f)
        : [sstride]"r"((mips_reg)sstride),
          [dstride]"r"((mips_reg)dstride),
          [ff_pw_4]"f"(ff_pw_4.f)
        : "memory"
    );
#else
    int c = 8 - my, d = my;
    int x, y;

    for (y = 0; y < h; y++) {
        for (x = 0; x < 8; x++)
            dst[x] = (c * src[x] + d * src[x + sstride] + 4) >> 3;
        dst += dstride;
        src += sstride;
    }
#endif
}
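
/*
 * DECLARE_VAR_ALL64 / RESTRICT_ASM_ALL64 come from libavutil/mips/
 * mmiutils.h; as far as these wrappers are concerned they appear to
 * declare a dummy variable and bind it as a memory operand so the
 * compiler is told about the 64-bit loads/stores the asm performs. The
 * 4-pixel versions below add the LOW32 pair for their 32-bit accesses.
 */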
void ff_put_vp8_bilinear8_hv_mmi(uint8_t *dst, ptrdiff_t dstride, uint8_t *src,
                                 ptrdiff_t sstride, int h, int mx, int my)
{
#if 1
    DECLARE_ALIGNED(8, uint8_t, tmp_array[136]);
    uint8_t *tmp = tmp_array;

    ff_put_vp8_bilinear8_h_mmi(tmp, 8, src, sstride, h + 1, mx, my);
    ff_put_vp8_bilinear8_v_mmi(dst, dstride, tmp, 8, h, mx, my);
#else
    int a = 8 - mx, b = mx;
    int c = 8 - my, d = my;
    int x, y;
    uint8_t tmp_array[136];
    uint8_t *tmp = tmp_array;

    for (y = 0; y < h + 1; y++) {
        for (x = 0; x < 8; x++)
            tmp[x] = (a * src[x] + b * src[x + 1] + 4) >> 3;
        tmp += 8;
        src += sstride;
    }

    tmp = tmp_array;

    for (y = 0; y < h; y++) {
        for (x = 0; x < 8; x++)
            dst[x] = (c * tmp[x] + d * tmp[x + 8] + 4) >> 3;
        dst += dstride;
        tmp += 8;
    }
#endif
}
void ff_put_vp8_bilinear4_h_mmi(uint8_t *dst, ptrdiff_t dstride, uint8_t *src,
                                ptrdiff_t sstride, int h, int mx, int my)
{
#if 1
    union mmi_intfloat64 a, b;
    double ftmp[5];
    uint32_t tmp[1];
    DECLARE_VAR_LOW32;
    DECLARE_VAR_ALL64;

    a.i = 8 - mx;
    b.i = mx;

    /*
    dst[0] = (a * src[0] + b * src[1] + 4) >> 3;
    dst[1] = (a * src[1] + b * src[2] + 4) >> 3;
    dst[2] = (a * src[2] + b * src[3] + 4) >> 3;
    dst[3] = (a * src[3] + b * src[4] + 4) >> 3;
    */
    __asm__ volatile (
        "pxor       %[ftmp0],   %[ftmp0],   %[ftmp0]    \n\t"
        "li         %[tmp0],    0x03                    \n\t"
        "mtc1       %[tmp0],    %[ftmp4]                \n\t"
        "pshufh     %[a],       %[a],       %[ftmp0]    \n\t"
        "pshufh     %[b],       %[b],       %[ftmp0]    \n\t"

        "1:                                             \n\t"
        PUT_VP8_BILINEAR4_H_MMI(%[src], %[dst])

        "addiu      %[h],       %[h],       -0x01       \n\t"
        PTR_ADDU   "%[src],     %[src],     %[sstride]  \n\t"
        PTR_ADDU   "%[dst],     %[dst],     %[dstride]  \n\t"
        "bnez       %[h],       1b                      \n\t"
        : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]),
          [ftmp2]"=&f"(ftmp[2]), [ftmp3]"=&f"(ftmp[3]),
          [ftmp4]"=&f"(ftmp[4]),
          [tmp0]"=&r"(tmp[0]),
          RESTRICT_ASM_LOW32
          RESTRICT_ASM_ALL64
          [h]"+&r"(h),
          [dst]"+&r"(dst), [src]"+&r"(src),
          [a]"+&f"(a.f), [b]"+&f"(b.f)
        : [sstride]"r"((mips_reg)sstride),
          [dstride]"r"((mips_reg)dstride),
          [ff_pw_4]"f"(ff_pw_4.f)
        : "memory"
    );
#else
    int a = 8 - mx, b = mx;
    int x, y;

    for (y = 0; y < h; y++) {
        for (x = 0; x < 4; x++)
            dst[x] = (a * src[x] + b * src[x + 1] + 4) >> 3;
        dst += dstride;
        src += sstride;
    }
#endif
}
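
/*
 * Only five float temporaries are bound here, presumably because the
 * 4-pixel kernel works on a single 32-bit load per row; everything else
 * matches the 8-pixel setup.
 */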
void ff_put_vp8_bilinear4_v_mmi(uint8_t *dst, ptrdiff_t dstride, uint8_t *src,
                                ptrdiff_t sstride, int h, int mx, int my)
{
#if 1
    union mmi_intfloat64 c, d;
    double ftmp[7];
    uint32_t tmp[1];
    mips_reg src1;
    DECLARE_VAR_LOW32;
    DECLARE_VAR_ALL64;

    c.i = 8 - my;
    d.i = my;

    /*
    dst[0] = (c * src[0] + d * src[    sstride] + 4) >> 3;
    dst[1] = (c * src[1] + d * src[1 + sstride] + 4) >> 3;
    dst[2] = (c * src[2] + d * src[2 + sstride] + 4) >> 3;
    dst[3] = (c * src[3] + d * src[3 + sstride] + 4) >> 3;
    */
    __asm__ volatile (
        "pxor       %[ftmp0],   %[ftmp0],   %[ftmp0]    \n\t"
        "li         %[tmp0],    0x03                    \n\t"
        "mtc1       %[tmp0],    %[ftmp4]                \n\t"
        "pshufh     %[c],       %[c],       %[ftmp0]    \n\t"
        "pshufh     %[d],       %[d],       %[ftmp0]    \n\t"

        "1:                                             \n\t"
        PUT_VP8_BILINEAR4_V_MMI(%[src], %[src1], %[dst], %[sstride])

        "addiu      %[h],       %[h],       -0x01       \n\t"
        PTR_ADDU   "%[src],     %[src],     %[sstride]  \n\t"
        PTR_ADDU   "%[dst],     %[dst],     %[dstride]  \n\t"
        "bnez       %[h],       1b                      \n\t"
        : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]),
          [ftmp2]"=&f"(ftmp[2]), [ftmp3]"=&f"(ftmp[3]),
          [ftmp4]"=&f"(ftmp[4]),
          [tmp0]"=&r"(tmp[0]),
          RESTRICT_ASM_LOW32
          RESTRICT_ASM_ALL64
          [src1]"=&r"(src1),
          [h]"+&r"(h),
          [dst]"+&r"(dst), [src]"+&r"(src),
          [c]"+&f"(c.f), [d]"+&f"(d.f)
        : [sstride]"r"((mips_reg)sstride),
          [dstride]"r"((mips_reg)dstride),
          [ff_pw_4]"f"(ff_pw_4.f)
        : "memory"
    );
#else
    int c = 8 - my, d = my;
    int x, y;

    for (y = 0; y < h; y++) {
        for (x = 0; x < 4; x++)
            dst[x] = (c * src[x] + d * src[x + sstride] + 4) >> 3;
        dst += dstride;
        src += sstride;
    }
#endif
}
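
/*
 * Note that ftmp[] is declared with seven elements here although only
 * ftmp[0]..ftmp[4] are bound; the over-allocation is harmless but could
 * be shrunk to match ff_put_vp8_bilinear4_h_mmi.
 */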
void ff_put_vp8_bilinear4_hv_mmi(uint8_t *dst, ptrdiff_t dstride, uint8_t *src,
                                 ptrdiff_t sstride, int h, int mx, int my)
{
#if 1
    DECLARE_ALIGNED(4, uint8_t, tmp_array[36]);
    uint8_t *tmp = tmp_array;

    ff_put_vp8_bilinear4_h_mmi(tmp, 4, src, sstride, h + 1, mx, my);
    ff_put_vp8_bilinear4_v_mmi(dst, dstride, tmp, 4, h, mx, my);
#else
    int a = 8 - mx, b = mx;
    int c = 8 - my, d = my;
    int x, y;
    uint8_t tmp_array[36];
    uint8_t *tmp = tmp_array;

    for (y = 0; y < h + 1; y++) {
        for (x = 0; x < 4; x++)
            tmp[x] = (a * src[x] + b * src[x + 1] + 4) >> 3;
        tmp += 4;
        src += sstride;
    }

    tmp = tmp_array;

    for (y = 0; y < h; y++) {
        for (x = 0; x < 4; x++)
            dst[x] = (c * tmp[x] + d * tmp[x + 4] + 4) >> 3;
        dst += dstride;
        tmp += 4;
    }
#endif
}
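
/*
 * These functions are meant to be plugged into VP8DSPContext's
 * put_vp8_epel_pixels_tab / put_vp8_bilinear_pixels_tab function tables.
 * A hedged sketch of the wiring, assuming the [size][v][h] index
 * convention used by other FFmpeg ports (0 = copy, 1 = 4-tap/bilinear,
 * 2 = 6-tap; size index 0/1/2 = 16/8/4 pixels); the exact indices belong
 * to the init code, not this file:
 *
 *     dsp->put_vp8_epel_pixels_tab[0][1][2]     = ff_put_vp8_epel16_h6v4_mmi;
 *     dsp->put_vp8_epel_pixels_tab[0][2][2]     = ff_put_vp8_epel16_h6v6_mmi;
 *     dsp->put_vp8_bilinear_pixels_tab[0][1][1] = ff_put_vp8_bilinear16_hv_mmi;
 */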