tests.rs

#![cfg(test)]
use {
    super::*,
    crate::{
        accounts_file::AccountsFileProvider,
        accounts_index::{tests::*, AccountSecondaryIndexesIncludeExclude},
        append_vec::{
            aligned_stored_size, test_utils::TempFile, AccountMeta, AppendVec, StoredMeta,
        },
        storable_accounts::AccountForStorage,
    },
    itertools::Itertools,
    rand::{prelude::SliceRandom, thread_rng, Rng},
    solana_account::{
        accounts_equal, Account, AccountSharedData, InheritableAccountFields, ReadableAccount,
        WritableAccount, DUMMY_INHERITABLE_ACCOUNT_FIELDS,
    },
    solana_lattice_hash::lt_hash::Checksum as LtHashChecksum,
    solana_pubkey::PUBKEY_BYTES,
    std::{
        iter::{self, FromIterator},
        ops::Range,
        str::FromStr,
        sync::{atomic::AtomicBool, RwLock},
        thread::{self, Builder, JoinHandle},
    },
    test_case::{test_case, test_matrix},
};

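/// Builds an `Ancestors` set containing the linear chain of slots `0..end_slot`.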
fn linear_ancestors(end_slot: u64) -> Ancestors {
    let mut ancestors: Ancestors = vec![(0, 0)].into_iter().collect();
    for i in 1..end_slot {
        ancestors.insert(i, (i - 1) as usize);
    }
    ancestors
}

impl AccountsDb {
    fn get_storage_for_slot(&self, slot: Slot) -> Option<Arc<AccountStorageEntry>> {
        self.storage.get_slot_storage_entry(slot)
    }
}

/// This tuple contains slot info PER account.
impl<'a, T: ReadableAccount + Sync> StorableAccounts<'a> for (Slot, &'a [(&'a Pubkey, &'a T, Slot)])
where
    AccountForStorage<'a>: From<&'a T>,
{
    fn is_zero_lamport(&self, index: usize) -> bool {
        self.1[index].1.lamports() == 0
    }
    fn data_len(&self, index: usize) -> usize {
        self.1[index].1.data().len()
    }
    fn account<Ret>(
        &self,
        index: usize,
        mut callback: impl for<'local> FnMut(AccountForStorage<'local>) -> Ret,
    ) -> Ret {
        callback(self.1[index].1.into())
    }
    fn pubkey(&self, index: usize) -> &Pubkey {
        self.1[index].0
    }
    fn slot(&self, index: usize) -> Slot {
        // note that this could be different from 'target_slot()' PER account
        self.1[index].2
    }
    fn target_slot(&self) -> Slot {
        self.0
    }
    fn len(&self) -> usize {
        self.1.len()
    }
    fn contains_multiple_slots(&self) -> bool {
        let len = self.len();
        if len > 0 {
            let slot = self.slot(0);
            // true if any item has a different slot than the first item
            (1..len).any(|i| slot != self.slot(i))
        } else {
            false
        }
    }
}

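// Illustrative sketch (not part of the original test suite): how the per-account-slot tuple
// form above is used. Each entry carries its own source slot, which may differ from the
// tuple's `target_slot()`; `contains_multiple_slots()` reports whether they actually differ.
// The names below are hypothetical.
//
//     let key = Pubkey::new_unique();
//     let account = AccountSharedData::new(1, 0, &key);
//     // two entries destined for target slot 2, originally written in slots 0 and 1
//     let entries = [(&key, &account, 0), (&key, &account, 1)];
//     let storable = (2, &entries[..]);
//     assert_eq!(storable.target_slot(), 2);
//     assert_eq!(storable.slot(1), 1);
//     assert!(storable.contains_multiple_slots());
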
fn create_loadable_account_with_fields(
    name: &str,
    (lamports, rent_epoch): InheritableAccountFields,
) -> AccountSharedData {
    AccountSharedData::from(Account {
        lamports,
        owner: solana_sdk_ids::native_loader::id(),
        data: name.as_bytes().to_vec(),
        executable: true,
        rent_epoch,
    })
}

fn create_loadable_account_for_test(name: &str) -> AccountSharedData {
    create_loadable_account_with_fields(name, DUMMY_INHERITABLE_ACCOUNT_FIELDS)
}

impl AccountStorageEntry {
    fn add_account(&self, num_bytes: usize) {
        self.add_accounts(1, num_bytes)
    }
}

/// Helper macro to define an accounts_db test for both `AppendVec` and `HotStorage`.
/// This macro supports creating both regular tests and tests that should panic.
/// Usage:
/// For a regular test, use the following syntax.
/// define_accounts_db_test!(TEST_NAME, |accounts_db| { TEST_BODY });
/// For a test that should panic, use the following syntax.
/// define_accounts_db_test!(TEST_NAME, panic = "PANIC_MSG", |accounts_db| { TEST_BODY });
macro_rules! define_accounts_db_test {
    (@testfn $name:ident, $accounts_file_provider: ident, $mark_obsolete_accounts: ident, |$accounts_db:ident| $inner: tt) => {
        fn run_test($accounts_db: AccountsDb) {
            $inner
        }
        let accounts_db = AccountsDb::new_single_for_tests_with_provider_and_config(
            $accounts_file_provider,
            AccountsDbConfig {
                mark_obsolete_accounts: $mark_obsolete_accounts,
                ..ACCOUNTS_DB_CONFIG_FOR_TESTING
            },
        );
        run_test(accounts_db);
    };
    ($name:ident, |$accounts_db:ident| $inner: tt) => {
        #[test_matrix(
            [AccountsFileProvider::AppendVec, AccountsFileProvider::HotStorage],
            [MarkObsoleteAccounts::Enabled, MarkObsoleteAccounts::Disabled]
        )]
        fn $name(accounts_file_provider: AccountsFileProvider, mark_obsolete_accounts: MarkObsoleteAccounts) {
            define_accounts_db_test!(@testfn $name, accounts_file_provider, mark_obsolete_accounts, |$accounts_db| $inner);
        }
    };
    ($name:ident, panic = $panic_message:literal, |$accounts_db:ident| $inner: tt) => {
        #[test_matrix(
            [AccountsFileProvider::AppendVec, AccountsFileProvider::HotStorage],
            [MarkObsoleteAccounts::Enabled, MarkObsoleteAccounts::Disabled]
        )]
        #[should_panic(expected = $panic_message)]
        fn $name(accounts_file_provider: AccountsFileProvider, mark_obsolete_accounts: MarkObsoleteAccounts) {
            define_accounts_db_test!(@testfn $name, accounts_file_provider, mark_obsolete_accounts, |$accounts_db| $inner);
        }
    };
}
pub(crate) use define_accounts_db_test;

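// Illustrative sketch (not part of the original test suite): a test declared with the macro
// above runs once per combination of `AccountsFileProvider` and `MarkObsoleteAccounts` via
// `#[test_matrix]`. A hypothetical definition would look like:
//
//     define_accounts_db_test!(example_store_and_lookup, |db| {
//         let key = Pubkey::new_unique();
//         let account = AccountSharedData::new(1, 0, &key);
//         db.store_for_tests((0, [(&key, &account)].as_slice()));
//         assert!(db.accounts_index.contains(&key));
//     });
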
fn run_generate_index_duplicates_within_slot_test(db: AccountsDb, reverse: bool) {
    let slot0 = 0;
    let pubkey = Pubkey::from([1; 32]);
    let append_vec = db.create_and_insert_store(slot0, 1000, "test");
    let mut account_small = AccountSharedData::default();
    account_small.set_data(vec![1]);
    account_small.set_lamports(1);
    let mut account_big = AccountSharedData::default();
    account_big.set_data(vec![5; 10]);
    account_big.set_lamports(2);
    assert_ne!(
        aligned_stored_size(account_big.data().len()),
        aligned_stored_size(account_small.data().len())
    );
    // Same account twice with different data lens.
    // The rule is that the last one stored for each pubkey is the one that ends up in the index.
    let mut data = vec![(&pubkey, &account_big), (&pubkey, &account_small)];
    if reverse {
        data = data.into_iter().rev().collect();
    }
    let storable_accounts = (slot0, &data[..]);
    // construct append vec with account to generate an index from
    append_vec.accounts.write_accounts(&storable_accounts, 0);
    assert!(!db.accounts_index.contains(&pubkey));
    let storage_info = StorageSizeAndCountMap::default();
    let storage = db.get_storage_for_slot(slot0).unwrap();
    let mut reader = append_vec::new_scan_accounts_reader();
    db.generate_index_for_slot(
        &mut reader,
        &storage,
        storage.slot(),
        storage.id(),
        &storage_info,
    );
}

define_accounts_db_test!(
    test_generate_index_duplicates_within_slot,
    panic = "Accounts may only be stored once per slot:",
    |db| {
        run_generate_index_duplicates_within_slot_test(db, false);
    }
);

define_accounts_db_test!(
    test_generate_index_duplicates_within_slot_reverse,
    panic = "Accounts may only be stored once per slot:",
    |db| {
        run_generate_index_duplicates_within_slot_test(db, true);
    }
);

#[test]
fn test_generate_index_for_single_ref_zero_lamport_slot() {
    let db = AccountsDb::new_single_for_tests();
    let slot0 = 0;
    let pubkey = Pubkey::from([1; 32]);
    let append_vec = db.create_and_insert_store(slot0, 1000, "test");
    let account = AccountSharedData::default();
    let data = [(&pubkey, &account)];
    let storable_accounts = (slot0, &data[..]);
    append_vec.accounts.write_accounts(&storable_accounts, 0);
    assert!(!db.accounts_index.contains(&pubkey));
    let result = db.generate_index(None, false);
    let slot_list_len = db.accounts_index.get_and_then(&pubkey, |entry| {
        (false, entry.unwrap().slot_list_lock_read_len())
    });
    assert_eq!(slot_list_len, 1);
    assert_eq!(append_vec.alive_bytes(), aligned_stored_size(0));
    assert_eq!(append_vec.accounts_count(), 1);
    assert_eq!(append_vec.count(), 1);
    assert_eq!(result.accounts_data_len, 0);
    assert_eq!(1, append_vec.num_zero_lamport_single_ref_accounts());
    assert_eq!(
        0,
        append_vec.alive_bytes_exclude_zero_lamport_single_ref_accounts()
    );
}

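/// Builds a deterministic `AccountFromStorage` from `i`: pubkey `[i; 32]`, `data_len` of `i`,
/// and an append-vec location in store `i` at offset `i * 8` (8-byte aligned).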
fn generate_sample_account_from_storage(i: u8) -> AccountFromStorage {
    // offset has to be 8 byte aligned
    let offset = (i as usize) * std::mem::size_of::<u64>();
    AccountFromStorage {
        index_info: AccountInfo::new(StorageLocation::AppendVec(i as u32, offset), i == 0),
        data_len: i as u64,
        pubkey: Pubkey::new_from_array([i; 32]),
    }
}

/// Reserve ancient storage size is not supported for TieredStorage
#[test]
fn test_sort_and_remove_dups() {
    // empty
    let mut test1 = vec![];
    let expected = test1.clone();
    AccountsDb::sort_and_remove_dups(&mut test1);
    assert_eq!(test1, expected);
    assert_eq!(test1, expected);
    // just 0
    let mut test1 = vec![generate_sample_account_from_storage(0)];
    let expected = test1.clone();
    AccountsDb::sort_and_remove_dups(&mut test1);
    assert_eq!(test1, expected);
    assert_eq!(test1, expected);
    // 0, 1
    let mut test1 = vec![
        generate_sample_account_from_storage(0),
        generate_sample_account_from_storage(1),
    ];
    let expected = test1.clone();
    AccountsDb::sort_and_remove_dups(&mut test1);
    assert_eq!(test1, expected);
    assert_eq!(test1, expected);
    // 1, 0. sort should reverse
    let mut test2 = vec![
        generate_sample_account_from_storage(1),
        generate_sample_account_from_storage(0),
    ];
    AccountsDb::sort_and_remove_dups(&mut test2);
    assert_eq!(test2, expected);
    assert_eq!(test2, expected);
    for insert_other_good in 0..2 {
        // 0 twice so it gets removed
        let mut test1 = vec![
            generate_sample_account_from_storage(0),
            generate_sample_account_from_storage(0),
        ];
        let mut expected = test1.clone();
        expected.truncate(1); // get rid of 1st duplicate
        test1.first_mut().unwrap().data_len = 2342342; // this one should be ignored, so modify the data_len so it will fail the compare below if it is used
        if insert_other_good < 2 {
            // insert another good one before or after the 2 bad ones
            test1.insert(insert_other_good, generate_sample_account_from_storage(1));
            // other good one should always be last since it is sorted after
            expected.push(generate_sample_account_from_storage(1));
        }
        AccountsDb::sort_and_remove_dups(&mut test1);
        assert_eq!(test1, expected);
        assert_eq!(test1, expected);
    }
    let mut test1 = [1, 0, 1, 0, 1u8]
        .into_iter()
        .map(generate_sample_account_from_storage)
        .collect::<Vec<_>>();
    test1.iter_mut().take(3).for_each(|entry| {
        entry.data_len = 2342342; // this one should be ignored, so modify the data_len so it will fail the compare below if it is used
        entry.index_info = AccountInfo::new(StorageLocation::Cached, false);
    });
    let expected = [0, 1u8]
        .into_iter()
        .map(generate_sample_account_from_storage)
        .collect::<Vec<_>>();
    AccountsDb::sort_and_remove_dups(&mut test1);
    assert_eq!(test1, expected);
    assert_eq!(test1, expected);
}

#[test]
fn test_sort_and_remove_dups_random() {
    use rand::prelude::*;
    let mut rng = rand_chacha::ChaCha8Rng::seed_from_u64(1234);
    let accounts: Vec<_> =
        std::iter::repeat_with(|| generate_sample_account_from_storage(rng.gen::<u8>()))
            .take(1000)
            .collect();
    let mut accounts1 = accounts.clone();
    let num_dups1 = AccountsDb::sort_and_remove_dups(&mut accounts1);
    // Cross-check: compute the sorted, de-duplicated result independently with a BTreeMap.
    let mut map = std::collections::BTreeMap::default();
    let mut num_dups2 = 0;
    for account in accounts.iter() {
        if map.insert(*account.pubkey(), *account).is_some() {
            num_dups2 += 1;
        }
    }
    let accounts2: Vec<_> = map.into_values().collect();
    assert_eq!(accounts1, accounts2);
    assert_eq!(num_dups1, num_dups2);
}

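/// Writes `account` under `pubkey` directly into `storage` at the storage's slot.
/// If `mark_alive` is set, the stored bytes are credited to the storage via `add_account`;
/// if `add_to_index` is supplied, the new location is also upserted into that index.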
pub(crate) fn append_single_account_with_default_hash(
    storage: &AccountStorageEntry,
    pubkey: &Pubkey,
    account: &AccountSharedData,
    mark_alive: bool,
    add_to_index: Option<&AccountInfoAccountsIndex>,
) {
    let slot = storage.slot();
    let accounts = [(pubkey, account)];
    let slice = &accounts[..];
    let storable_accounts = (slot, slice);
    let stored_accounts_info = storage
        .accounts
        .write_accounts(&storable_accounts, 0)
        .unwrap();
    if mark_alive {
        // updates 'alive_bytes' on the storage
        storage.add_account(stored_accounts_info.size);
    }
    if let Some(index) = add_to_index {
        let account_info = AccountInfo::new(
            StorageLocation::AppendVec(storage.id(), stored_accounts_info.offsets[0]),
            account.lamports() == 0,
        );
        index.upsert(
            slot,
            slot,
            pubkey,
            account,
            &AccountSecondaryIndexes::default(),
            account_info,
            &mut ReclaimsSlotList::new(),
            UpsertReclaim::IgnoreReclaims,
        );
    }
}

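/// Appends a single sample account (1 lamport, `account_data_size` bytes of data, default 48)
/// for `pubkey` into `storage`, without touching the accounts index.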
fn append_sample_data_to_storage(
    storage: &AccountStorageEntry,
    pubkey: &Pubkey,
    mark_alive: bool,
    account_data_size: Option<u64>,
) {
    let acc = AccountSharedData::new(
        1,
        account_data_size.unwrap_or(48) as usize,
        AccountSharedData::default().owner(),
    );
    append_single_account_with_default_hash(storage, pubkey, &acc, mark_alive, None);
}

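/// Creates an `AccountStorageEntry` whose nominal size is chosen so that the single sample
/// account written into it fills roughly `fill_percentage` percent of it (the backing
/// `AppendVec` is at least 1 MiB), appends that account for `pubkey`, and returns the storage.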
fn sample_storage_with_entries_id_fill_percentage(
    tf: &TempFile,
    slot: Slot,
    pubkey: &Pubkey,
    id: AccountsFileId,
    mark_alive: bool,
    account_data_size: Option<u64>,
    fill_percentage: u64,
    storage_access: StorageAccess,
) -> Arc<AccountStorageEntry> {
    let (_temp_dirs, paths) = get_temp_accounts_paths(1).unwrap();
    let file_size = account_data_size.unwrap_or(123) * 100 / fill_percentage;
    let size_aligned: usize = aligned_stored_size(file_size as usize);
    let mut data = AccountStorageEntry::new(
        &paths[0],
        slot,
        id,
        size_aligned as u64,
        AccountsFileProvider::AppendVec,
        storage_access,
    );
    let av = AccountsFile::AppendVec(AppendVec::new(
        &tf.path,
        true,
        (1024 * 1024).max(size_aligned),
        storage_access,
    ));
    data.accounts = av;
    let arc = Arc::new(data);
    append_sample_data_to_storage(&arc, pubkey, mark_alive, account_data_size);
    arc
}

fn sample_storage_with_entries_id(
    tf: &TempFile,
    slot: Slot,
    pubkey: &Pubkey,
    id: AccountsFileId,
    mark_alive: bool,
    account_data_size: Option<u64>,
    storage_access: StorageAccess,
) -> Arc<AccountStorageEntry> {
    sample_storage_with_entries_id_fill_percentage(
        tf,
        slot,
        pubkey,
        id,
        mark_alive,
        account_data_size,
        100,
        storage_access,
    )
}

define_accounts_db_test!(test_accountsdb_add_root, |db| {
    let key = Pubkey::default();
    let account0 = AccountSharedData::new(1, 0, &key);
    db.store_for_tests((0, [(&key, &account0)].as_slice()));
    db.add_root(0);
    let ancestors = vec![(1, 1)].into_iter().collect();
    assert_eq!(
        db.load_without_fixed_root(&ancestors, &key),
        Some((account0, 0))
    );
});

define_accounts_db_test!(test_accountsdb_latest_ancestor, |db| {
    let key = Pubkey::default();
    let account0 = AccountSharedData::new(1, 0, &key);
    db.store_for_tests((0, [(&key, &account0)].as_slice()));
    let account1 = AccountSharedData::new(0, 0, &key);
    db.store_for_tests((1, [(&key, &account1)].as_slice()));
    let ancestors = vec![(1, 1)].into_iter().collect();
    assert_eq!(
        &db.load_without_fixed_root(&ancestors, &key).unwrap().0,
        &account1
    );
    let ancestors = vec![(1, 1), (0, 0)].into_iter().collect();
    assert_eq!(
        &db.load_without_fixed_root(&ancestors, &key).unwrap().0,
        &account1
    );
    let mut accounts = Vec::new();
    db.scan_accounts(
        &ancestors,
        0,
        |scan_result| {
            if let Some((_, account, _)) = scan_result {
                accounts.push(account);
            }
        },
        &ScanConfig::default(),
    )
    .expect("should scan accounts");
    assert_eq!(accounts, vec![account1]);
});

define_accounts_db_test!(test_accountsdb_latest_ancestor_with_root, |db| {
    let key = Pubkey::default();
    let account0 = AccountSharedData::new(1, 0, &key);
    db.store_for_tests((0, [(&key, &account0)].as_slice()));
    let account1 = AccountSharedData::new(0, 0, &key);
    db.store_for_tests((1, [(&key, &account1)].as_slice()));
    db.add_root(0);
    let ancestors = vec![(1, 1)].into_iter().collect();
    assert_eq!(
        &db.load_without_fixed_root(&ancestors, &key).unwrap().0,
        &account1
    );
    let ancestors = vec![(1, 1), (0, 0)].into_iter().collect();
    assert_eq!(
        &db.load_without_fixed_root(&ancestors, &key).unwrap().0,
        &account1
    );
});

define_accounts_db_test!(test_accountsdb_root_one_slot, |db| {
    let key = Pubkey::default();
    let account0 = AccountSharedData::new(1, 0, &key);
    // store value 1 in the "root", i.e. db zero
    db.store_for_tests((0, [(&key, &account0)].as_slice()));
    // now we have:
    //
    //                    root0 -> key.lamports==1
    //                     /  \
    //                    /    \
    //  key.lamports==0 <- slot1 \
    //                          slot2 -> key.lamports==1
    //                                   (via root0)
    // store value 0 in one child
    let account1 = AccountSharedData::new(0, 0, &key);
    db.store_for_tests((1, [(&key, &account1)].as_slice()));
    // masking accounts is done at the Accounts level, at accountsDB we see
    // original account (but could also accept "None", which is implemented
    // at the Accounts level)
    let ancestors = vec![(0, 0), (1, 1)].into_iter().collect();
    assert_eq!(
        &db.load_without_fixed_root(&ancestors, &key).unwrap().0,
        &account1
    );
    // we should see 1 token in slot 2
    let ancestors = vec![(0, 0), (2, 2)].into_iter().collect();
    assert_eq!(
        &db.load_without_fixed_root(&ancestors, &key).unwrap().0,
        &account0
    );
    db.add_root(0);
    let ancestors = vec![(1, 1)].into_iter().collect();
    assert_eq!(
        db.load_without_fixed_root(&ancestors, &key),
        Some((account1, 1))
    );
    let ancestors = vec![(2, 2)].into_iter().collect();
    assert_eq!(
        db.load_without_fixed_root(&ancestors, &key),
        Some((account0, 0))
    ); // original value
});

define_accounts_db_test!(test_accountsdb_add_root_many, |db| {
    let mut pubkeys: Vec<Pubkey> = vec![];
    db.create_account(&mut pubkeys, 0, 100, 0, 0);
    for _ in 1..100 {
        let idx = thread_rng().gen_range(0..99);
        let ancestors = vec![(0, 0)].into_iter().collect();
        let account = db
            .load_without_fixed_root(&ancestors, &pubkeys[idx])
            .unwrap();
        let default_account = AccountSharedData::from(Account {
            lamports: (idx + 1) as u64,
            ..Account::default()
        });
        assert_eq!((default_account, 0), account);
    }
    db.add_root(0);
    // check that all the accounts appear with a new root
    for _ in 1..100 {
        let idx = thread_rng().gen_range(0..99);
        let ancestors = vec![(0, 0)].into_iter().collect();
        let account0 = db
            .load_without_fixed_root(&ancestors, &pubkeys[idx])
            .unwrap();
        let ancestors = vec![(1, 1)].into_iter().collect();
        let account1 = db
            .load_without_fixed_root(&ancestors, &pubkeys[idx])
            .unwrap();
        let default_account = AccountSharedData::from(Account {
            lamports: (idx + 1) as u64,
            ..Account::default()
        });
        assert_eq!(&default_account, &account0.0);
        assert_eq!(&default_account, &account1.0);
    }
});

define_accounts_db_test!(test_accountsdb_count_stores, |db| {
    let mut pubkeys: Vec<Pubkey> = vec![];
    db.create_account(&mut pubkeys, 0, 2, DEFAULT_FILE_SIZE as usize / 3, 0);
    db.add_root_and_flush_write_cache(0);
    db.check_storage(0, 2, 2);
    let pubkey = solana_pubkey::new_rand();
    let account = AccountSharedData::new(1, DEFAULT_FILE_SIZE as usize / 3, &pubkey);
    db.store_for_tests((1, [(&pubkey, &account)].as_slice()));
    db.store_for_tests((1, [(&pubkeys[0], &account)].as_slice()));
    // adding root doesn't change anything
    db.add_root_and_flush_write_cache(1);
    {
        let slot_0_store = &db.storage.get_slot_storage_entry(0).unwrap();
        let slot_1_store = &db.storage.get_slot_storage_entry(1).unwrap();
        // With obsolete accounts enabled, flush_write_cache will clean pubkeys in slot0
        // when flushing slot1
        if db.mark_obsolete_accounts == MarkObsoleteAccounts::Enabled {
            assert_eq!(slot_0_store.count(), 1);
        } else {
            assert_eq!(slot_0_store.count(), 2);
        }
        assert_eq!(slot_1_store.count(), 2);
        assert_eq!(slot_0_store.accounts_count(), 2);
        assert_eq!(slot_1_store.accounts_count(), 2);
    }
    // overwrite old rooted account version; only slot_0_store.count() should be
    // decremented
    // slot 2 is not a root and should be ignored by clean
    db.store_for_tests((2, [(&pubkeys[0], &account)].as_slice()));
    db.clean_accounts_for_tests();
    {
        let slot_0_store = &db.storage.get_slot_storage_entry(0).unwrap();
        let slot_1_store = &db.storage.get_slot_storage_entry(1).unwrap();
        assert_eq!(slot_0_store.count(), 1);
        assert_eq!(slot_1_store.count(), 2);
        assert_eq!(slot_0_store.accounts_count(), 2);
        assert_eq!(slot_1_store.accounts_count(), 2);
    }
});

define_accounts_db_test!(test_accounts_unsquashed, |db0| {
    let key = Pubkey::default();
    // 1 token in the "root", i.e. db zero
    let account0 = AccountSharedData::new(1, 0, &key);
    db0.store_for_tests((0, [(&key, &account0)].as_slice()));
    // 0 lamports in the child
    let account1 = AccountSharedData::new(0, 0, &key);
    db0.store_for_tests((1, [(&key, &account1)].as_slice()));
    // masking accounts is done at the Accounts level, at accountsDB we see
    // original account
    let ancestors = vec![(0, 0), (1, 1)].into_iter().collect();
    assert_eq!(
        db0.load_without_fixed_root(&ancestors, &key),
        Some((account1, 1))
    );
    let ancestors = vec![(0, 0)].into_iter().collect();
    assert_eq!(
        db0.load_without_fixed_root(&ancestors, &key),
        Some((account0, 0))
    );
});

/// Test to verify that reclaiming old storages during flush works correctly.
/// Creates multiple storages with accounts, flushes them, and then creates a new storage
/// that invalidates some of the old accounts. The test checks that one of the old storages
/// is reclaimed once it is fully invalidated.
#[test]
fn test_flush_slots_with_reclaim_old_slots() {
    let accounts = AccountsDb::new_single_for_tests();
    let mut pubkeys = vec![];
    // Create and flush 5 slots with 5 accounts each
    for slot in 0..5 {
        let mut slot_pubkeys = vec![];
        for _ in 0..5 {
            let pubkey = solana_pubkey::new_rand();
            let account = AccountSharedData::new(slot + 1, 0, &pubkey);
            accounts.store_for_tests((slot, [(&pubkey, &account)].as_slice()));
            slot_pubkeys.push(pubkey);
        }
        pubkeys.push(slot_pubkeys);
        accounts.add_root_and_flush_write_cache(slot);
    }
    // Create another slot which invalidates 5 accounts from the first slot,
    // 4 accounts from the second slot, etc.
    let new_slot = 5;
    for (slot, slot_pubkeys) in pubkeys.iter().enumerate() {
        for pubkey in slot_pubkeys.iter().take(5 - slot) {
            let account = AccountSharedData::new(new_slot + 1, 0, pubkey);
            accounts.store_for_tests((new_slot, [(pubkey, &account)].as_slice()));
        }
    }
    // Get the accounts from the write cache slot
    let accounts_list: Vec<(_, _)> = accounts
        .accounts_cache
        .slot_cache(new_slot)
        .unwrap()
        .iter()
        .map(|iter_item| {
            let pubkey = *iter_item.key();
            let account = iter_item.value().account.clone();
            (pubkey, account)
        })
        .collect();
    let storage = accounts.create_and_insert_store(new_slot, 4096, "test_flush_slots");
    accounts.accounts_index.add_root(new_slot);
    // Flush this storage directly using _store_accounts_frozen so that
    // UpsertReclaim::ReclaimOldSlots can be passed in.
    accounts._store_accounts_frozen(
        (new_slot, &accounts_list[..]),
        &storage,
        UpsertReclaim::ReclaimOldSlots,
        UpdateIndexThreadSelection::Inline,
    );
    // Remove the flushed slot from the cache
    assert!(accounts.accounts_cache.remove_slot(new_slot).is_some());
    // Verify that the storage for the first slot has been removed
    assert!(accounts.storage.get_slot_storage_entry(0).is_none());
    for slot in 1..5 {
        assert!(accounts.storage.get_slot_storage_entry(slot).is_some());
        // Verify that the obsolete accounts for the remaining slots are correct
        let storage = accounts.storage.get_slot_storage_entry(slot).unwrap();
        assert_eq!(
            storage
                .obsolete_accounts_read_lock()
                .filter_obsolete_accounts(Some(new_slot))
                .count() as u64,
            5 - slot
        );
    }
    assert!(accounts.storage.get_slot_storage_entry(new_slot).is_some());
}

fn run_test_remove_unrooted_slot(is_cached: bool, db: AccountsDb) {
    let unrooted_slot = 9;
    let unrooted_bank_id = 9;
    let key = Pubkey::default();
    let account0 = AccountSharedData::new(1, 0, &key);
    let ancestors = vec![(unrooted_slot, 1)].into_iter().collect();
    assert!(!db.accounts_index.contains(&key));
    if is_cached {
        db.store_for_tests((unrooted_slot, &[(&key, &account0)][..]));
        assert!(db.accounts_cache.contains(unrooted_slot));
    } else {
        let file_size = 4096; // value doesn't need to be exact, just big enough to hold account0
        let storage = db.create_and_insert_store(unrooted_slot, file_size, "");
        db.store_accounts_frozen(
            (unrooted_slot, [(&key, &account0)].as_slice()),
            &storage,
            UpdateIndexThreadSelection::Inline,
        );
        assert!(db.storage.get_slot_storage_entry(unrooted_slot).is_some());
    }
    assert!(!db.accounts_index.is_alive_root(unrooted_slot));
    assert!(db.accounts_index.contains(&key));
    db.assert_load_account(unrooted_slot, key, 1);
    // Purge the slot
    db.remove_unrooted_slots(&[(unrooted_slot, unrooted_bank_id)]);
    assert!(db.load_without_fixed_root(&ancestors, &key).is_none());
    assert!(db.accounts_cache.slot_cache(unrooted_slot).is_none());
    assert!(db.storage.get_slot_storage_entry(unrooted_slot).is_none());
    assert!(!db.accounts_index.contains(&key));
    // Test we can store for the same slot again and get the right information
    let account0 = AccountSharedData::new(2, 0, &key);
    db.store_for_tests((unrooted_slot, [(&key, &account0)].as_slice()));
    db.assert_load_account(unrooted_slot, key, 2);
}

define_accounts_db_test!(test_remove_unrooted_slot_cached, |db| {
    run_test_remove_unrooted_slot(true, db);
});

define_accounts_db_test!(test_remove_unrooted_slot_storage, |db| {
    run_test_remove_unrooted_slot(false, db);
});

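/// Repeatedly picks a random pubkey from `pubkeys[..range]`, reloads it at `slot`, adds one
/// lamport, re-stores it, and sanity-checks the reloaded value.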
fn update_accounts(accounts: &AccountsDb, pubkeys: &[Pubkey], slot: Slot, range: usize) {
    for _ in 1..1000 {
        let idx = thread_rng().gen_range(0..range);
        let ancestors = vec![(slot, 0)].into_iter().collect();
        if let Some((mut account, _)) =
            accounts.load_without_fixed_root(&ancestors, &pubkeys[idx])
        {
            account.checked_add_lamports(1).unwrap();
            accounts.store_for_tests((slot, [(&pubkeys[idx], &account)].as_slice()));
            if account.is_zero_lamport() {
                let ancestors = vec![(slot, 0)].into_iter().collect();
                assert!(accounts
                    .load_without_fixed_root(&ancestors, &pubkeys[idx])
                    .is_none());
            } else {
                let default_account = AccountSharedData::from(Account {
                    lamports: account.lamports(),
                    ..Account::default()
                });
                assert_eq!(default_account, account);
            }
        }
    }
}

  746. #[test]
  747. fn test_account_one() {
  748. let (_accounts_dirs, paths) = get_temp_accounts_paths(1).unwrap();
  749. let db = AccountsDb::new_for_tests(paths);
  750. let mut pubkeys: Vec<Pubkey> = vec![];
  751. db.create_account(&mut pubkeys, 0, 1, 0, 0);
  752. let ancestors = vec![(0, 0)].into_iter().collect();
  753. let account = db.load_without_fixed_root(&ancestors, &pubkeys[0]).unwrap();
  754. let default_account = AccountSharedData::from(Account {
  755. lamports: 1,
  756. ..Account::default()
  757. });
  758. assert_eq!((default_account, 0), account);
  759. }
  760. #[test]
  761. fn test_account_many() {
  762. let (_accounts_dirs, paths) = get_temp_accounts_paths(2).unwrap();
  763. let db = AccountsDb::new_for_tests(paths);
  764. let mut pubkeys: Vec<Pubkey> = vec![];
  765. db.create_account(&mut pubkeys, 0, 100, 0, 0);
  766. db.check_accounts(&pubkeys, 0, 100, 1);
  767. }
  768. #[test]
  769. fn test_account_update() {
  770. let accounts = AccountsDb::new_single_for_tests();
  771. let mut pubkeys: Vec<Pubkey> = vec![];
  772. accounts.create_account(&mut pubkeys, 0, 100, 0, 0);
  773. update_accounts(&accounts, &pubkeys, 0, 99);
  774. accounts.add_root_and_flush_write_cache(0);
  775. accounts.check_storage(0, 100, 100);
  776. }
  777. #[test]
  778. fn test_account_grow_many() {
  779. let (_accounts_dir, paths) = get_temp_accounts_paths(2).unwrap();
  780. let size = 4096;
  781. let accounts = AccountsDb {
  782. file_size: size,
  783. ..AccountsDb::new_for_tests(paths)
  784. };
  785. let mut keys = vec![];
  786. for i in 0..9 {
  787. let key = solana_pubkey::new_rand();
  788. let account = AccountSharedData::new(i + 1, size as usize / 4, &key);
  789. accounts.store_for_tests((0, [(&key, &account)].as_slice()));
  790. keys.push(key);
  791. }
  792. let ancestors = vec![(0, 0)].into_iter().collect();
  793. for (i, key) in keys.iter().enumerate() {
  794. assert_eq!(
  795. accounts
  796. .load_without_fixed_root(&ancestors, key)
  797. .unwrap()
  798. .0
  799. .lamports(),
  800. (i as u64) + 1
  801. );
  802. }
  803. let mut append_vec_histogram = HashMap::new();
  804. let mut all_slots = vec![];
  805. for slot_storage in accounts.storage.iter() {
  806. all_slots.push(slot_storage.0)
  807. }
  808. for slot in all_slots {
  809. *append_vec_histogram.entry(slot).or_insert(0) += 1;
  810. }
  811. for count in append_vec_histogram.values() {
  812. assert!(*count >= 2);
  813. }
  814. }
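// Store two large accounts plus many duplicate writes in slot 0, flushing at a different point
// on each pass, and verify both accounts remain loadable.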
  815. #[test]
  816. fn test_account_grow() {
  817. for pass in 0..27 {
  818. let accounts = AccountsDb::new_single_for_tests();
  819. let pubkey1 = solana_pubkey::new_rand();
  820. let account1 = AccountSharedData::new(1, DEFAULT_FILE_SIZE as usize / 2, &pubkey1);
  821. accounts.store_for_tests((0, [(&pubkey1, &account1)].as_slice()));
  822. if pass == 0 {
  823. accounts.add_root_and_flush_write_cache(0);
  824. let store = &accounts.storage.get_slot_storage_entry(0).unwrap();
  825. assert_eq!(store.count(), 1);
  826. continue;
  827. }
  828. let pubkey2 = solana_pubkey::new_rand();
  829. let account2 = AccountSharedData::new(1, DEFAULT_FILE_SIZE as usize / 2, &pubkey2);
  830. accounts.store_for_tests((0, [(&pubkey2, &account2)].as_slice()));
  831. if pass == 1 {
  832. accounts.add_root_and_flush_write_cache(0);
  833. assert_eq!(accounts.storage.len(), 1);
  834. let store = &accounts.storage.get_slot_storage_entry(0).unwrap();
  835. assert_eq!(store.count(), 2);
  836. continue;
  837. }
  838. let ancestors = vec![(0, 0)].into_iter().collect();
  839. assert_eq!(
  840. accounts
  841. .load_without_fixed_root(&ancestors, &pubkey1)
  842. .unwrap()
  843. .0,
  844. account1
  845. );
  846. assert_eq!(
  847. accounts
  848. .load_without_fixed_root(&ancestors, &pubkey2)
  849. .unwrap()
  850. .0,
  851. account2
  852. );
  853. // lots of writes, but they are all duplicates
  854. for i in 0..25 {
  855. accounts.store_for_tests((0, [(&pubkey1, &account1)].as_slice()));
  856. let flush = pass == i + 2;
  857. if flush {
  858. accounts.add_root_and_flush_write_cache(0);
  859. assert_eq!(accounts.storage.len(), 1);
  860. }
  861. let ancestors = vec![(0, 0)].into_iter().collect();
  862. assert_eq!(
  863. accounts
  864. .load_without_fixed_root(&ancestors, &pubkey1)
  865. .unwrap()
  866. .0,
  867. account1
  868. );
  869. assert_eq!(
  870. accounts
  871. .load_without_fixed_root(&ancestors, &pubkey2)
  872. .unwrap()
  873. .0,
  874. account2
  875. );
  876. if flush {
  877. break;
  878. }
  879. }
  880. }
  881. }
  882. #[test]
  883. fn test_lazy_gc_slot() {
  884. agave_logger::setup();
  885. // Only run this test with mark obsolete accounts disabled as garbage collection
  886. // is not lazy with mark obsolete accounts enabled
  887. let accounts = AccountsDb::new_with_config(
  888. Vec::new(),
  889. AccountsDbConfig {
  890. mark_obsolete_accounts: MarkObsoleteAccounts::Disabled,
  891. ..ACCOUNTS_DB_CONFIG_FOR_TESTING
  892. },
  893. None,
  894. Arc::default(),
  895. );
  896. let pubkey = solana_pubkey::new_rand();
  897. let account = AccountSharedData::new(1, 0, AccountSharedData::default().owner());
  898. //store an account
  899. accounts.store_for_tests((0, [(&pubkey, &account)].as_slice()));
  900. accounts.add_root_and_flush_write_cache(0);
  901. let ancestors = vec![(0, 0)].into_iter().collect();
  902. let id = accounts
  903. .accounts_index
  904. .get_with_and_then(
  905. &pubkey,
  906. Some(&ancestors),
  907. None,
  908. false,
  909. |(_slot, account_info)| account_info.store_id(),
  910. )
  911. .unwrap();
  912. //slot is still there, since gc is lazy
  913. assert_eq!(accounts.storage.get_slot_storage_entry(0).unwrap().id(), id);
  914. //store causes clean
  915. accounts.store_for_tests((1, [(&pubkey, &account)].as_slice()));
  916. //slot is gone
  917. accounts.print_accounts_stats("pre-clean");
  918. accounts.add_root_and_flush_write_cache(1);
  919. assert!(accounts.storage.get_slot_storage_entry(0).is_some());
  920. accounts.clean_accounts_for_tests();
  921. assert!(accounts.storage.get_slot_storage_entry(0).is_none());
  922. //new value is there
  923. let ancestors = vec![(1, 1)].into_iter().collect();
  924. assert_eq!(
  925. accounts.load_without_fixed_root(&ancestors, &pubkey),
  926. Some((account, 1))
  927. );
  928. }
  929. #[test]
  930. fn test_clean_zero_lamport_and_dead_slot() {
  931. agave_logger::setup();
  932. let accounts = AccountsDb::new_single_for_tests();
  933. let pubkey1 = solana_pubkey::new_rand();
  934. let pubkey2 = solana_pubkey::new_rand();
  935. let account = AccountSharedData::new(1, 1, AccountSharedData::default().owner());
  936. let zero_lamport_account = AccountSharedData::new(0, 0, AccountSharedData::default().owner());
  937. // Store two accounts
  938. accounts.store_for_tests((0, [(&pubkey1, &account)].as_slice()));
  939. accounts.store_for_tests((0, [(&pubkey2, &account)].as_slice()));
  940. // Make sure both accounts are in the same AppendVec in slot 0, which
  941. // will prevent pubkey1 from being cleaned up later even when it's a
  942. // zero-lamport account
  943. let ancestors = vec![(0, 1)].into_iter().collect();
  944. let (slot1, account_info1) = accounts
  945. .accounts_index
  946. .get_with_and_then(
  947. &pubkey1,
  948. Some(&ancestors),
  949. None,
  950. false,
  951. |(slot, account_info)| (slot, account_info),
  952. )
  953. .unwrap();
  954. let (slot2, account_info2) = accounts
  955. .accounts_index
  956. .get_with_and_then(
  957. &pubkey2,
  958. Some(&ancestors),
  959. None,
  960. false,
  961. |(slot, account_info)| (slot, account_info),
  962. )
  963. .unwrap();
  964. assert_eq!(slot1, 0);
  965. assert_eq!(slot1, slot2);
  966. assert_eq!(account_info1.storage_location(), StorageLocation::Cached);
  967. assert_eq!(
  968. account_info1.storage_location(),
  969. account_info2.storage_location()
  970. );
  971. // Update account 1 in slot 1
  972. accounts.store_for_tests((1, [(&pubkey1, &account)].as_slice()));
  973. // Update account 1 as zero lamports account
  974. accounts.store_for_tests((2, [(&pubkey1, &zero_lamport_account)].as_slice()));
  975. // Pubkey 1 was the only account in slot 1, and it was updated in slot 2, so
  976. // slot 1 should be purged
  977. accounts.add_root_and_flush_write_cache(0);
  978. accounts.add_root_and_flush_write_cache(1);
  979. accounts.add_root_and_flush_write_cache(2);
  980. // Slot 1 should be removed, slot 0 cannot be removed because it still has
  981. // the latest update for pubkey 2
  982. accounts.clean_accounts_for_tests();
  983. assert!(accounts.storage.get_slot_storage_entry(0).is_some());
  984. assert!(accounts.storage.get_slot_storage_entry(1).is_none());
985. // Slot 1 should be cleaned because all its accounts are
  986. // zero lamports, and are not present in any other slot's
  987. // storage entries
  988. assert_eq!(accounts.alive_account_count_in_slot(1), 0);
  989. }
  990. #[test]
  991. fn test_clean_dead_slot_with_obsolete_accounts() {
  992. agave_logger::setup();
993. // This test triggers a scenario in reclaim_accounts where the entire slot is reclaimed.
994. // When an entire slot is reclaimed, the pubkeys are normally unreffed, whereas when individual
995. // accounts are reclaimed the pubkeys are not unreffed.
996. // Obsolete accounts are already unreffed, so they should not be unreffed again.
  997. let accounts = AccountsDb::new_with_config(
  998. Vec::new(),
  999. AccountsDbConfig {
  1000. mark_obsolete_accounts: MarkObsoleteAccounts::Enabled,
  1001. ..ACCOUNTS_DB_CONFIG_FOR_TESTING
  1002. },
  1003. None,
  1004. Arc::default(),
  1005. );
  1006. let pubkey = solana_pubkey::new_rand();
  1007. let pubkey2 = solana_pubkey::new_rand();
  1008. let account = AccountSharedData::new(1, 1, AccountSharedData::default().owner());
  1009. let zero_lamport_account = AccountSharedData::new(0, 0, AccountSharedData::default().owner());
  1010. accounts.set_latest_full_snapshot_slot(2);
  1011. // Store pubkey1 and pubkey2 in slot 0
  1012. accounts.store_for_tests((0, [(&pubkey, &account), (&pubkey2, &account)].as_slice()));
  1013. // Update pubkey1 and make pubkey2 a zero lamport account in slot 1
  1014. accounts.store_for_tests((
  1015. 1,
  1016. [(&pubkey, &account), (&pubkey2, &zero_lamport_account)].as_slice(),
  1017. ));
1018. // Update pubkey1 again in slot 2
  1019. accounts.store_for_tests((2, [(&pubkey, &account)].as_slice()));
  1020. // Flush the slots individually to avoid reclaims
  1021. accounts.add_root_and_flush_write_cache(0);
  1022. accounts.add_root_and_flush_write_cache(1);
  1023. accounts.add_root_and_flush_write_cache(2);
  1024. // Slot 1 should not be removed as it has the zero lamport account
  1025. assert!(accounts.storage.get_slot_storage_entry(1).is_some());
  1026. let slot = accounts.storage.get_slot_storage_entry(1).unwrap();
  1027. // Ensure that slot1 also still contains the obsolete account
  1028. assert_eq!(
  1029. slot.obsolete_accounts_read_lock()
  1030. .filter_obsolete_accounts(None)
  1031. .count(),
  1032. 1
  1033. );
  1034. // Ref count for pubkey1 should be 1 as obsolete accounts are enabled
  1035. accounts.assert_ref_count(&pubkey, 1);
  1036. // Clean, which will remove slot1
  1037. accounts.clean_accounts_for_tests();
  1038. assert!(accounts.storage.get_slot_storage_entry(0).is_none());
  1039. assert!(accounts.storage.get_slot_storage_entry(1).is_none());
  1040. // Ref count for pubkey should be 1. It was NOT decremented during clean_accounts_for_tests
  1041. // despite slot 1 being removed, because the account was already obsolete
  1042. accounts.assert_ref_count(&pubkey, 1);
  1043. }
  1044. #[test]
  1045. #[should_panic(expected = "ref count expected to be zero")]
  1046. fn test_remove_zero_lamport_multi_ref_accounts_panic() {
  1047. let accounts = AccountsDb::new_single_for_tests();
  1048. let pubkey_zero = Pubkey::from([1; 32]);
  1049. let one_lamport_account = AccountSharedData::new(1, 0, AccountSharedData::default().owner());
  1050. let zero_lamport_account = AccountSharedData::new(0, 0, AccountSharedData::default().owner());
  1051. let slot = 1;
  1052. accounts.store_for_tests((slot, [(&pubkey_zero, &one_lamport_account)].as_slice()));
  1053. // Flush without cleaning to avoid reclaiming pubkey_zero early
  1054. accounts.add_root(1);
  1055. accounts.flush_rooted_accounts_cache(Some(slot), false);
  1056. accounts.store_for_tests((slot + 1, [(&pubkey_zero, &zero_lamport_account)].as_slice()));
  1057. // Flush without cleaning to avoid reclaiming pubkey_zero early
  1058. accounts.add_root(2);
  1059. accounts.flush_rooted_accounts_cache(Some(slot + 1), false);
  1060. // This should panic because there are 2 refs for pubkey_zero.
  1061. accounts.remove_zero_lamport_single_ref_accounts_after_shrink(
  1062. &[&pubkey_zero],
  1063. slot,
  1064. &ShrinkStats::default(),
  1065. true,
  1066. );
  1067. }
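// Exercise remove_zero_lamport_single_ref_accounts_after_shrink across three passes that vary
// whether the zero-lamport pubkey also lives in a later slot and whether that later slot was
// flushed without clean.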
  1068. #[test]
  1069. fn test_remove_zero_lamport_single_ref_accounts_after_shrink() {
  1070. for pass in 0..3 {
  1071. let accounts = AccountsDb::new_single_for_tests();
  1072. let pubkey_zero = Pubkey::from([1; 32]);
  1073. let pubkey2 = Pubkey::from([2; 32]);
  1074. let account = AccountSharedData::new(1, 0, AccountSharedData::default().owner());
  1075. let zero_lamport_account =
  1076. AccountSharedData::new(0, 0, AccountSharedData::default().owner());
  1077. let slot = 1;
  1078. accounts.store_for_tests((
  1079. slot,
  1080. [(&pubkey_zero, &zero_lamport_account), (&pubkey2, &account)].as_slice(),
  1081. ));
  1082. // Simulate rooting the zero-lamport account, writes it to storage
  1083. accounts.add_root_and_flush_write_cache(slot);
  1084. if pass > 0 {
  1085. // store in write cache
  1086. accounts
  1087. .store_for_tests((slot + 1, [(&pubkey_zero, &zero_lamport_account)].as_slice()));
  1088. if pass == 2 {
  1089. // This test pass is still relevant with obsolete accounts enabled, but can be
  1090. // removed if all scenarios where flush_write_cache doesn't clean are eliminated.
  1091. // add root and flush without clean (causing ref count to increase)
  1092. accounts.add_root(slot + 1);
  1093. accounts.flush_rooted_accounts_cache(None, false);
  1094. }
  1095. }
  1096. accounts.accounts_index.get_and_then(&pubkey_zero, |entry| {
  1097. let expected_ref_count = if pass < 2 { 1 } else { 2 };
  1098. assert_eq!(entry.unwrap().ref_count(), expected_ref_count, "{pass}");
  1099. let expected_slot_list = if pass < 1 { 1 } else { 2 };
  1100. assert_eq!(entry.unwrap().slot_list_lock_read_len(), expected_slot_list);
  1101. (false, ())
  1102. });
  1103. accounts.accounts_index.get_and_then(&pubkey2, |entry| {
  1104. assert!(entry.is_some());
  1105. (false, ())
  1106. });
  1107. let zero_lamport_single_ref_pubkeys = if pass < 2 { vec![&pubkey_zero] } else { vec![] };
  1108. accounts.remove_zero_lamport_single_ref_accounts_after_shrink(
  1109. &zero_lamport_single_ref_pubkeys,
  1110. slot,
  1111. &ShrinkStats::default(),
  1112. true,
  1113. );
  1114. accounts.accounts_index.get_and_then(&pubkey_zero, |entry| {
  1115. match pass {
  1116. 0 => {
  1117. // should not exist in index at all
  1118. assert!(entry.is_none(), "{pass}");
  1119. }
  1120. 1 => {
  1121. // alive only in slot + 1
  1122. assert_eq!(entry.unwrap().slot_list_lock_read_len(), 1);
  1123. assert_eq!(
  1124. entry
  1125. .unwrap()
  1126. .slot_list_read_lock()
  1127. .first()
  1128. .map(|(s, _)| s)
  1129. .cloned()
  1130. .unwrap(),
  1131. slot + 1
  1132. );
  1133. let expected_ref_count = 0;
  1134. assert_eq!(
  1135. entry.map(|e| e.ref_count()),
  1136. Some(expected_ref_count),
  1137. "{pass}"
  1138. );
  1139. }
  1140. 2 => {
  1141. // alive in both slot, slot + 1
  1142. assert_eq!(entry.unwrap().slot_list_lock_read_len(), 2);
  1143. let slots = entry
  1144. .unwrap()
  1145. .slot_list_read_lock()
  1146. .iter()
  1147. .map(|(s, _)| s)
  1148. .cloned()
  1149. .collect::<Vec<_>>();
  1150. assert_eq!(slots, vec![slot, slot + 1]);
  1151. let expected_ref_count = 2;
  1152. assert_eq!(
  1153. entry.map(|e| e.ref_count()),
  1154. Some(expected_ref_count),
  1155. "{pass}"
  1156. );
  1157. }
  1158. _ => {
  1159. unreachable!("Shouldn't reach here.")
  1160. }
  1161. }
  1162. (false, ())
  1163. });
  1164. accounts.accounts_index.get_and_then(&pubkey2, |entry| {
  1165. assert!(entry.is_some(), "{pass}");
  1166. (false, ())
  1167. });
  1168. }
  1169. }
  1170. #[test]
  1171. fn test_shrink_zero_lamport_single_ref_account() {
  1172. agave_logger::setup();
  1173. // note that 'None' checks the case based on the default value of `latest_full_snapshot_slot` in `AccountsDb`
  1174. for latest_full_snapshot_slot in [None, Some(0), Some(1), Some(2)] {
  1175. // store a zero and non-zero lamport account
1176. // make sure clean marks the ref_count=1 zero-lamport account dead and removes its pubkey from the index completely
  1177. let accounts = AccountsDb::new_single_for_tests();
  1178. let pubkey_zero = Pubkey::from([1; 32]);
  1179. let pubkey2 = Pubkey::from([2; 32]);
  1180. let account = AccountSharedData::new(1, 0, AccountSharedData::default().owner());
  1181. let zero_lamport_account =
  1182. AccountSharedData::new(0, 0, AccountSharedData::default().owner());
  1183. let slot = 1;
  1184. // Store a zero-lamport account and a non-zero lamport account
  1185. accounts.store_for_tests((
  1186. slot,
  1187. [(&pubkey_zero, &zero_lamport_account), (&pubkey2, &account)].as_slice(),
  1188. ));
  1189. // Simulate rooting the zero-lamport account, should be a
  1190. // candidate for cleaning
  1191. accounts.add_root_and_flush_write_cache(slot);
  1192. // for testing, we need to cause shrink to think this will be productive.
  1193. // The zero lamport account isn't dead, but it can become dead inside shrink.
  1194. accounts
  1195. .storage
  1196. .get_slot_storage_entry(slot)
  1197. .unwrap()
  1198. .alive_bytes
  1199. .fetch_sub(aligned_stored_size(0), Ordering::Release);
  1200. if let Some(latest_full_snapshot_slot) = latest_full_snapshot_slot {
  1201. accounts.set_latest_full_snapshot_slot(latest_full_snapshot_slot);
  1202. }
  1203. // Shrink the slot. The behavior on the zero lamport account will depend on `latest_full_snapshot_slot`.
  1204. accounts.shrink_slot_forced(slot);
  1205. assert!(
  1206. accounts.storage.get_slot_storage_entry(1).is_some(),
  1207. "{latest_full_snapshot_slot:?}"
  1208. );
  1209. let expected_alive_count = if latest_full_snapshot_slot.unwrap_or(Slot::MAX) < slot {
  1210. // zero lamport account should NOT be dead in the index
  1211. assert!(
  1212. accounts
  1213. .accounts_index
  1214. .contains_with(&pubkey_zero, None, None),
  1215. "{latest_full_snapshot_slot:?}"
  1216. );
  1217. 2
  1218. } else {
  1219. // zero lamport account should be dead in the index
  1220. assert!(
  1221. !accounts
  1222. .accounts_index
  1223. .contains_with(&pubkey_zero, None, None),
  1224. "{latest_full_snapshot_slot:?}"
  1225. );
  1226. // the zero lamport account should be marked as dead
  1227. 1
  1228. };
  1229. assert_eq!(
  1230. accounts.alive_account_count_in_slot(slot),
  1231. expected_alive_count,
  1232. "{latest_full_snapshot_slot:?}"
  1233. );
  1234. // other account should still be alive
  1235. assert!(
  1236. accounts.accounts_index.contains_with(&pubkey2, None, None),
  1237. "{latest_full_snapshot_slot:?}"
  1238. );
  1239. assert!(
  1240. accounts.storage.get_slot_storage_entry(slot).is_some(),
  1241. "{latest_full_snapshot_slot:?}"
  1242. );
  1243. }
  1244. }
  1245. #[test]
  1246. fn test_clean_multiple_zero_lamport_decrements_index_ref_count() {
  1247. agave_logger::setup();
  1248. let accounts = AccountsDb::new_single_for_tests();
  1249. let pubkey1 = solana_pubkey::new_rand();
  1250. let pubkey2 = solana_pubkey::new_rand();
  1251. let zero_lamport_account = AccountSharedData::new(0, 0, AccountSharedData::default().owner());
  1252. // If there is no latest full snapshot, zero lamport accounts can be cleaned and removed
  1253. // immediately. Set latest full snapshot slot to zero to avoid cleaning zero lamport accounts
  1254. accounts.set_latest_full_snapshot_slot(0);
  1255. // Store 2 accounts in slot 0, then update account 1 in two more slots
  1256. accounts.store_for_tests((0, [(&pubkey1, &zero_lamport_account)].as_slice()));
  1257. accounts.store_for_tests((0, [(&pubkey2, &zero_lamport_account)].as_slice()));
  1258. accounts.store_for_tests((1, [(&pubkey1, &zero_lamport_account)].as_slice()));
  1259. accounts.store_for_tests((2, [(&pubkey1, &zero_lamport_account)].as_slice()));
  1260. // Root all slots
  1261. accounts.add_root_and_flush_write_cache(0);
  1262. accounts.add_root_and_flush_write_cache(1);
  1263. accounts.add_root_and_flush_write_cache(2);
  1264. // Account ref counts should match how many slots they were stored in
  1265. // Account 1 = 3 slots; account 2 = 1 slot
  1266. accounts.assert_ref_count(&pubkey1, 3);
  1267. accounts.assert_ref_count(&pubkey2, 1);
  1268. accounts.clean_accounts_for_tests();
  1269. // Slots 0 and 1 should each have been cleaned because all of their
  1270. // accounts are zero lamports
  1271. assert!(accounts.storage.get_slot_storage_entry(0).is_none());
  1272. assert!(accounts.storage.get_slot_storage_entry(1).is_none());
  1273. // Slot 2 only has a zero lamport account as well. But, calc_delete_dependencies()
  1274. // should exclude slot 2 from the clean due to changes in other slots
  1275. assert!(accounts.storage.get_slot_storage_entry(2).is_some());
  1276. // Index ref counts should be consistent with the slot stores. Account 1 ref count
  1277. // should be 1 since slot 2 is the only alive slot; account 2 should have a ref
  1278. // count of 0 due to slot 0 being dead
  1279. accounts.assert_ref_count(&pubkey1, 1);
  1280. accounts.assert_ref_count(&pubkey2, 0);
  1281. // Allow clean to clean any zero lamports up to and including slot 2
  1282. accounts.set_latest_full_snapshot_slot(2);
  1283. accounts.clean_accounts_for_tests();
  1284. // Slot 2 will now be cleaned, which will leave account 1 with a ref count of 0
  1285. assert!(accounts.storage.get_slot_storage_entry(2).is_none());
  1286. accounts.assert_ref_count(&pubkey1, 0);
  1287. }
  1288. #[test]
  1289. fn test_clean_zero_lamport_and_old_roots() {
  1290. agave_logger::setup();
  1291. let accounts = AccountsDb::new_single_for_tests();
  1292. let pubkey = solana_pubkey::new_rand();
  1293. let account = AccountSharedData::new(1, 0, AccountSharedData::default().owner());
  1294. let zero_lamport_account = AccountSharedData::new(0, 0, AccountSharedData::default().owner());
1295. // Store an account, then overwrite it with a zero-lamport account in slot 1
  1296. accounts.store_for_tests((0, [(&pubkey, &account)].as_slice()));
  1297. accounts.store_for_tests((1, [(&pubkey, &zero_lamport_account)].as_slice()));
  1298. // Simulate rooting the zero-lamport account, should be a
  1299. // candidate for cleaning
  1300. accounts.add_root_and_flush_write_cache(0);
  1301. accounts.add_root_and_flush_write_cache(1);
  1302. // Slot 0 should be removed, and
  1303. // zero-lamport account should be cleaned
  1304. accounts.clean_accounts_for_tests();
  1305. assert!(accounts.storage.get_slot_storage_entry(0).is_none());
  1306. assert!(accounts.storage.get_slot_storage_entry(1).is_none());
1307. // Slot 0 should be cleaned because all its accounts have been
  1308. // updated in the rooted slot 1
  1309. assert_eq!(accounts.alive_account_count_in_slot(0), 0);
1310. // Slot 1 should be cleaned because all its accounts are
  1311. // zero lamports, and are not present in any other slot's
  1312. // storage entries
  1313. assert_eq!(accounts.alive_account_count_in_slot(1), 0);
1314. // The zero-lamport account should no longer exist in the accounts index
  1315. // because it has been removed
  1316. assert!(!accounts.accounts_index.contains_with(&pubkey, None, None));
  1317. }
  1318. #[test_case(MarkObsoleteAccounts::Enabled)]
  1319. #[test_case(MarkObsoleteAccounts::Disabled)]
  1320. fn test_clean_old_with_normal_account(mark_obsolete_accounts: MarkObsoleteAccounts) {
  1321. agave_logger::setup();
  1322. let accounts = AccountsDb::new_with_config(
  1323. Vec::new(),
  1324. AccountsDbConfig {
  1325. mark_obsolete_accounts,
  1326. ..ACCOUNTS_DB_CONFIG_FOR_TESTING
  1327. },
  1328. None,
  1329. Arc::default(),
  1330. );
  1331. let pubkey = solana_pubkey::new_rand();
  1332. let account = AccountSharedData::new(1, 0, AccountSharedData::default().owner());
  1333. //store an account
  1334. accounts.store_for_tests((0, [(&pubkey, &account)].as_slice()));
  1335. accounts.store_for_tests((1, [(&pubkey, &account)].as_slice()));
1336. // simulate slots are rooted after a while
  1337. accounts.add_root_and_flush_write_cache(0);
  1338. accounts.add_root_and_flush_write_cache(1);
  1339. assert_eq!(accounts.alive_account_count_in_slot(1), 1);
  1340. // With obsolete accounts enabled, slot 0 is cleaned during flush
  1341. if mark_obsolete_accounts == MarkObsoleteAccounts::Disabled {
  1342. assert_eq!(accounts.alive_account_count_in_slot(0), 1);
  1343. accounts.clean_accounts_for_tests();
  1344. }
  1345. //now old state is cleaned up
  1346. assert_eq!(accounts.alive_account_count_in_slot(0), 0);
  1347. assert_eq!(accounts.alive_account_count_in_slot(1), 1);
  1348. }
  1349. #[test_case(MarkObsoleteAccounts::Enabled)]
  1350. #[test_case(MarkObsoleteAccounts::Disabled)]
  1351. fn test_clean_old_with_zero_lamport_account(mark_obsolete_accounts: MarkObsoleteAccounts) {
  1352. agave_logger::setup();
  1353. let accounts = AccountsDb::new_with_config(
  1354. Vec::new(),
  1355. AccountsDbConfig {
  1356. mark_obsolete_accounts,
  1357. ..ACCOUNTS_DB_CONFIG_FOR_TESTING
  1358. },
  1359. None,
  1360. Arc::default(),
  1361. );
  1362. let pubkey1 = solana_pubkey::new_rand();
  1363. let pubkey2 = solana_pubkey::new_rand();
  1364. let normal_account = AccountSharedData::new(1, 0, AccountSharedData::default().owner());
  1365. let zero_account = AccountSharedData::new(0, 0, AccountSharedData::default().owner());
  1366. //store an account
  1367. accounts.store_for_tests((0, [(&pubkey1, &normal_account)].as_slice()));
  1368. accounts.store_for_tests((1, [(&pubkey1, &zero_account)].as_slice()));
  1369. accounts.store_for_tests((0, [(&pubkey2, &normal_account)].as_slice()));
  1370. accounts.store_for_tests((1, [(&pubkey2, &normal_account)].as_slice()));
1371. //simulate slots are rooted after a while
  1372. accounts.add_root_and_flush_write_cache(0);
  1373. accounts.add_root_and_flush_write_cache(1);
  1374. assert_eq!(accounts.alive_account_count_in_slot(1), 2);
  1375. accounts.print_accounts_stats("");
  1376. // With obsolete accounts enabled, slot 0 is cleaned during flush
  1377. if mark_obsolete_accounts == MarkObsoleteAccounts::Disabled {
  1378. // even if rooted, old state isn't cleaned up
  1379. assert_eq!(accounts.alive_account_count_in_slot(0), 2);
  1380. accounts.clean_accounts_for_tests();
  1381. }
  1382. //Old state behind zero-lamport account is cleaned up
  1383. assert_eq!(accounts.alive_account_count_in_slot(0), 0);
  1384. assert_eq!(accounts.alive_account_count_in_slot(1), 2);
  1385. }
  1386. #[test_case(MarkObsoleteAccounts::Enabled)]
  1387. #[test_case(MarkObsoleteAccounts::Disabled)]
  1388. fn test_clean_old_with_both_normal_and_zero_lamport_accounts(
  1389. mark_obsolete_accounts: MarkObsoleteAccounts,
  1390. ) {
  1391. agave_logger::setup();
  1392. let mut accounts = AccountsDb {
  1393. account_indexes: spl_token_mint_index_enabled(),
  1394. ..AccountsDb::new_with_config(
  1395. Vec::new(),
  1396. AccountsDbConfig {
  1397. mark_obsolete_accounts,
  1398. ..ACCOUNTS_DB_CONFIG_FOR_TESTING
  1399. },
  1400. None,
  1401. Arc::default(),
  1402. )
  1403. };
  1404. let pubkey1 = solana_pubkey::new_rand();
  1405. let pubkey2 = solana_pubkey::new_rand();
  1406. // Set up account to be added to secondary index
  1407. const SPL_TOKEN_INITIALIZED_OFFSET: usize = 108;
  1408. let mint_key = Pubkey::new_unique();
  1409. let mut account_data_with_mint = vec![0; spl_generic_token::token::Account::get_packed_len()];
  1410. account_data_with_mint[..PUBKEY_BYTES].clone_from_slice(&(mint_key.to_bytes()));
  1411. account_data_with_mint[SPL_TOKEN_INITIALIZED_OFFSET] = 1;
  1412. let mut normal_account = AccountSharedData::new(1, 0, AccountSharedData::default().owner());
  1413. normal_account.set_owner(spl_generic_token::token::id());
  1414. normal_account.set_data(account_data_with_mint.clone());
  1415. let mut zero_account = AccountSharedData::new(0, 0, AccountSharedData::default().owner());
  1416. zero_account.set_owner(spl_generic_token::token::id());
  1417. zero_account.set_data(account_data_with_mint);
  1418. //store an account
  1419. accounts.store_for_tests((0, [(&pubkey1, &normal_account)].as_slice()));
  1420. accounts.store_for_tests((0, [(&pubkey1, &normal_account)].as_slice()));
  1421. accounts.store_for_tests((1, [(&pubkey1, &zero_account)].as_slice()));
  1422. accounts.store_for_tests((0, [(&pubkey2, &normal_account)].as_slice()));
  1423. accounts.store_for_tests((2, [(&pubkey2, &normal_account)].as_slice()));
1424. //simulate slots are rooted after a while
  1425. accounts.add_root_and_flush_write_cache(0);
  1426. accounts.add_root_and_flush_write_cache(1);
  1427. accounts.add_root_and_flush_write_cache(2);
  1428. if mark_obsolete_accounts == MarkObsoleteAccounts::Enabled {
  1429. // With obsolete accounts enabled, slot 0 is cleaned during flush
  1430. assert_eq!(accounts.alive_account_count_in_slot(0), 0);
  1431. } else {
  1432. //even if rooted, old state isn't cleaned up
  1433. assert_eq!(accounts.alive_account_count_in_slot(0), 2);
  1434. }
  1435. assert_eq!(accounts.alive_account_count_in_slot(1), 1);
  1436. assert_eq!(accounts.alive_account_count_in_slot(2), 1);
  1437. // Secondary index should still find both pubkeys
  1438. let mut found_accounts = HashSet::new();
  1439. let index_key = IndexKey::SplTokenMint(mint_key);
  1440. let bank_id = 0;
  1441. accounts
  1442. .accounts_index
  1443. .index_scan_accounts(
  1444. &Ancestors::default(),
  1445. bank_id,
  1446. index_key,
  1447. |key, _| {
  1448. found_accounts.insert(*key);
  1449. },
  1450. &ScanConfig::default(),
  1451. )
  1452. .unwrap();
  1453. assert_eq!(found_accounts.len(), 2);
  1454. assert!(found_accounts.contains(&pubkey1));
  1455. assert!(found_accounts.contains(&pubkey2));
  1456. {
  1457. accounts.account_indexes.keys = Some(AccountSecondaryIndexesIncludeExclude {
  1458. exclude: true,
  1459. keys: [mint_key].iter().cloned().collect::<HashSet<Pubkey>>(),
  1460. });
  1461. // Secondary index can't be used - do normal scan: should still find both pubkeys
  1462. let mut found_accounts = HashSet::new();
  1463. let used_index = accounts
  1464. .index_scan_accounts(
  1465. &Ancestors::default(),
  1466. bank_id,
  1467. index_key,
  1468. |account| {
  1469. found_accounts.insert(*account.unwrap().0);
  1470. },
  1471. &ScanConfig::default(),
  1472. )
  1473. .unwrap();
  1474. assert!(!used_index);
  1475. assert_eq!(found_accounts.len(), 2);
  1476. assert!(found_accounts.contains(&pubkey1));
  1477. assert!(found_accounts.contains(&pubkey2));
  1478. accounts.account_indexes.keys = None;
  1479. // Secondary index can now be used since it isn't marked as excluded
  1480. let mut found_accounts = HashSet::new();
  1481. let used_index = accounts
  1482. .index_scan_accounts(
  1483. &Ancestors::default(),
  1484. bank_id,
  1485. index_key,
  1486. |account| {
  1487. found_accounts.insert(*account.unwrap().0);
  1488. },
  1489. &ScanConfig::default(),
  1490. )
  1491. .unwrap();
  1492. assert!(used_index);
  1493. assert_eq!(found_accounts.len(), 2);
  1494. assert!(found_accounts.contains(&pubkey1));
  1495. assert!(found_accounts.contains(&pubkey2));
  1496. accounts.account_indexes.keys = None;
  1497. }
  1498. accounts.clean_accounts_for_tests();
  1499. //both zero lamport and normal accounts are cleaned up
  1500. assert_eq!(accounts.alive_account_count_in_slot(0), 0);
1501. // The only store to slot 1 was a zero lamport account, which should
1502. // be purged by the zero-lamport cleaning logic because slot 1 is
  1503. // rooted
  1504. assert_eq!(accounts.alive_account_count_in_slot(1), 0);
  1505. assert_eq!(accounts.alive_account_count_in_slot(2), 1);
  1506. // `pubkey1`, a zero lamport account, should no longer exist in accounts index
  1507. // because it has been removed by the clean
  1508. assert!(!accounts.accounts_index.contains_with(&pubkey1, None, None));
  1509. // Secondary index should have purged `pubkey1` as well
  1510. let mut found_accounts = vec![];
  1511. accounts
  1512. .accounts_index
  1513. .index_scan_accounts(
  1514. &Ancestors::default(),
  1515. bank_id,
  1516. IndexKey::SplTokenMint(mint_key),
  1517. |key, _| found_accounts.push(*key),
  1518. &ScanConfig::default(),
  1519. )
  1520. .unwrap();
  1521. assert_eq!(found_accounts, vec![pubkey2]);
  1522. }
  1523. #[test_case(MarkObsoleteAccounts::Enabled)]
  1524. #[test_case(MarkObsoleteAccounts::Disabled)]
  1525. fn test_clean_max_slot_zero_lamport_account(mark_obsolete_accounts: MarkObsoleteAccounts) {
  1526. agave_logger::setup();
  1527. let accounts = AccountsDb::new_with_config(
  1528. Vec::new(),
  1529. AccountsDbConfig {
  1530. mark_obsolete_accounts,
  1531. ..ACCOUNTS_DB_CONFIG_FOR_TESTING
  1532. },
  1533. None,
  1534. Arc::default(),
  1535. );
  1536. let pubkey = solana_pubkey::new_rand();
  1537. let account = AccountSharedData::new(1, 0, AccountSharedData::default().owner());
  1538. let zero_account = AccountSharedData::new(0, 0, AccountSharedData::default().owner());
  1539. // store an account, make it a zero lamport account
  1540. // in slot 1
  1541. accounts.store_for_tests((0, [(&pubkey, &account)].as_slice()));
  1542. accounts.store_for_tests((1, [(&pubkey, &zero_account)].as_slice()));
1543. // simulate slots are rooted after a while
  1544. accounts.add_root_and_flush_write_cache(0);
  1545. accounts.add_root_and_flush_write_cache(1);
  1546. // Clean is performed as part of flush with obsolete accounts marked, so explicit clean isn't needed
  1547. if mark_obsolete_accounts == MarkObsoleteAccounts::Disabled {
1548. // Only clean up to slot 0; this should not purge slot 0 based on
1549. // the updates in later slots (slot 1)
  1550. assert_eq!(accounts.alive_account_count_in_slot(0), 1);
  1551. assert_eq!(accounts.alive_account_count_in_slot(1), 1);
  1552. accounts.clean_accounts(Some(0), false, &EpochSchedule::default());
  1553. assert_eq!(accounts.alive_account_count_in_slot(0), 1);
  1554. }
  1555. assert_eq!(accounts.alive_account_count_in_slot(1), 1);
  1556. assert!(accounts.accounts_index.contains_with(&pubkey, None, None));
  1557. // Now the account can be cleaned up
  1558. accounts.clean_accounts(Some(1), false, &EpochSchedule::default());
  1559. assert_eq!(accounts.alive_account_count_in_slot(0), 0);
  1560. assert_eq!(accounts.alive_account_count_in_slot(1), 0);
1561. // The zero lamport account should no longer exist in the accounts index
  1562. // because it has been removed
  1563. assert!(!accounts.accounts_index.contains_with(&pubkey, None, None));
  1564. }
  1565. fn assert_no_stores(accounts: &AccountsDb, slot: Slot) {
  1566. let store = accounts.storage.get_slot_storage_entry(slot);
  1567. assert!(store.is_none());
  1568. }
  1569. #[test]
  1570. fn test_accounts_db_purge_keep_live() {
  1571. agave_logger::setup();
  1572. let some_lamport = 223;
  1573. let zero_lamport = 0;
  1574. let no_data = 0;
  1575. let owner = *AccountSharedData::default().owner();
  1576. let account = AccountSharedData::new(some_lamport, no_data, &owner);
  1577. let pubkey = solana_pubkey::new_rand();
  1578. let account2 = AccountSharedData::new(some_lamport, no_data, &owner);
  1579. let pubkey2 = solana_pubkey::new_rand();
  1580. let zero_lamport_account = AccountSharedData::new(zero_lamport, no_data, &owner);
  1581. let accounts = AccountsDb::new_single_for_tests();
  1582. accounts.add_root_and_flush_write_cache(0);
  1583. // If there is no latest full snapshot, zero lamport accounts can be cleaned and removed
  1584. // immediately. Set latest full snapshot slot to zero to avoid cleaning zero lamport accounts
  1585. accounts.set_latest_full_snapshot_slot(0);
  1586. // Step A
  1587. let mut current_slot = 1;
  1588. accounts.store_for_tests((current_slot, [(&pubkey, &account)].as_slice()));
  1589. // Store another live account to slot 1 which will prevent any purge
  1590. // since the store count will not be zero
  1591. accounts.store_for_tests((current_slot, [(&pubkey2, &account2)].as_slice()));
  1592. accounts.add_root_and_flush_write_cache(current_slot);
  1593. let (slot1, account_info1) = accounts
  1594. .accounts_index
  1595. .get_with_and_then(&pubkey, None, None, false, |(slot, account_info)| {
  1596. (slot, account_info)
  1597. })
  1598. .unwrap();
  1599. let (slot2, account_info2) = accounts
  1600. .accounts_index
  1601. .get_with_and_then(&pubkey2, None, None, false, |(slot, account_info)| {
  1602. (slot, account_info)
  1603. })
  1604. .unwrap();
  1605. assert_eq!(slot1, current_slot);
  1606. assert_eq!(slot1, slot2);
  1607. assert_eq!(account_info1.store_id(), account_info2.store_id());
  1608. // Step B
  1609. current_slot += 1;
  1610. let zero_lamport_slot = current_slot;
  1611. accounts.store_for_tests((current_slot, [(&pubkey, &zero_lamport_account)].as_slice()));
  1612. accounts.add_root_and_flush_write_cache(current_slot);
  1613. accounts.assert_load_account(current_slot, pubkey, zero_lamport);
  1614. current_slot += 1;
  1615. accounts.add_root_and_flush_write_cache(current_slot);
  1616. accounts.print_accounts_stats("pre_purge");
  1617. accounts.clean_accounts_for_tests();
  1618. accounts.print_accounts_stats("post_purge");
1619. // The earlier entry for pubkey in the account index is purged.
  1620. let (slot_list_len, index_slot) = accounts.accounts_index.get_and_then(&pubkey, |entry| {
  1621. let slot_list = entry.unwrap().slot_list_read_lock();
  1622. (false, (slot_list.len(), slot_list[0].0))
  1623. });
  1624. assert_eq!(slot_list_len, 1);
  1625. // Zero lamport entry was not the one purged
  1626. assert_eq!(index_slot, zero_lamport_slot);
  1627. // The ref count should still be 2 because no slots were purged
  1628. accounts.assert_ref_count(&pubkey, 2);
  1629. // storage for slot 1 had 2 accounts, now has 1 after pubkey 1
  1630. // was reclaimed
  1631. accounts.check_storage(1, 1, 2);
1632. // storage for slot 2 had 1 account, and still has 1
  1633. accounts.check_storage(2, 1, 1);
  1634. }
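// A zero-lamport update in a later root should let clean remove the pubkey from the index
// entirely and drop the storages for both slots.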
  1635. #[test]
  1636. fn test_accounts_db_purge1() {
  1637. agave_logger::setup();
  1638. let some_lamport = 223;
  1639. let zero_lamport = 0;
  1640. let no_data = 0;
  1641. let owner = *AccountSharedData::default().owner();
  1642. let account = AccountSharedData::new(some_lamport, no_data, &owner);
  1643. let pubkey = solana_pubkey::new_rand();
  1644. let zero_lamport_account = AccountSharedData::new(zero_lamport, no_data, &owner);
  1645. let accounts = AccountsDb::new_single_for_tests();
  1646. accounts.add_root(0);
  1647. let mut current_slot = 1;
  1648. accounts.store_for_tests((current_slot, [(&pubkey, &account)].as_slice()));
  1649. accounts.add_root_and_flush_write_cache(current_slot);
  1650. current_slot += 1;
  1651. accounts.store_for_tests((current_slot, [(&pubkey, &zero_lamport_account)].as_slice()));
  1652. accounts.add_root_and_flush_write_cache(current_slot);
  1653. accounts.assert_load_account(current_slot, pubkey, zero_lamport);
  1654. // Otherwise slot 2 will not be removed
  1655. current_slot += 1;
  1656. accounts.add_root_and_flush_write_cache(current_slot);
  1657. accounts.print_accounts_stats("pre_purge");
  1658. let ancestors = linear_ancestors(current_slot);
  1659. info!("ancestors: {ancestors:?}");
  1660. let hash = accounts.calculate_accounts_lt_hash_at_startup_from_index(&ancestors, current_slot);
  1661. accounts.clean_accounts_for_tests();
  1662. assert_eq!(
  1663. accounts.calculate_accounts_lt_hash_at_startup_from_index(&ancestors, current_slot),
  1664. hash
  1665. );
  1666. accounts.print_accounts_stats("post_purge");
1667. // Make sure the index entry for pubkey is cleared
  1668. assert!(!accounts.accounts_index.contains(&pubkey));
1669. // slots 1 & 2 should not have any stores
  1670. assert_no_stores(&accounts, 1);
  1671. assert_no_stores(&accounts, 2);
  1672. }
  1673. #[test]
  1674. #[ignore]
  1675. fn test_store_account_stress() {
  1676. let slot = 42;
  1677. let num_threads = 2;
  1678. let min_file_bytes = std::mem::size_of::<StoredMeta>() + std::mem::size_of::<AccountMeta>();
  1679. let db = Arc::new(AccountsDb {
  1680. file_size: min_file_bytes as u64,
  1681. ..AccountsDb::new_single_for_tests()
  1682. });
  1683. db.add_root(slot);
  1684. let thread_hdls: Vec<_> = (0..num_threads)
  1685. .map(|_| {
  1686. let db = db.clone();
  1687. std::thread::Builder::new()
  1688. .name("account-writers".to_string())
  1689. .spawn(move || {
  1690. let pubkey = solana_pubkey::new_rand();
  1691. let mut account = AccountSharedData::new(1, 0, &pubkey);
  1692. let mut i = 0;
  1693. loop {
  1694. let account_bal = thread_rng().gen_range(1..99);
  1695. account.set_lamports(account_bal);
  1696. db.store_for_tests((slot, [(&pubkey, &account)].as_slice()));
1697. let (account, loaded_slot) = db
  1698. .load_without_fixed_root(&Ancestors::default(), &pubkey)
  1699. .unwrap_or_else(|| {
  1700. panic!("Could not fetch stored account {pubkey}, iter {i}")
  1701. });
1702. assert_eq!(loaded_slot, slot);
  1703. assert_eq!(account.lamports(), account_bal);
  1704. i += 1;
  1705. }
  1706. })
  1707. .unwrap()
  1708. })
  1709. .collect();
  1710. for t in thread_hdls {
  1711. t.join().unwrap();
  1712. }
  1713. }
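// Scanning with only slot 0 as an ancestor returns just the slot-0 account; adding slot 1
// returns both accounts.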
  1714. #[test]
  1715. fn test_accountsdb_scan_accounts() {
  1716. agave_logger::setup();
  1717. let db = AccountsDb::new_single_for_tests();
  1718. let key = Pubkey::default();
  1719. let key0 = solana_pubkey::new_rand();
  1720. let account0 = AccountSharedData::new(1, 0, &key);
  1721. db.store_for_tests((0, [(&key0, &account0)].as_slice()));
  1722. let key1 = solana_pubkey::new_rand();
  1723. let account1 = AccountSharedData::new(2, 0, &key);
  1724. db.store_for_tests((1, [(&key1, &account1)].as_slice()));
  1725. let ancestors = vec![(0, 0)].into_iter().collect();
  1726. let mut accounts = Vec::new();
  1727. db.scan_accounts(
  1728. &ancestors,
  1729. 0,
  1730. |scan_result| {
  1731. if let Some((_, account, _)) = scan_result {
  1732. accounts.push(account);
  1733. }
  1734. },
  1735. &ScanConfig::default(),
  1736. )
  1737. .expect("should scan accounts");
  1738. assert_eq!(accounts, vec![account0]);
  1739. let ancestors = vec![(1, 1), (0, 0)].into_iter().collect();
  1740. let mut accounts = Vec::new();
  1741. db.scan_accounts(
  1742. &ancestors,
  1743. 0,
  1744. |scan_result| {
  1745. if let Some((_, account, _)) = scan_result {
  1746. accounts.push(account);
  1747. }
  1748. },
  1749. &ScanConfig::default(),
  1750. )
  1751. .expect("should scan accounts");
  1752. assert_eq!(accounts.len(), 2);
  1753. }
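// Purging key1's slot-1 entry must not prevent a later store of key1 in slot 2 from being loaded.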
  1754. #[test]
  1755. fn test_cleanup_key_not_removed() {
  1756. agave_logger::setup();
  1757. let db = AccountsDb::new_single_for_tests();
  1758. let key = Pubkey::default();
  1759. let key0 = solana_pubkey::new_rand();
  1760. let account0 = AccountSharedData::new(1, 0, &key);
  1761. db.store_for_tests((0, [(&key0, &account0)].as_slice()));
  1762. let key1 = solana_pubkey::new_rand();
  1763. let account1 = AccountSharedData::new(2, 0, &key);
  1764. db.store_for_tests((1, [(&key1, &account1)].as_slice()));
  1765. db.print_accounts_stats("pre");
  1766. let slots: HashSet<Slot> = vec![1].into_iter().collect();
  1767. let purge_keys = [(key1, slots)];
  1768. let _ = db.purge_keys_exact(purge_keys);
  1769. let account2 = AccountSharedData::new(3, 0, &key);
  1770. db.store_for_tests((2, [(&key1, &account2)].as_slice()));
  1771. db.print_accounts_stats("post");
  1772. let ancestors = vec![(2, 0)].into_iter().collect();
  1773. assert_eq!(
  1774. db.load_without_fixed_root(&ancestors, &key1)
  1775. .unwrap()
  1776. .0
  1777. .lamports(),
  1778. 3
  1779. );
  1780. }
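// An account whose data is larger than the default file size should still store and load intact.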
  1781. #[test]
  1782. fn test_store_large_account() {
  1783. agave_logger::setup();
  1784. let db = AccountsDb::new_single_for_tests();
  1785. let key = Pubkey::default();
  1786. let data_len = DEFAULT_FILE_SIZE as usize + 7;
  1787. let account = AccountSharedData::new(1, data_len, &key);
  1788. db.store_for_tests((0, [(&key, &account)].as_slice()));
  1789. let ancestors = vec![(0, 0)].into_iter().collect();
  1790. let ret = db.load_without_fixed_root(&ancestors, &key).unwrap();
  1791. assert_eq!(ret.0.data().len(), data_len);
  1792. }
  1793. #[test]
  1794. fn test_hash_stored_account() {
1795. // Numbers are just sequential.
  1796. let pubkey = Pubkey::new_from_array([
  1797. 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
  1798. 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36,
  1799. 0x37, 0x38,
  1800. ]);
  1801. let lamports = 0x39_3a_3b_3c_3d_3e_3f_40;
  1802. let rent_epoch = 0x41_42_43_44_45_46_47_48;
  1803. let owner = Pubkey::new_from_array([
  1804. 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
  1805. 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66,
  1806. 0x67, 0x68,
  1807. ]);
  1808. const ACCOUNT_DATA_LEN: usize = 3;
  1809. let data: [u8; ACCOUNT_DATA_LEN] = [0x69, 0x6a, 0x6b];
  1810. let executable = false;
  1811. let stored_account = StoredAccountInfo {
  1812. pubkey: &pubkey,
  1813. lamports,
  1814. owner: &owner,
  1815. data: &data,
  1816. executable,
  1817. rent_epoch,
  1818. };
  1819. let account = create_account_shared_data(&stored_account);
  1820. let expected_account_hash = LtHashChecksum([
  1821. 160, 29, 105, 138, 56, 166, 40, 55, 224, 231, 29, 208, 68, 46, 190, 89, 141, 20, 65, 86,
  1822. 115, 14, 182, 125, 174, 181, 165, 0, 72, 175, 105, 177,
  1823. ]);
  1824. assert_eq!(
  1825. AccountsDb::lt_hash_account(&stored_account, stored_account.pubkey())
  1826. .0
  1827. .checksum(),
  1828. expected_account_hash,
  1829. "StoredAccountInfo's data layout might be changed; update hashing if needed."
  1830. );
  1831. assert_eq!(
  1832. AccountsDb::lt_hash_account(&account, stored_account.pubkey())
  1833. .0
  1834. .checksum(),
  1835. expected_account_hash,
  1836. "Account-based hashing must be consistent with StoredAccountInfo-based one."
  1837. );
  1838. }
  1839. // something we can get a ref to
  1840. pub static EPOCH_SCHEDULE: std::sync::LazyLock<EpochSchedule> =
  1841. std::sync::LazyLock::new(EpochSchedule::default);
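// Capitalization calculated from the index at startup should equal the sum of stored lamports,
// both before and after adding a second account.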
  1842. #[test]
  1843. fn test_verify_bank_capitalization() {
  1844. for pass in 0..2 {
  1845. agave_logger::setup();
  1846. let db = AccountsDb::new_single_for_tests();
  1847. let key = solana_pubkey::new_rand();
  1848. let some_data_len = 0;
  1849. let some_slot: Slot = 0;
  1850. let account = AccountSharedData::new(1, some_data_len, &key);
  1851. let ancestors = vec![(some_slot, 0)].into_iter().collect();
  1852. db.store_for_tests((some_slot, [(&key, &account)].as_slice()));
  1853. if pass == 0 {
  1854. db.add_root_and_flush_write_cache(some_slot);
  1855. assert_eq!(
  1856. db.calculate_capitalization_at_startup_from_index(&ancestors, some_slot),
  1857. 1
  1858. );
  1859. continue;
  1860. }
  1861. let native_account_pubkey = solana_pubkey::new_rand();
  1862. db.store_for_tests((
  1863. some_slot,
  1864. [(
  1865. &native_account_pubkey,
  1866. &create_loadable_account_for_test("foo"),
  1867. )]
  1868. .as_slice(),
  1869. ));
  1870. db.add_root_and_flush_write_cache(some_slot);
  1871. assert_eq!(
  1872. db.calculate_capitalization_at_startup_from_index(&ancestors, some_slot),
  1873. 2
  1874. );
  1875. }
  1876. }
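// Storing an account that does not fit in the pre-created 8192-byte store should still succeed
// by finding or creating a larger store.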
  1877. #[test]
  1878. fn test_storage_finder() {
  1879. agave_logger::setup();
  1880. let db = AccountsDb {
  1881. file_size: 16 * 1024,
  1882. ..AccountsDb::new_single_for_tests()
  1883. };
  1884. let key = solana_pubkey::new_rand();
  1885. let lamports = 100;
  1886. let data_len = 8190;
  1887. let account = AccountSharedData::new(lamports, data_len, &solana_pubkey::new_rand());
  1888. // pre-populate with a smaller empty store
  1889. db.create_and_insert_store(1, 8192, "test_storage_finder");
  1890. db.store_for_tests((1, [(&key, &account)].as_slice()));
  1891. }
  1892. #[test]
  1893. fn test_get_snapshot_storages_empty() {
  1894. let db = AccountsDb::new_single_for_tests();
  1895. assert!(db.get_storages(..=0).0.is_empty());
  1896. }
  1897. #[test]
  1898. fn test_get_snapshot_storages_only_older_than_or_equal_to_snapshot_slot() {
  1899. let db = AccountsDb::new_single_for_tests();
  1900. let key = Pubkey::default();
  1901. let account = AccountSharedData::new(1, 0, &key);
  1902. let before_slot = 0;
  1903. let base_slot = before_slot + 1;
  1904. let after_slot = base_slot + 1;
  1905. db.store_for_tests((base_slot, [(&key, &account)].as_slice()));
  1906. db.add_root_and_flush_write_cache(base_slot);
  1907. assert!(db.get_storages(..=before_slot).0.is_empty());
  1908. assert_eq!(1, db.get_storages(..=base_slot).0.len());
  1909. assert_eq!(1, db.get_storages(..=after_slot).0.len());
  1910. }
  1911. #[test]
  1912. fn test_get_snapshot_storages_only_non_empty() {
  1913. for pass in 0..2 {
  1914. let db = AccountsDb::new_single_for_tests();
  1915. let key = Pubkey::default();
  1916. let account = AccountSharedData::new(1, 0, &key);
  1917. let base_slot = 0;
  1918. let after_slot = base_slot + 1;
  1919. db.store_for_tests((base_slot, [(&key, &account)].as_slice()));
  1920. if pass == 0 {
  1921. db.add_root_and_flush_write_cache(base_slot);
  1922. db.storage.remove(&base_slot, false);
  1923. assert!(db.get_storages(..=after_slot).0.is_empty());
  1924. continue;
  1925. }
  1926. db.store_for_tests((base_slot, [(&key, &account)].as_slice()));
  1927. db.add_root_and_flush_write_cache(base_slot);
  1928. assert_eq!(1, db.get_storages(..=after_slot).0.len());
  1929. }
  1930. }
  1931. #[test]
  1932. fn test_get_snapshot_storages_only_roots() {
  1933. let db = AccountsDb::new_single_for_tests();
  1934. let key = Pubkey::default();
  1935. let account = AccountSharedData::new(1, 0, &key);
  1936. let base_slot = 0;
  1937. let after_slot = base_slot + 1;
  1938. db.store_for_tests((base_slot, [(&key, &account)].as_slice()));
  1939. assert!(db.get_storages(..=after_slot).0.is_empty());
  1940. db.add_root_and_flush_write_cache(base_slot);
  1941. assert_eq!(1, db.get_storages(..=after_slot).0.len());
  1942. }
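// A storage whose accounts have all been removed should be excluded from the snapshot storages.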
  1943. #[test]
  1944. fn test_get_snapshot_storages_exclude_empty() {
  1945. let db = AccountsDb::new_single_for_tests();
  1946. let key = Pubkey::default();
  1947. let account = AccountSharedData::new(1, 0, &key);
  1948. let base_slot = 0;
  1949. let after_slot = base_slot + 1;
  1950. db.store_for_tests((base_slot, [(&key, &account)].as_slice()));
  1951. db.add_root_and_flush_write_cache(base_slot);
  1952. assert_eq!(1, db.get_storages(..=after_slot).0.len());
  1953. db.storage
  1954. .get_slot_storage_entry(0)
  1955. .unwrap()
  1956. .remove_accounts(0, 1);
  1957. assert!(db.get_storages(..=after_slot).0.is_empty());
  1958. }
  1959. #[test]
  1960. fn test_get_snapshot_storages_with_base_slot() {
  1961. let db = AccountsDb::new_single_for_tests();
  1962. let key = Pubkey::default();
  1963. let account = AccountSharedData::new(1, 0, &key);
  1964. let slot = 10;
  1965. db.store_for_tests((slot, [(&key, &account)].as_slice()));
  1966. db.add_root_and_flush_write_cache(slot);
  1967. assert_eq!(0, db.get_storages(slot + 1..=slot + 1).0.len());
  1968. assert_eq!(1, db.get_storages(slot..=slot + 1).0.len());
  1969. }
  1970. define_accounts_db_test!(
  1971. test_storage_remove_account_double_remove,
  1972. panic = "Too many bytes or accounts removed from storage! slot: 0, id: 0",
  1973. |accounts| {
  1974. let pubkey = solana_pubkey::new_rand();
  1975. let account = AccountSharedData::new(1, 0, AccountSharedData::default().owner());
  1976. accounts.store_for_tests((0, [(&pubkey, &account)].as_slice()));
  1977. accounts.add_root_and_flush_write_cache(0);
  1978. let storage_entry = accounts.storage.get_slot_storage_entry(0).unwrap();
  1979. storage_entry.remove_accounts(0, 1);
  1980. storage_entry.remove_accounts(0, 1);
  1981. }
  1982. );
  1983. fn do_full_clean_refcount(mut accounts: AccountsDb, store1_first: bool, store_size: u64) {
  1984. let pubkey1 = Pubkey::from_str("My11111111111111111111111111111111111111111").unwrap();
  1985. let pubkey2 = Pubkey::from_str("My22211111111111111111111111111111111111111").unwrap();
  1986. let pubkey3 = Pubkey::from_str("My33311111111111111111111111111111111111111").unwrap();
  1987. let old_lamport = 223;
  1988. let zero_lamport = 0;
  1989. let dummy_lamport = 999_999;
1990. // size the data so only 1 account fits in a 4k store
  1991. let data_size = 2200;
  1992. let owner = *AccountSharedData::default().owner();
  1993. let account = AccountSharedData::new(old_lamport, data_size, &owner);
  1994. let account2 = AccountSharedData::new(old_lamport + 100_001, data_size, &owner);
  1995. let account3 = AccountSharedData::new(old_lamport + 100_002, data_size, &owner);
  1996. let account4 = AccountSharedData::new(dummy_lamport, data_size, &owner);
  1997. let zero_lamport_account = AccountSharedData::new(zero_lamport, data_size, &owner);
  1998. let mut current_slot = 0;
  1999. accounts.file_size = store_size;
  2000. // A: Initialize AccountsDb with pubkey1 and pubkey2
  2001. current_slot += 1;
  2002. if store1_first {
  2003. accounts.store_for_tests((current_slot, [(&pubkey1, &account)].as_slice()));
  2004. accounts.store_for_tests((current_slot, [(&pubkey2, &account)].as_slice()));
  2005. } else {
  2006. accounts.store_for_tests((current_slot, [(&pubkey2, &account)].as_slice()));
  2007. accounts.store_for_tests((current_slot, [(&pubkey1, &account)].as_slice()));
  2008. }
  2009. accounts.add_root_and_flush_write_cache(current_slot);
  2010. info!("post A");
  2011. accounts.print_accounts_stats("Post-A");
  2012. // B: Test multiple updates to pubkey1 in a single slot/storage
  2013. current_slot += 1;
  2014. assert_eq!(0, accounts.alive_account_count_in_slot(current_slot));
  2015. accounts.assert_ref_count(&pubkey1, 1);
  2016. accounts.store_for_tests((current_slot, [(&pubkey1, &account2)].as_slice()));
  2017. accounts.store_for_tests((current_slot, [(&pubkey1, &account2)].as_slice()));
  2018. accounts.add_root_and_flush_write_cache(current_slot);
  2019. assert_eq!(1, accounts.alive_account_count_in_slot(current_slot));
2020. // Stores to the same pubkey in the same slot only count once towards
2021. // the ref count
  2022. accounts.assert_ref_count(&pubkey1, 2);
  2023. accounts.add_root_and_flush_write_cache(current_slot);
  2024. accounts.print_accounts_stats("Post-B pre-clean");
  2025. accounts.clean_accounts_for_tests();
  2026. info!("post B");
  2027. accounts.print_accounts_stats("Post-B");
  2028. // C: more updates to trigger clean of previous updates
  2029. current_slot += 1;
  2030. accounts.assert_ref_count(&pubkey1, 2);
  2031. accounts.store_for_tests((current_slot, [(&pubkey1, &account3)].as_slice()));
  2032. accounts.store_for_tests((current_slot, [(&pubkey2, &account3)].as_slice()));
  2033. accounts.store_for_tests((current_slot, [(&pubkey3, &account4)].as_slice()));
  2034. accounts.add_root_and_flush_write_cache(current_slot);
  2035. accounts.assert_ref_count(&pubkey1, 3);
  2036. info!("post C");
  2037. accounts.print_accounts_stats("Post-C");
  2038. // D: Make all keys 0-lamport, cleans all keys
  2039. current_slot += 1;
  2040. accounts.assert_ref_count(&pubkey1, 3);
  2041. accounts.store_for_tests((current_slot, [(&pubkey1, &zero_lamport_account)].as_slice()));
  2042. accounts.store_for_tests((current_slot, [(&pubkey2, &zero_lamport_account)].as_slice()));
  2043. accounts.store_for_tests((current_slot, [(&pubkey3, &zero_lamport_account)].as_slice()));
  2044. let snapshot_stores = accounts.get_storages(..=current_slot).0;
  2045. let total_accounts: usize = snapshot_stores.iter().map(|s| s.accounts_count()).sum();
  2046. assert!(!snapshot_stores.is_empty());
  2047. assert!(total_accounts > 0);
  2048. info!("post D");
  2049. accounts.print_accounts_stats("Post-D");
  2050. accounts.add_root_and_flush_write_cache(current_slot);
  2051. accounts.clean_accounts_for_tests();
  2052. accounts.print_accounts_stats("Post-D clean");
  2053. let total_accounts_post_clean: usize = snapshot_stores.iter().map(|s| s.accounts_count()).sum();
  2054. assert_eq!(total_accounts, total_accounts_post_clean);
  2055. // should clean all 3 pubkeys
  2056. accounts.assert_ref_count(&pubkey1, 0);
  2057. accounts.assert_ref_count(&pubkey2, 0);
  2058. accounts.assert_ref_count(&pubkey3, 0);
  2059. }
2060. // Set up 3 scenarios that try to differentiate between pubkey1 being in an
2061. // Available slot or a Full slot, which would cause a different reset behavior
2062. // when pubkey1 is cleaned and therefore cause the ref count to be incorrect,
2063. // preventing removal of that key.
  2064. //
  2065. // do stores with a 4mb size so only 1 store is created per slot
  2066. define_accounts_db_test!(test_full_clean_refcount_no_first_4m, |accounts| {
  2067. do_full_clean_refcount(accounts, false, 4 * 1024 * 1024);
  2068. });
2069. // do stores with a 4k size and store pubkey1 2nd
  2070. define_accounts_db_test!(test_full_clean_refcount_no_first_4k, |accounts| {
  2071. do_full_clean_refcount(accounts, false, 4 * 1024);
  2072. });
2073. // do stores with a 4k size and store pubkey1 first
  2074. define_accounts_db_test!(test_full_clean_refcount_first_4k, |accounts| {
  2075. do_full_clean_refcount(accounts, true, 4 * 1024);
  2076. });
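// Cleaning a dead slot that has no storage should be a harmless no-op.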
#[test]
fn test_clean_stored_dead_slots_empty() {
    let accounts = AccountsDb::new_single_for_tests();
    let mut dead_slots = IntSet::default();
    dead_slots.insert(10);
    accounts.clean_stored_dead_slots(&dead_slots, None, &HashSet::default());
}
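// Shrink passes over an AccountsDb with no rooted storages should be no-ops, both for
// candidate-slot shrinking and for `shrink_all_slots` (startup and non-startup).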
#[test]
fn test_shrink_all_slots_none() {
    let epoch_schedule = EpochSchedule::default();
    for startup in &[false, true] {
        let accounts = AccountsDb::new_single_for_tests();
        for _ in 0..10 {
            accounts.shrink_candidate_slots(&epoch_schedule);
        }
        accounts.shrink_all_slots(*startup, &EpochSchedule::default(), None);
    }
}
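// Store 30_000 accounts in one slot, then overwrite the first 5_000 of them in a later
// slot so the original storage is ~83% alive. A 0.4 shrink ratio leaves it untouched;
// only `shrink_all_slots` compacts it down to the 25_000 still-alive accounts.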
#[test]
fn test_shrink_candidate_slots() {
    agave_logger::setup();
    let mut accounts = AccountsDb::new_single_for_tests();
    let pubkey_count = 30000;
    let pubkeys: Vec<_> = (0..pubkey_count)
        .map(|_| solana_pubkey::new_rand())
        .collect();
    let some_lamport = 223;
    let no_data = 0;
    let owner = *AccountSharedData::default().owner();
    let account = AccountSharedData::new(some_lamport, no_data, &owner);
    let mut current_slot = 0;
    current_slot += 1;
    for pubkey in &pubkeys {
        accounts.store_for_tests((current_slot, [(pubkey, &account)].as_slice()));
    }
    let shrink_slot = current_slot;
    accounts.add_root_and_flush_write_cache(current_slot);
    current_slot += 1;
    let pubkey_count_after_shrink = 25000;
    let updated_pubkeys = &pubkeys[0..pubkey_count - pubkey_count_after_shrink];
    for pubkey in updated_pubkeys {
        accounts.store_for_tests((current_slot, [(pubkey, &account)].as_slice()));
    }
    accounts.add_root_and_flush_write_cache(current_slot);
    accounts.clean_accounts_for_tests();
    assert_eq!(
        pubkey_count,
        accounts.all_account_count_in_accounts_file(shrink_slot)
    );
    // First, only try to shrink stale slots; nothing happens because the shrink ratio
    // is not small enough to trigger a shrink.
    // Note this shrink ratio had to change because we are WAY over-allocating append
    // vecs when we flush the write cache at the moment.
    accounts.shrink_ratio = AccountShrinkThreshold::TotalSpace { shrink_ratio: 0.4 };
    accounts.shrink_candidate_slots(&EpochSchedule::default());
    assert_eq!(
        pubkey_count,
        accounts.all_account_count_in_accounts_file(shrink_slot)
    );
    // Now, do full-shrink.
    accounts.shrink_all_slots(false, &EpochSchedule::default(), None);
    assert_eq!(
        pubkey_count_after_shrink,
        accounts.all_account_count_in_accounts_file(shrink_slot)
    );
}
/// This test creates an ancient storage with three alive accounts
/// of various sizes. It then simulates killing one of the
/// accounts in a more recent (non-ancient) slot by overwriting
/// the account that has the smallest data size. The dead account
/// is expected to be deleted from its ancient storage in the
/// process of shrinking candidate slots. The capacity of the
/// storage after shrinking is expected to be the sum of alive
/// bytes of the two remaining alive ancient accounts.
#[test]
fn test_shrink_candidate_slots_with_dead_ancient_account() {
    agave_logger::setup();
    let epoch_schedule = EpochSchedule::default();
    let db = AccountsDb::new_single_for_tests();
    const ACCOUNT_DATA_SIZES: &[usize] = &[1000, 2000, 150];
    let accounts: Vec<_> = ACCOUNT_DATA_SIZES
        .iter()
        .map(|data_size| {
            (
                Pubkey::new_unique(),
                AccountSharedData::new(1, *data_size, &Pubkey::default()),
            )
        })
        .collect();
    let accounts: Vec<_> = accounts
        .iter()
        .map(|(pubkey, account)| (pubkey, account))
        .collect();
    let starting_ancient_slot = 1;
    db.store_for_tests((starting_ancient_slot, accounts.as_slice()));
    db.add_root_and_flush_write_cache(starting_ancient_slot);
    let storage = db.get_storage_for_slot(starting_ancient_slot).unwrap();
    let ancient_accounts = db.get_unique_accounts_from_storage(&storage);
    // Check that three accounts are indeed present in the combined storage.
    assert_eq!(ancient_accounts.stored_accounts.len(), 3);
    // Find an ancient account with smallest data length.
    // This will be a dead account, overwritten in the current slot.
    let modified_account_pubkey = ancient_accounts
        .stored_accounts
        .iter()
        .min_by(|a, b| a.data_len.cmp(&b.data_len))
        .unwrap()
        .pubkey;
    let modified_account_owner = *AccountSharedData::default().owner();
    let modified_account = AccountSharedData::new(223, 0, &modified_account_owner);
    let ancient_append_vec_offset = db.ancient_append_vec_offset.unwrap().abs();
    let current_slot = epoch_schedule.slots_per_epoch + ancient_append_vec_offset as u64 + 1;
    // Simulate killing of the ancient account by overwriting it in the current slot.
    db.store_for_tests((
        current_slot,
        [(&modified_account_pubkey, &modified_account)].as_slice(),
    ));
    db.add_root_and_flush_write_cache(current_slot);
    // This should remove the dead ancient account from the index.
    db.clean_accounts_for_tests();
    db.shrink_ancient_slots(&epoch_schedule);
    let storage = db.get_storage_for_slot(starting_ancient_slot).unwrap();
    let created_accounts = db.get_unique_accounts_from_storage(&storage);
    // The dead account should still be in the ancient storage,
    // because the storage wouldn't be shrunk with normal alive to
    // capacity ratio.
    assert_eq!(created_accounts.stored_accounts.len(), 3);
    db.shrink_candidate_slots(&epoch_schedule);
    let storage = db.get_storage_for_slot(starting_ancient_slot).unwrap();
    let created_accounts = db.get_unique_accounts_from_storage(&storage);
    // At this point the dead ancient account should be removed
    // and storage capacity shrunk to the sum of alive bytes of
    // accounts it holds. This is the data lengths of the
    // accounts plus the length of their metadata.
    assert_eq!(
        created_accounts.capacity as usize,
        aligned_stored_size(1000) + aligned_stored_size(2000)
    );
    // The above check works only when the AppendVec storage is
    // used. More generally the pubkey of the smallest account
    // shouldn't be present in the shrunk storage, which is
    // validated by the following scan of the storage accounts.
    storage
        .accounts
        .scan_pubkeys(|pubkey| {
            assert_ne!(pubkey, &modified_account_pubkey);
        })
        .expect("must scan accounts storage");
}
#[test]
fn test_select_candidates_by_total_usage_no_candidates() {
    // no input candidates -- none should be selected
    agave_logger::setup();
    let candidates = ShrinkCandidates::default();
    let db = AccountsDb::new_single_for_tests();
    let (selected_candidates, next_candidates) =
        db.select_candidates_by_total_usage(&candidates, DEFAULT_ACCOUNTS_SHRINK_RATIO);
    assert_eq!(0, selected_candidates.len());
    assert_eq!(0, next_candidates.len());
}
#[test_case(StorageAccess::Mmap)]
#[test_case(StorageAccess::File)]
fn test_select_candidates_by_total_usage_3_way_split_condition(storage_access: StorageAccess) {
    // three candidates: one is selected for shrink, one is put back on the candidate
    // list, and one is ignored
    agave_logger::setup();
    let mut candidates = ShrinkCandidates::default();
    let db = AccountsDb::new_single_for_tests();
    let common_store_path = Path::new("");
    let store_file_size = 100;
    let store1_slot = 11;
    let store1 = Arc::new(AccountStorageEntry::new(
        common_store_path,
        store1_slot,
        store1_slot as AccountsFileId,
        store_file_size,
        AccountsFileProvider::AppendVec,
        storage_access,
    ));
    db.storage.insert(store1_slot, Arc::clone(&store1));
    store1.alive_bytes.store(0, Ordering::Release);
    candidates.insert(store1_slot);
    let store2_slot = 22;
    let store2 = Arc::new(AccountStorageEntry::new(
        common_store_path,
        store2_slot,
        store2_slot as AccountsFileId,
        store_file_size,
        AccountsFileProvider::AppendVec,
        storage_access,
    ));
    db.storage.insert(store2_slot, Arc::clone(&store2));
    store2
        .alive_bytes
        .store(store_file_size as usize / 2, Ordering::Release);
    candidates.insert(store2_slot);
    let store3_slot = 33;
    let store3 = Arc::new(AccountStorageEntry::new(
        common_store_path,
        store3_slot,
        store3_slot as AccountsFileId,
        store_file_size,
        AccountsFileProvider::AppendVec,
        storage_access,
    ));
    db.storage.insert(store3_slot, Arc::clone(&store3));
    store3
        .alive_bytes
        .store(store_file_size as usize, Ordering::Release);
    candidates.insert(store3_slot);
    // Set the target alive ratio to 0.6 so that only store1 is shrunk: after removing
    // store1, the remaining two stores' combined alive ratio is 0.75 (150 alive bytes /
    // 200 total bytes), which is above the target. The target is also larger than
    // store2's own alive ratio (0.5), so store2 is put back on the candidate list for
    // the next round.
    let target_alive_ratio = 0.6;
    let (selected_candidates, next_candidates) =
        db.select_candidates_by_total_usage(&candidates, target_alive_ratio);
    assert_eq!(1, selected_candidates.len());
    assert!(selected_candidates.contains(&store1_slot));
    assert_eq!(1, next_candidates.len());
    assert!(next_candidates.contains(&store2_slot));
}
#[test_case(StorageAccess::Mmap)]
#[test_case(StorageAccess::File)]
fn test_select_candidates_by_total_usage_2_way_split_condition(storage_access: StorageAccess) {
    // three candidates, 2 are selected for shrink, one is ignored
    agave_logger::setup();
    let db = AccountsDb::new_single_for_tests();
    let mut candidates = ShrinkCandidates::default();
    let common_store_path = Path::new("");
    let store_file_size = 100;
    let store1_slot = 11;
    let store1 = Arc::new(AccountStorageEntry::new(
        common_store_path,
        store1_slot,
        store1_slot as AccountsFileId,
        store_file_size,
        AccountsFileProvider::AppendVec,
        storage_access,
    ));
    db.storage.insert(store1_slot, Arc::clone(&store1));
    store1.alive_bytes.store(0, Ordering::Release);
    candidates.insert(store1_slot);
    let store2_slot = 22;
    let store2 = Arc::new(AccountStorageEntry::new(
        common_store_path,
        store2_slot,
        store2_slot as AccountsFileId,
        store_file_size,
        AccountsFileProvider::AppendVec,
        storage_access,
    ));
    db.storage.insert(store2_slot, Arc::clone(&store2));
    store2
        .alive_bytes
        .store(store_file_size as usize / 2, Ordering::Release);
    candidates.insert(store2_slot);
    let store3_slot = 33;
    let store3 = Arc::new(AccountStorageEntry::new(
        common_store_path,
        store3_slot,
        store3_slot as AccountsFileId,
        store_file_size,
        AccountsFileProvider::AppendVec,
        storage_access,
    ));
    db.storage.insert(store3_slot, Arc::clone(&store3));
    store3
        .alive_bytes
        .store(store_file_size as usize, Ordering::Release);
    candidates.insert(store3_slot);
    // Set the target ratio to default (0.8), both store1 and store2 must be selected and store3 is ignored.
    let target_alive_ratio = DEFAULT_ACCOUNTS_SHRINK_RATIO;
    let (selected_candidates, next_candidates) =
        db.select_candidates_by_total_usage(&candidates, target_alive_ratio);
    assert_eq!(2, selected_candidates.len());
    assert!(selected_candidates.contains(&store1_slot));
    assert!(selected_candidates.contains(&store2_slot));
    assert_eq!(0, next_candidates.len());
}
#[test_case(StorageAccess::Mmap)]
#[test_case(StorageAccess::File)]
fn test_select_candidates_by_total_usage_all_clean(storage_access: StorageAccess) {
    // 2 candidates, they must be selected to achieve the target alive ratio
    agave_logger::setup();
    let db = AccountsDb::new_single_for_tests();
    let mut candidates = ShrinkCandidates::default();
    let common_store_path = Path::new("");
    let store_file_size = 100;
    let store1_slot = 11;
    let store1 = Arc::new(AccountStorageEntry::new(
        common_store_path,
        store1_slot,
        store1_slot as AccountsFileId,
        store_file_size,
        AccountsFileProvider::AppendVec,
        storage_access,
    ));
    db.storage.insert(store1_slot, Arc::clone(&store1));
    store1
        .alive_bytes
        .store(store_file_size as usize / 4, Ordering::Release);
    candidates.insert(store1_slot);
    let store2_slot = 22;
    let store2 = Arc::new(AccountStorageEntry::new(
        common_store_path,
        store2_slot,
        store2_slot as AccountsFileId,
        store_file_size,
        AccountsFileProvider::AppendVec,
        storage_access,
    ));
    db.storage.insert(store2_slot, Arc::clone(&store2));
    store2
        .alive_bytes
        .store(store_file_size as usize / 2, Ordering::Release);
    candidates.insert(store2_slot);
    // Set the target ratio to default (0.8), both stores from the two different slots must be selected.
    let target_alive_ratio = DEFAULT_ACCOUNTS_SHRINK_RATIO;
    let (selected_candidates, next_candidates) =
        db.select_candidates_by_total_usage(&candidates, target_alive_ratio);
    assert_eq!(2, selected_candidates.len());
    assert!(selected_candidates.contains(&store1_slot));
    assert!(selected_candidates.contains(&store2_slot));
    assert_eq!(0, next_candidates.len());
}
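// Build an index where key0, key1 and key2 chain across stores 0..=3 (each store shares
// a key with the next), so no store's accounts can be purged independently. After
// `calc_delete_dependencies`, stores 0..=2 must all end up with a non-zero count,
// i.e. none of them is eligible for removal.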
#[test]
fn test_delete_dependencies() {
    agave_logger::setup();
    let accounts_index = AccountsIndex::<AccountInfo, AccountInfo>::default_for_tests();
    let key0 = Pubkey::new_from_array([0u8; 32]);
    let key1 = Pubkey::new_from_array([1u8; 32]);
    let key2 = Pubkey::new_from_array([2u8; 32]);
    let info0 = AccountInfo::new(StorageLocation::AppendVec(0, 0), true);
    let info1 = AccountInfo::new(StorageLocation::AppendVec(1, 0), true);
    let info2 = AccountInfo::new(StorageLocation::AppendVec(2, 0), true);
    let info3 = AccountInfo::new(StorageLocation::AppendVec(3, 0), true);
    let mut reclaims = ReclaimsSlotList::new();
    accounts_index.upsert(
        0,
        0,
        &key0,
        &AccountSharedData::default(),
        &AccountSecondaryIndexes::default(),
        info0,
        &mut reclaims,
        UpsertReclaim::IgnoreReclaims,
    );
    accounts_index.upsert(
        1,
        1,
        &key0,
        &AccountSharedData::default(),
        &AccountSecondaryIndexes::default(),
        info1,
        &mut reclaims,
        UpsertReclaim::IgnoreReclaims,
    );
    accounts_index.upsert(
        1,
        1,
        &key1,
        &AccountSharedData::default(),
        &AccountSecondaryIndexes::default(),
        info1,
        &mut reclaims,
        UpsertReclaim::IgnoreReclaims,
    );
    accounts_index.upsert(
        2,
        2,
        &key1,
        &AccountSharedData::default(),
        &AccountSecondaryIndexes::default(),
        info2,
        &mut reclaims,
        UpsertReclaim::IgnoreReclaims,
    );
    accounts_index.upsert(
        2,
        2,
        &key2,
        &AccountSharedData::default(),
        &AccountSecondaryIndexes::default(),
        info2,
        &mut reclaims,
        UpsertReclaim::IgnoreReclaims,
    );
    accounts_index.upsert(
        3,
        3,
        &key2,
        &AccountSharedData::default(),
        &AccountSecondaryIndexes::default(),
        info3,
        &mut reclaims,
        UpsertReclaim::IgnoreReclaims,
    );
    accounts_index.add_root(0);
    accounts_index.add_root(1);
    accounts_index.add_root(2);
    accounts_index.add_root(3);
    let num_bins = accounts_index.bins();
    let mut candidates: Box<_> = std::iter::repeat_with(HashMap::<Pubkey, CleaningInfo>::new)
        .take(num_bins)
        .collect();
    for key in [&key0, &key1, &key2] {
        let (rooted_entries, ref_count) = accounts_index.get_and_then(key, |entry| {
            let slot_list_lock = entry.unwrap().slot_list_read_lock();
            let rooted = accounts_index.get_rooted_entries(slot_list_lock.as_ref(), None);
            (false, (rooted, entry.unwrap().ref_count()))
        });
        let index = accounts_index.bin_calculator.bin_from_pubkey(key);
        let candidates_bin = &mut candidates[index];
        candidates_bin.insert(
            *key,
            CleaningInfo {
                slot_list: rooted_entries,
                ref_count,
                ..Default::default()
            },
        );
    }
    for candidates_bin in candidates.iter() {
        for (
            key,
            CleaningInfo {
                slot_list: list,
                ref_count,
                ..
            },
        ) in candidates_bin.iter()
        {
            info!(" purge {key} ref_count {ref_count} =>");
            for x in list {
                info!(" {x:?}");
            }
        }
    }
    let mut store_counts = HashMap::new();
    store_counts.insert(0, (0, HashSet::from_iter(vec![key0])));
    store_counts.insert(1, (0, HashSet::from_iter(vec![key0, key1])));
    store_counts.insert(2, (0, HashSet::from_iter(vec![key1, key2])));
    store_counts.insert(3, (1, HashSet::from_iter(vec![key2])));
    let accounts = AccountsDb::new_single_for_tests();
    accounts.calc_delete_dependencies(&candidates, &mut store_counts, None);
    let mut stores: Vec<_> = store_counts.keys().cloned().collect();
    stores.sort_unstable();
    for store in &stores {
        info!(
            "store: {:?} : {:?}",
            store,
            store_counts.get(store).unwrap()
        );
    }
    for x in 0..3 {
        // if the store count doesn't exist for this id, then it is implied to be > 0
        assert!(store_counts
            .get(&x)
            .map(|entry| entry.0 >= 1)
            .unwrap_or(true));
    }
}
#[test]
fn test_account_balance_for_capitalization_sysvar() {
    let normal_sysvar =
        solana_account::create_account_for_test(&solana_slot_history::SlotHistory::default());
    assert_eq!(normal_sysvar.lamports(), 1);
}
#[test]
fn test_account_balance_for_capitalization_native_program() {
    let normal_native_program = create_loadable_account_for_test("foo");
    assert_eq!(normal_native_program.lamports(), 1);
}
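// Storing a single default (data-less) account and flushing it should consume exactly
// STORE_META_OVERHEAD bytes in the slot's accounts file.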
#[test]
fn test_store_overhead() {
    agave_logger::setup();
    let accounts = AccountsDb::new_single_for_tests();
    let account = AccountSharedData::default();
    let pubkey = solana_pubkey::new_rand();
    accounts.store_for_tests((0, [(&pubkey, &account)].as_slice()));
    accounts.add_root_and_flush_write_cache(0);
    let store = accounts.storage.get_slot_storage_entry(0).unwrap();
    let total_len = store.accounts.len();
    info!("total: {total_len}");
    assert_eq!(total_len, STORE_META_OVERHEAD);
}
#[test]
fn test_store_clean_after_shrink() {
    agave_logger::setup();
    let accounts = AccountsDb::new_single_for_tests();
    let epoch_schedule = EpochSchedule::default();
    let account = AccountSharedData::new(1, 16 * 4096, &Pubkey::default());
    let pubkey1 = solana_pubkey::new_rand();
    accounts.store_for_tests((0, &[(&pubkey1, &account)][..]));
    let pubkey2 = solana_pubkey::new_rand();
    accounts.store_for_tests((0, &[(&pubkey2, &account)][..]));
    let zero_account = AccountSharedData::new(0, 1, &Pubkey::default());
    accounts.store_for_tests((1, &[(&pubkey1, &zero_account)][..]));
    // Add root 0 and flush separately
    accounts.add_root(0);
    accounts.flush_accounts_cache(true, None);
    // clear out the dirty keys
    accounts.clean_accounts_for_tests();
    // flush 1
    accounts.add_root(1);
    accounts.flush_accounts_cache(true, None);
    accounts.print_accounts_stats("pre-clean");
    // clean to remove pubkey1 from 0,
    // shrink to shrink pubkey1 from 0
    // then another clean to remove pubkey1 from slot 1
    accounts.clean_accounts_for_tests();
    accounts.shrink_candidate_slots(&epoch_schedule);
    accounts.clean_accounts_for_tests();
    accounts.print_accounts_stats("post-clean");
    accounts.assert_ref_count(&pubkey1, 0);
}
#[test]
#[should_panic(expected = "We've run out of storage ids!")]
fn test_wrapping_storage_id() {
    let db = AccountsDb::new_single_for_tests();
    let zero_lamport_account = AccountSharedData::new(0, 0, AccountSharedData::default().owner());
    // set 'next' id to the max possible value
    db.next_id.store(AccountsFileId::MAX, Ordering::Release);
    let slots = 3;
    let keys = (0..slots).map(|_| Pubkey::new_unique()).collect::<Vec<_>>();
    // write unique keys to successive slots
    keys.iter().enumerate().for_each(|(slot, key)| {
        let slot = slot as Slot;
        db.store_for_tests((slot, [(key, &zero_lamport_account)].as_slice()));
        db.add_root_and_flush_write_cache(slot);
    });
    assert_eq!(slots - 1, db.next_id.load(Ordering::Acquire));
    let ancestors = Ancestors::default();
    keys.iter().for_each(|key| {
        assert!(db.load_without_fixed_root(&ancestors, key).is_some());
    });
}
#[test]
#[should_panic(expected = "We've run out of storage ids!")]
fn test_reuse_storage_id() {
    agave_logger::setup();
    let db = AccountsDb::new_single_for_tests();
    let zero_lamport_account = AccountSharedData::new(0, 0, AccountSharedData::default().owner());
    // set 'next' id to the max possible value
    db.next_id.store(AccountsFileId::MAX, Ordering::Release);
    let slots = 3;
    let keys = (0..slots).map(|_| Pubkey::new_unique()).collect::<Vec<_>>();
    // write unique keys to successive slots
    keys.iter().enumerate().for_each(|(slot, key)| {
        let slot = slot as Slot;
        db.store_for_tests((slot, [(key, &zero_lamport_account)].as_slice()));
        db.add_root_and_flush_write_cache(slot);
        // reset next_id to what it was previously to cause us to re-use the same id
        db.next_id.store(AccountsFileId::MAX, Ordering::Release);
    });
    let ancestors = Ancestors::default();
    keys.iter().for_each(|key| {
        assert!(db.load_without_fixed_root(&ancestors, key).is_some());
    });
}
#[test]
fn test_zero_lamport_new_root_not_cleaned() {
    let db = AccountsDb::new_single_for_tests();
    let account_key = Pubkey::new_unique();
    let zero_lamport_account = AccountSharedData::new(0, 0, AccountSharedData::default().owner());
    // Store zero lamport account into slots 0 and 1, root both slots
    db.store_for_tests((0, [(&account_key, &zero_lamport_account)].as_slice()));
    db.store_for_tests((1, [(&account_key, &zero_lamport_account)].as_slice()));
    db.add_root_and_flush_write_cache(0);
    db.add_root_and_flush_write_cache(1);
    // Only clean zero lamport accounts up to slot 0
    db.clean_accounts(Some(0), false, &EpochSchedule::default());
    // Should still be able to find zero lamport account in slot 1
    assert_eq!(
        db.load_without_fixed_root(&Ancestors::default(), &account_key),
        Some((zero_lamport_account, 1))
    );
}
#[test]
fn test_store_load_cached() {
    let db = AccountsDb::new_single_for_tests();
    let key = Pubkey::default();
    let account0 = AccountSharedData::new(1, 0, &key);
    let slot = 0;
    db.store_for_tests((slot, &[(&key, &account0)][..]));
    // Load with no ancestors and no root will return nothing
    assert!(db
        .load_without_fixed_root(&Ancestors::default(), &key)
        .is_none());
    // Load with ancestors not equal to `slot` will return nothing
    let ancestors = vec![(slot + 1, 1)].into_iter().collect();
    assert!(db.load_without_fixed_root(&ancestors, &key).is_none());
    // Load with ancestors equal to `slot` will return the account
    let ancestors = vec![(slot, 1)].into_iter().collect();
    assert_eq!(
        db.load_without_fixed_root(&ancestors, &key),
        Some((account0.clone(), slot))
    );
    // Adding root will return the account even without ancestors
    db.add_root(slot);
    assert_eq!(
        db.load_without_fixed_root(&Ancestors::default(), &key),
        Some((account0, slot))
    );
}
#[test]
fn test_store_flush_load_cached() {
    let db = AccountsDb::new_single_for_tests();
    let key = Pubkey::default();
    let account0 = AccountSharedData::new(1, 0, &key);
    let slot = 0;
    db.store_for_tests((slot, &[(&key, &account0)][..]));
    db.mark_slot_frozen(slot);
    // No root was added yet, requires an ancestor to find
    // the account
    db.flush_accounts_cache(true, None);
    let ancestors = vec![(slot, 1)].into_iter().collect();
    assert_eq!(
        db.load_without_fixed_root(&ancestors, &key),
        Some((account0.clone(), slot))
    );
    // Add root then flush
    db.add_root(slot);
    db.flush_accounts_cache(true, None);
    assert_eq!(
        db.load_without_fixed_root(&Ancestors::default(), &key),
        Some((account0, slot))
    );
}
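// Flushing the write cache should persist the rooted slots (5 and 6) to storage while
// keeping the unrooted slot 4 resident in the cache and loadable via its ancestors.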
#[test]
fn test_flush_accounts_cache() {
    let db = AccountsDb::new_single_for_tests();
    let account0 = AccountSharedData::new(1, 0, &Pubkey::default());
    let unrooted_slot = 4;
    let root5 = 5;
    let root6 = 6;
    let unrooted_key = solana_pubkey::new_rand();
    let key5 = solana_pubkey::new_rand();
    let key6 = solana_pubkey::new_rand();
    db.store_for_tests((unrooted_slot, &[(&unrooted_key, &account0)][..]));
    db.store_for_tests((root5, &[(&key5, &account0)][..]));
    db.store_for_tests((root6, &[(&key6, &account0)][..]));
    for slot in &[unrooted_slot, root5, root6] {
        db.mark_slot_frozen(*slot);
    }
    db.add_root(root5);
    db.add_root(root6);
    // Unrooted slot should be able to be fetched before the flush
    let ancestors = vec![(unrooted_slot, 1)].into_iter().collect();
    assert_eq!(
        db.load_without_fixed_root(&ancestors, &unrooted_key),
        Some((account0.clone(), unrooted_slot))
    );
    db.flush_accounts_cache(true, None);
    // After the flush, the unrooted slot is still in the cache
    assert!(db
        .load_without_fixed_root(&ancestors, &unrooted_key)
        .is_some());
    assert!(db.accounts_index.contains(&unrooted_key));
    assert_eq!(db.accounts_cache.num_slots(), 1);
    assert!(db.accounts_cache.slot_cache(unrooted_slot).is_some());
    assert_eq!(
        db.load_without_fixed_root(&Ancestors::default(), &key5),
        Some((account0.clone(), root5))
    );
    assert_eq!(
        db.load_without_fixed_root(&Ancestors::default(), &key6),
        Some((account0, root6))
    );
}
fn max_cache_slots() -> usize {
    // this used to be the limiting factor - used here to facilitate tests.
    200
}
#[test]
fn test_flush_accounts_cache_if_needed() {
    run_test_flush_accounts_cache_if_needed(0, 2 * max_cache_slots());
    run_test_flush_accounts_cache_if_needed(2 * max_cache_slots(), 0);
    run_test_flush_accounts_cache_if_needed(max_cache_slots() - 1, 0);
    run_test_flush_accounts_cache_if_needed(0, max_cache_slots() - 1);
    run_test_flush_accounts_cache_if_needed(max_cache_slots(), 0);
    run_test_flush_accounts_cache_if_needed(0, max_cache_slots());
    run_test_flush_accounts_cache_if_needed(2 * max_cache_slots(), 2 * max_cache_slots());
    run_test_flush_accounts_cache_if_needed(max_cache_slots() - 1, max_cache_slots() - 1);
    run_test_flush_accounts_cache_if_needed(max_cache_slots(), max_cache_slots());
}
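// Drives a forced-flush scenario with `num_roots` rooted slots followed by
// `num_unrooted` unrooted slots, one account per slot. If the total fits within
// max_cache_slots() nothing is evicted; otherwise all roots are flushed and at most
// max_cache_slots() of the newest unrooted slots stay cached. Every account must
// remain loadable afterwards.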
fn run_test_flush_accounts_cache_if_needed(num_roots: usize, num_unrooted: usize) {
    let mut db = AccountsDb::new_single_for_tests();
    db.write_cache_limit_bytes = Some(max_cache_slots() as u64);
    let space = 1; // # data bytes per account. write cache counts data len
    let account0 = AccountSharedData::new(1, space, &Pubkey::default());
    let mut keys = vec![];
    let num_slots = 2 * max_cache_slots();
    for i in 0..num_roots + num_unrooted {
        let key = Pubkey::new_unique();
        db.store_for_tests((i as Slot, &[(&key, &account0)][..]));
        keys.push(key);
        db.mark_slot_frozen(i as Slot);
        if i < num_roots {
            db.add_root(i as Slot);
        }
    }
    db.flush_accounts_cache(false, None);
    let total_slots = num_roots + num_unrooted;
    // If there's <= the max size, then nothing will be flushed from the slot
    if total_slots <= max_cache_slots() {
        assert_eq!(db.accounts_cache.num_slots(), total_slots);
    } else {
        // Otherwise, all the roots are flushed, and only at most max_cache_slots()
        // of the unrooted slots are kept in the cache
        let expected_size = std::cmp::min(num_unrooted, max_cache_slots());
        if expected_size > 0 {
            // +1: slot is 1-based. slot 1 has 1 byte of data
            for unrooted_slot in (total_slots - expected_size + 1)..total_slots {
                assert!(
                    db.accounts_cache
                        .slot_cache(unrooted_slot as Slot)
                        .is_some(),
                    "unrooted_slot: {unrooted_slot}, total_slots: {total_slots}, expected_size: \
                     {expected_size}"
                );
            }
        }
    }
    // Should still be able to fetch all the accounts after flush
    for (slot, key) in (0..num_slots as Slot).zip(keys) {
        let ancestors = if slot < num_roots as Slot {
            Ancestors::default()
        } else {
            vec![(slot, 1)].into_iter().collect()
        };
        assert_eq!(
            db.load_without_fixed_root(&ancestors, &key),
            Some((account0.clone(), slot))
        );
    }
}
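// Loads through `load_with_fixed_root` should populate the read-only cache exactly once
// per key; a later zero-lamport store in the write cache then hides the key without
// growing the read-only cache.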
#[test]
fn test_read_only_accounts_cache() {
    let db = Arc::new(AccountsDb::new_single_for_tests());
    let account_key = Pubkey::new_unique();
    let zero_lamport_account = AccountSharedData::new(0, 0, AccountSharedData::default().owner());
    let slot1_account = AccountSharedData::new(1, 1, AccountSharedData::default().owner());
    db.store_for_tests((0, &[(&account_key, &zero_lamport_account)][..]));
    db.store_for_tests((1, &[(&account_key, &slot1_account)][..]));
    db.add_root(0);
    db.add_root(1);
    db.clean_accounts_for_tests();
    db.flush_accounts_cache(true, None);
    db.clean_accounts_for_tests();
    db.add_root(2);
    assert_eq!(db.read_only_accounts_cache.cache_len(), 0);
    let account = db
        .load_with_fixed_root(&Ancestors::default(), &account_key)
        .map(|(account, _)| account)
        .unwrap();
    assert_eq!(account.lamports(), 1);
    assert_eq!(db.read_only_accounts_cache.cache_len(), 1);
    let account = db
        .load_with_fixed_root(&Ancestors::default(), &account_key)
        .map(|(account, _)| account)
        .unwrap();
    assert_eq!(account.lamports(), 1);
    assert_eq!(db.read_only_accounts_cache.cache_len(), 1);
    db.store_for_tests((2, &[(&account_key, &zero_lamport_account)][..]));
    assert_eq!(db.read_only_accounts_cache.cache_len(), 1);
    let account = db
        .load_with_fixed_root(&Ancestors::default(), &account_key)
        .map(|(account, _)| account);
    assert!(account.is_none());
    assert_eq!(db.read_only_accounts_cache.cache_len(), 1);
}
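// `load_account_with` only populates the read-only cache when the caller opts in via
// its final flag, and it never caches an account that is still sitting in the write
// cache.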
#[test]
fn test_load_with_read_only_accounts_cache() {
    let db = Arc::new(AccountsDb::new_single_for_tests());
    let account_key = Pubkey::new_unique();
    let zero_lamport_account = AccountSharedData::new(0, 0, AccountSharedData::default().owner());
    let slot1_account = AccountSharedData::new(1, 1, AccountSharedData::default().owner());
    db.store_for_tests((0, &[(&account_key, &zero_lamport_account)][..]));
    db.store_for_tests((1, &[(&account_key, &slot1_account)][..]));
    db.add_root(0);
    db.add_root(1);
    db.clean_accounts_for_tests();
    db.flush_accounts_cache(true, None);
    db.clean_accounts_for_tests();
    db.add_root(2);
    assert_eq!(db.read_only_accounts_cache.cache_len(), 0);
    let (account, slot) = db
        .load_account_with(&Ancestors::default(), &account_key, false)
        .unwrap();
    assert_eq!(account.lamports(), 1);
    assert_eq!(db.read_only_accounts_cache.cache_len(), 0);
    assert_eq!(slot, 1);
    let (account, slot) = db
        .load_account_with(&Ancestors::default(), &account_key, true)
        .unwrap();
    assert_eq!(account.lamports(), 1);
    assert_eq!(db.read_only_accounts_cache.cache_len(), 1);
    assert_eq!(slot, 1);
    db.store_for_tests((2, &[(&account_key, &zero_lamport_account)][..]));
    let account = db.load_account_with(&Ancestors::default(), &account_key, false);
    assert!(account.is_none());
    assert_eq!(db.read_only_accounts_cache.cache_len(), 1);
    db.read_only_accounts_cache.reset_for_tests();
    assert_eq!(db.read_only_accounts_cache.cache_len(), 0);
    let account = db.load_account_with(&Ancestors::default(), &account_key, true);
    assert!(account.is_none());
    assert_eq!(db.read_only_accounts_cache.cache_len(), 0);
    let slot2_account = AccountSharedData::new(2, 1, AccountSharedData::default().owner());
    db.store_for_tests((2, &[(&account_key, &slot2_account)][..]));
    let (account, slot) = db
        .load_account_with(&Ancestors::default(), &account_key, false)
        .unwrap();
    assert_eq!(account.lamports(), 2);
    assert_eq!(db.read_only_accounts_cache.cache_len(), 0);
    assert_eq!(slot, 2);
    let slot2_account = AccountSharedData::new(2, 1, AccountSharedData::default().owner());
    db.store_for_tests((2, &[(&account_key, &slot2_account)][..]));
    let (account, slot) = db
        .load_account_with(&Ancestors::default(), &account_key, true)
        .unwrap();
    assert_eq!(account.lamports(), 2);
    // The account shouldn't be added to read_only_cache because it is in write_cache.
    assert_eq!(db.read_only_accounts_cache.cache_len(), 0);
    assert_eq!(slot, 2);
}
/// a test that will accept either answer
const LOAD_ZERO_LAMPORTS_ANY_TESTS: LoadZeroLamports = LoadZeroLamports::None;
#[test]
fn test_flush_cache_clean() {
    let db = Arc::new(AccountsDb::new_single_for_tests());
    let account_key = Pubkey::new_unique();
    let zero_lamport_account = AccountSharedData::new(0, 0, AccountSharedData::default().owner());
    let slot1_account = AccountSharedData::new(1, 1, AccountSharedData::default().owner());
    db.store_for_tests((0, &[(&account_key, &zero_lamport_account)][..]));
    db.store_for_tests((1, &[(&account_key, &slot1_account)][..]));
    db.add_root(0);
    db.add_root(1);
    // Clean should not remove anything yet as nothing has been flushed
    db.clean_accounts_for_tests();
    let account = db
        .do_load(
            &Ancestors::default(),
            &account_key,
            Some(0),
            LoadHint::Unspecified,
            LoadZeroLamports::SomeWithZeroLamportAccountForTests,
        )
        .unwrap();
    assert_eq!(account.0.lamports(), 0);
    // since this item is in the cache, it should not be in the read only cache
    assert_eq!(db.read_only_accounts_cache.cache_len(), 0);
    // Flush, then clean again. Should not need another root to initiate the cleaning
    // because `accounts_index.uncleaned_roots` should be correct
    db.flush_accounts_cache(true, None);
    db.clean_accounts_for_tests();
    assert!(db
        .do_load(
            &Ancestors::default(),
            &account_key,
            Some(0),
            LoadHint::Unspecified,
            LOAD_ZERO_LAMPORTS_ANY_TESTS
        )
        .is_none());
}
#[test_case(MarkObsoleteAccounts::Enabled)]
#[test_case(MarkObsoleteAccounts::Disabled)]
fn test_flush_cache_dont_clean_zero_lamport_account(mark_obsolete_accounts: MarkObsoleteAccounts) {
    let db = AccountsDb::new_with_config(
        Vec::new(),
        AccountsDbConfig {
            mark_obsolete_accounts,
            ..ACCOUNTS_DB_CONFIG_FOR_TESTING
        },
        None,
        Arc::default(),
    );
    // If there is no latest full snapshot, zero lamport accounts can be cleaned and removed
    // immediately. Set latest full snapshot slot to zero to avoid cleaning zero lamport accounts
    db.set_latest_full_snapshot_slot(0);
    let zero_lamport_account_key = Pubkey::new_unique();
    let other_account_key = Pubkey::new_unique();
    let original_lamports = 1;
    let slot0_account =
        AccountSharedData::new(original_lamports, 1, AccountSharedData::default().owner());
    let zero_lamport_account = AccountSharedData::new(0, 0, AccountSharedData::default().owner());
    // Store into slot 0, and then flush the slot to storage
    db.store_for_tests((0, &[(&zero_lamport_account_key, &slot0_account)][..]));
    // Second key keeps other lamport account entry for slot 0 alive,
    // preventing clean of the zero_lamport_account in slot 1.
    db.store_for_tests((0, &[(&other_account_key, &slot0_account)][..]));
    db.add_root(0);
    db.flush_accounts_cache(true, None);
    assert!(db.storage.get_slot_storage_entry(0).is_some());
    // Store into slot 1, a dummy slot that will be dead and purged before flush
    db.store_for_tests((1, &[(&zero_lamport_account_key, &zero_lamport_account)][..]));
    // Store into slot 2, which makes all updates from slot 1 outdated.
    // This means slot 1 is a dead slot. Later, slot 1 will be cleaned/purged
    // before it even reaches storage, but this purge of slot 1 should not affect
    // the refcount of `zero_lamport_account_key` because cached keys do not bump
    // the refcount in the index. This means clean should *not* remove
    // `zero_lamport_account_key` from slot 2
    db.store_for_tests((2, &[(&zero_lamport_account_key, &zero_lamport_account)][..]));
    db.add_root(1);
    db.add_root(2);
    // Flush, then clean. Should not need another root to initiate the cleaning
    // because `accounts_index.uncleaned_roots` should be correct
    db.flush_accounts_cache(true, None);
    db.clean_accounts_for_tests();
    // The `zero_lamport_account_key` is still alive in slot 0, so refcount for the
    // pubkey should be 2
    if mark_obsolete_accounts == MarkObsoleteAccounts::Disabled {
        db.assert_ref_count(&zero_lamport_account_key, 2);
    } else {
        // However, if obsolete accounts are enabled, it will only be alive in slot 2
        db.assert_ref_count(&zero_lamport_account_key, 1);
    }
    db.assert_ref_count(&other_account_key, 1);
    // The zero-lamport account in slot 2 should not be purged yet, because the
    // entry in slot 0 is blocking cleanup of the zero-lamport account.
    // With obsolete accounts enabled, the zero lamport account being newer
    // than the latest full snapshot blocks cleanup
    let max_root = None;
    // Fine to simulate a transaction load since we are not doing any out of band
    // removals, only using clean_accounts
    let load_hint = LoadHint::FixedMaxRoot;
    assert_eq!(
        db.do_load(
            &Ancestors::default(),
            &zero_lamport_account_key,
            max_root,
            load_hint,
            LoadZeroLamports::SomeWithZeroLamportAccountForTests,
        )
        .unwrap()
        .0
        .lamports(),
        0
    );
}
/// Ensure that rooting a slot and flushing it in the write cache populates `uncleaned_pubkeys`,
/// and then that `clean` removes the slot afterwards.
#[test]
fn test_flush_cache_populates_uncleaned_pubkeys() {
    let accounts_db = AccountsDb::new_single_for_tests();
    let slot = 123;
    let pubkey = Pubkey::new_unique();
    let account = AccountSharedData::new(10, 0, &Pubkey::default());
    // storing accounts doesn't add anything to uncleaned_pubkeys
    accounts_db.store_for_tests((slot, [(pubkey, account)].as_slice()));
    assert_eq!(accounts_db.get_len_of_slots_with_uncleaned_pubkeys(), 0);
    // ...but ensure that rooting and flushing the write cache does
    accounts_db.add_root_and_flush_write_cache(slot);
    assert_eq!(accounts_db.get_len_of_slots_with_uncleaned_pubkeys(), 1);
    // ...and then clean removes the slot from uncleaned_pubkeys
    accounts_db.clean_accounts_for_tests();
    assert_eq!(accounts_db.get_len_of_slots_with_uncleaned_pubkeys(), 0);
}
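// Helper used by the scan tests below: spawns a background `scan_accounts` that parks on
// `stall_key` until `exit()` is called, letting a test hold a scan open while it flushes
// and cleans.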
struct ScanTracker {
    t_scan: JoinHandle<()>,
    exit: Arc<AtomicBool>,
}
impl ScanTracker {
    fn exit(self) -> thread::Result<()> {
        self.exit.store(true, Ordering::Relaxed);
        self.t_scan.join()
    }
}
fn setup_scan(
    db: Arc<AccountsDb>,
    scan_ancestors: Arc<Ancestors>,
    bank_id: BankId,
    stall_key: Pubkey,
) -> ScanTracker {
    let exit = Arc::new(AtomicBool::new(false));
    let exit_ = exit.clone();
    let ready = Arc::new(AtomicBool::new(false));
    let ready_ = ready.clone();
    let t_scan = Builder::new()
        .name("scan".to_string())
        .spawn(move || {
            db.scan_accounts(
                &scan_ancestors,
                bank_id,
                |maybe_account| {
                    ready_.store(true, Ordering::Relaxed);
                    if let Some((pubkey, _, _)) = maybe_account {
                        if *pubkey == stall_key {
                            loop {
                                if exit_.load(Ordering::Relaxed) {
                                    break;
                                } else {
                                    sleep(Duration::from_millis(10));
                                }
                            }
                        }
                    }
                },
                &ScanConfig::default(),
            )
            .unwrap();
        })
        .unwrap();
    // Wait for scan to start
    while !ready.load(Ordering::Relaxed) {
        sleep(Duration::from_millis(10));
    }
    ScanTracker { t_scan, exit }
}
#[test]
fn test_scan_flush_accounts_cache_then_clean_drop() {
    let db = Arc::new(AccountsDb::new_single_for_tests());
    let account_key = Pubkey::new_unique();
    let account_key2 = Pubkey::new_unique();
    let zero_lamport_account = AccountSharedData::new(0, 0, AccountSharedData::default().owner());
    let slot1_account = AccountSharedData::new(1, 1, AccountSharedData::default().owner());
    let slot2_account = AccountSharedData::new(2, 1, AccountSharedData::default().owner());
    /*
       Store zero lamport account into slots 0, 1, 2 where
       root slots are 0, 2, and slot 1 is unrooted.
                           0 (root)
                          /        \
                         1          2 (root)
    */
    db.store_for_tests((0, &[(&account_key, &zero_lamport_account)][..]));
    db.store_for_tests((1, &[(&account_key, &slot1_account)][..]));
    // Fodder for the scan so that the lock on `account_key` is not held
    db.store_for_tests((1, &[(&account_key2, &slot1_account)][..]));
    db.store_for_tests((2, &[(&account_key, &slot2_account)][..]));
    let max_scan_root = 0;
    db.add_root(max_scan_root);
    let scan_ancestors: Arc<Ancestors> = Arc::new(vec![(0, 1), (1, 1)].into_iter().collect());
    let bank_id = 0;
    let scan_tracker = setup_scan(db.clone(), scan_ancestors.clone(), bank_id, account_key2);
    // Add a new root 2
    let new_root = 2;
    db.add_root(new_root);
    // Check that the scan is properly set up
    assert_eq!(
        db.accounts_index.min_ongoing_scan_root().unwrap(),
        max_scan_root
    );
    // If we specify a requested_flush_root == 2, then `slot 2 <= max_flush_slot` will
    // be flushed even though `slot 2 > max_scan_root`. The unrooted slot 1 should
    // remain in the cache
    db.flush_accounts_cache(true, Some(new_root));
    assert_eq!(db.accounts_cache.num_slots(), 1);
    assert!(db.accounts_cache.slot_cache(1).is_some());
    // Intra cache cleaning should not clean the entry for `account_key` from slot 0,
    // even though it was updated in slot `2` because of the ongoing scan
    let account = db
        .do_load(
            &Ancestors::default(),
            &account_key,
            Some(0),
            LoadHint::Unspecified,
            LoadZeroLamports::SomeWithZeroLamportAccountForTests,
        )
        .unwrap();
    assert_eq!(account.0.lamports(), zero_lamport_account.lamports());
    // Run clean, unrooted slot 1 should not be purged, and still readable from the cache,
    // because we're still doing a scan on it.
    db.clean_accounts_for_tests();
    let account = db
        .do_load(
            &scan_ancestors,
            &account_key,
            Some(max_scan_root),
            LoadHint::Unspecified,
            LOAD_ZERO_LAMPORTS_ANY_TESTS,
        )
        .unwrap();
    assert_eq!(account.0.lamports(), slot1_account.lamports());
    // When the scan is over, clean should not panic and should not purge something
    // still in the cache.
    scan_tracker.exit().unwrap();
    db.clean_accounts_for_tests();
    let account = db
        .do_load(
            &scan_ancestors,
            &account_key,
            Some(max_scan_root),
            LoadHint::Unspecified,
            LOAD_ZERO_LAMPORTS_ANY_TESTS,
        )
        .unwrap();
    assert_eq!(account.0.lamports(), slot1_account.lamports());
    // Simulate dropping the bank, which finally removes the slot from the cache
    let bank_id = 1;
    db.purge_slot(1, bank_id, false);
    assert!(db
        .do_load(
            &scan_ancestors,
            &account_key,
            Some(max_scan_root),
            LoadHint::Unspecified,
            LOAD_ZERO_LAMPORTS_ANY_TESTS
        )
        .is_none());
}
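// Test-only convenience: fetch the lone storage created for `slot`, panicking if the
// slot has no storage entry.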
impl AccountsDb {
    fn get_and_assert_single_storage(&self, slot: Slot) -> Arc<AccountStorageEntry> {
        self.storage.get_slot_storage_entry(slot).unwrap()
    }
}
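// Removing stored accounts one at a time as dead should shrink the storage's
// `alive_bytes` by each account's aligned stored size, and mark them obsolete,
// until the count reaches zero and all bytes are considered dead.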
define_accounts_db_test!(test_alive_bytes, |accounts_db| {
    let slot: Slot = 0;
    let num_keys = 10;
    let mut num_obsolete_accounts = 0;
    for data_size in 0..num_keys {
        let account = AccountSharedData::new(1, data_size, &Pubkey::default());
        accounts_db.store_for_tests((slot, &[(&Pubkey::new_unique(), &account)][..]));
    }
    accounts_db.add_root(slot);
    accounts_db.flush_accounts_cache(true, None);
    // Flushing cache should only create one storage entry
    let storage0 = accounts_db.get_and_assert_single_storage(slot);
    storage0
        .accounts
        .scan_accounts_without_data(|_offset, account| {
            let before_size = storage0.alive_bytes();
            let account_info = accounts_db
                .accounts_index
                .get_and_then(account.pubkey(), |entry| {
                    // Should only be one entry per key, since every key was only stored to slot 0
                    (false, entry.unwrap().slot_list_read_lock()[0])
                });
            assert_eq!(account_info.0, slot);
            let reclaims = [account_info];
            num_obsolete_accounts += reclaims.len();
            accounts_db.remove_dead_accounts(
                reclaims.iter(),
                None,
                MarkAccountsObsolete::Yes(slot),
            );
            let after_size = storage0.alive_bytes();
            if storage0.count() == 0 {
                // when `remove_dead_accounts` reaches 0 accounts, all bytes are marked as dead
                assert_eq!(after_size, 0);
            } else {
                let stored_size_aligned = storage0.accounts.calculate_stored_size(account.data_len);
                assert_eq!(before_size, after_size + stored_size_aligned);
                assert_eq!(
                    storage0
                        .obsolete_accounts_read_lock()
                        .filter_obsolete_accounts(None)
                        .count(),
                    num_obsolete_accounts
                );
            }
        })
        .expect("must scan accounts storage");
});
// Test alive_bytes_exclude_zero_lamport_single_ref_accounts calculation
define_accounts_db_test!(
    test_alive_bytes_exclude_zero_lamport_single_ref_accounts,
    |accounts_db| {
        let slot: Slot = 0;
        let num_keys = 10;
        let mut pubkeys = vec![];
        // populate storage with zero lamport single ref (zlsr) accounts
        for _i in 0..num_keys {
            let zero_account = AccountSharedData::new(0, 0, AccountSharedData::default().owner());
            let key = Pubkey::new_unique();
            accounts_db.store_for_tests((slot, &[(&key, &zero_account)][..]));
            pubkeys.push(key);
        }
        accounts_db.add_root(slot);
        accounts_db.flush_accounts_cache(true, None);
        // Flushing cache should only create one storage entry
        let storage = accounts_db.get_and_assert_single_storage(slot);
        let alive_bytes = storage.alive_bytes();
        assert!(alive_bytes > 0);
        // scan the accounts to track zlsr accounts
        accounts_db.accounts_index.scan(
            pubkeys.iter(),
            |_pubkey, slots_refs| {
                let (slot_list, ref_count) = slots_refs.unwrap();
                assert_eq!(slot_list.len(), 1);
                assert_eq!(ref_count, 1);
                let (slot, acct_info) = slot_list.first().unwrap();
                assert_eq!(*slot, 0);
                accounts_db.zero_lamport_single_ref_found(*slot, acct_info.offset());
                AccountsIndexScanResult::OnlyKeepInMemoryIfDirty
            },
            None,
            ScanFilter::All,
        );
        // assert the number of zlsr accounts
        assert_eq!(storage.num_zero_lamport_single_ref_accounts(), num_keys);
        // assert the "alive_bytes_exclude_zero_lamport_single_ref_accounts"
        match accounts_db.accounts_file_provider {
            AccountsFileProvider::AppendVec => {
                assert_eq!(
                    storage.alive_bytes_exclude_zero_lamport_single_ref_accounts(),
                    0
                );
            }
            AccountsFileProvider::HotStorage => {
                // For tiered-storage, alive bytes are only an approximation.
                // Therefore, it won't be zero.
                assert!(
                    storage.alive_bytes_exclude_zero_lamport_single_ref_accounts() < alive_bytes
                );
            }
        }
    }
);
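// Shared setup for the cache-clean tests: create `num_slots` rooted slots held in the
// write cache where slot N stores keys[N..], optionally start a scan stalled at
// `scan_slot`, and return the db, keys, slots, and the scan tracker (if any).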
fn setup_accounts_db_cache_clean(
    num_slots: usize,
    scan_slot: Option<Slot>,
    write_cache_limit_bytes: Option<u64>,
) -> (Arc<AccountsDb>, Vec<Pubkey>, Vec<Slot>, Option<ScanTracker>) {
    let mut accounts_db = AccountsDb::new_single_for_tests();
    accounts_db.write_cache_limit_bytes = write_cache_limit_bytes;
    let accounts_db = Arc::new(accounts_db);
    let slots: Vec<_> = (0..num_slots as Slot).collect();
    let stall_slot = num_slots as Slot;
    let scan_stall_key = Pubkey::new_unique();
    let keys: Vec<Pubkey> = std::iter::repeat_with(Pubkey::new_unique)
        .take(num_slots)
        .collect();
    if scan_slot.is_some() {
        accounts_db.store_for_tests(
            // Store it in a slot that isn't returned in `slots`
            (
                stall_slot,
                &[(
                    &scan_stall_key,
                    &AccountSharedData::new(1, 0, &Pubkey::default()),
                )][..],
            ),
        );
    }
    // Store some subset of the keys in slots 0..num_slots
    let mut scan_tracker = None;
    for slot in &slots {
        for key in &keys[*slot as usize..] {
            let space = 1; // 1 byte allows us to track by size
            accounts_db.store_for_tests((
                *slot,
                &[(key, &AccountSharedData::new(1, space, &Pubkey::default()))][..],
            ));
        }
        accounts_db.add_root(*slot as Slot);
        if Some(*slot) == scan_slot {
            let ancestors = Arc::new(vec![(stall_slot, 1), (*slot, 1)].into_iter().collect());
            let bank_id = 0;
            scan_tracker = Some(setup_scan(
                accounts_db.clone(),
                ancestors,
                bank_id,
                scan_stall_key,
            ));
            assert_eq!(
                accounts_db.accounts_index.min_ongoing_scan_root().unwrap(),
                *slot
            );
        }
    }
    accounts_db.accounts_cache.remove_slot(stall_slot);
    // If there's <= max_cache_slots(), no slots should be flushed
    if accounts_db.accounts_cache.num_slots() <= max_cache_slots() {
        accounts_db.flush_accounts_cache(false, None);
        assert_eq!(accounts_db.accounts_cache.num_slots(), num_slots);
    }
    (accounts_db, keys, slots, scan_tracker)
}
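// Overwriting every key in a newer rooted slot makes all earlier slots dead: after the
// flush, loads capped at the old max root find nothing, and the dead slots' storages
// end up with no accounts while the alive slot keeps them all.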
  3345. #[test]
  3346. fn test_accounts_db_cache_clean_dead_slots() {
  3347. let num_slots = 10;
  3348. let (accounts_db, keys, mut slots, _) = setup_accounts_db_cache_clean(num_slots, None, None);
  3349. let last_dead_slot = (num_slots - 1) as Slot;
  3350. assert_eq!(*slots.last().unwrap(), last_dead_slot);
  3351. let alive_slot = last_dead_slot as Slot + 1;
  3352. slots.push(alive_slot);
  3353. for key in &keys {
  3354. // Store a slot that overwrites all previous keys, rendering all previous keys dead
  3355. accounts_db.store_for_tests((
  3356. alive_slot,
  3357. &[(key, &AccountSharedData::new(1, 0, &Pubkey::default()))][..],
  3358. ));
  3359. accounts_db.add_root(alive_slot);
  3360. }
  3361. // Before the flush, we can find entries in the database for slots < alive_slot if we specify
  3362. // a smaller max root
  3363. for key in &keys {
  3364. assert!(accounts_db
  3365. .do_load(
  3366. &Ancestors::default(),
  3367. key,
  3368. Some(last_dead_slot),
  3369. LoadHint::Unspecified,
  3370. LOAD_ZERO_LAMPORTS_ANY_TESTS
  3371. )
  3372. .is_some());
  3373. }
  3374. // If no `max_clean_root` is specified, cleaning should purge all flushed slots
  3375. accounts_db.flush_accounts_cache(true, None);
  3376. assert_eq!(accounts_db.accounts_cache.num_slots(), 0);
  3377. assert_eq!(
  3378. accounts_db.accounts_cache.fetch_max_flush_root(),
  3379. alive_slot,
  3380. );
  3381. // Specifying a max_root < alive_slot, should not return any more entries,
  3382. // as those have been purged from the accounts index for the dead slots.
  3383. for key in &keys {
  3384. assert!(accounts_db
  3385. .do_load(
  3386. &Ancestors::default(),
  3387. key,
  3388. Some(last_dead_slot),
  3389. LoadHint::Unspecified,
  3390. LOAD_ZERO_LAMPORTS_ANY_TESTS
  3391. )
  3392. .is_none());
  3393. }
  3394. // Each slot should only have one entry in the storage, since all other accounts were
  3395. // cleaned due to later updates
  3396. for slot in &slots {
  3397. if let ScanStorageResult::Stored(slot_accounts) = accounts_db.scan_account_storage(
  3398. *slot as Slot,
  3399. |_| Some(0),
  3400. |slot_accounts: &mut HashSet<Pubkey>, stored_account, _data| {
  3401. slot_accounts.insert(*stored_account.pubkey());
  3402. },
  3403. ScanAccountStorageData::NoData,
  3404. ) {
  3405. if *slot == alive_slot {
  3406. assert_eq!(slot_accounts.len(), keys.len());
  3407. } else {
  3408. assert!(slot_accounts.is_empty());
  3409. }
  3410. } else {
  3411. panic!("Expected slot to be in storage, not cache");
  3412. }
  3413. }
  3414. }
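/// Flush the cache with clean and verify that each flushed slot retains only the single
/// account that was not overwritten by a later slot.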
  3415. #[test]
  3416. fn test_accounts_db_cache_clean() {
  3417. let (accounts_db, keys, slots, _) = setup_accounts_db_cache_clean(10, None, None);
  3418. // If no `max_clean_root` is specified, cleaning should purge all flushed slots
  3419. accounts_db.flush_accounts_cache(true, None);
  3420. assert_eq!(accounts_db.accounts_cache.num_slots(), 0);
  3421. assert_eq!(
  3422. accounts_db.accounts_cache.fetch_max_flush_root(),
  3423. *slots.last().unwrap()
  3424. );
  3425. // Each slot should only have one entry in the storage, since all other accounts were
  3426. // cleaned due to later updates
  3427. for slot in &slots {
  3428. if let ScanStorageResult::Stored(slot_account) = accounts_db.scan_account_storage(
  3429. *slot as Slot,
  3430. |_| Some(0),
  3431. |slot_account: &mut Pubkey, stored_account, _data| {
  3432. *slot_account = *stored_account.pubkey();
  3433. },
  3434. ScanAccountStorageData::NoData,
  3435. ) {
  3436. assert_eq!(slot_account, keys[*slot as usize]);
  3437. } else {
  3438. panic!("Everything should have been flushed")
  3439. }
  3440. }
  3441. }
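/// Drives `flush_accounts_cache()` with an explicit `requested_flush_root` and verifies which
/// slots get flushed (and cleaned), depending on whether the cache is over `max_cache_slots()`
/// and whether an ongoing scan prevents cleaning of the scanned slots.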
  3442. fn run_test_accounts_db_cache_clean_max_root(
  3443. num_slots: usize,
  3444. requested_flush_root: Slot,
  3445. scan_root: Option<Slot>,
  3446. ) {
  3447. assert!(requested_flush_root < (num_slots as Slot));
  3448. let (accounts_db, keys, slots, scan_tracker) =
  3449. setup_accounts_db_cache_clean(num_slots, scan_root, Some(max_cache_slots() as u64));
  3450. let is_cache_at_limit = num_slots - requested_flush_root as usize - 1 > max_cache_slots();
// If:
// 1) `requested_flush_root` is specified, and
// 2) the cache is not at its limit, i.e. `is_cache_at_limit == false`,
// then `flush_accounts_cache()` should clean and flush only slots <= requested_flush_root.
  3455. accounts_db.flush_accounts_cache(true, Some(requested_flush_root));
  3456. if !is_cache_at_limit {
  3457. // Should flush all slots between 0..=requested_flush_root
  3458. assert_eq!(
  3459. accounts_db.accounts_cache.num_slots(),
  3460. slots.len() - requested_flush_root as usize - 1
  3461. );
  3462. } else {
  3463. // Otherwise, if we are at the cache limit, all roots will be flushed
  3464. assert_eq!(accounts_db.accounts_cache.num_slots(), 0,);
  3465. }
  3466. let expected_max_flushed_root = if !is_cache_at_limit {
  3467. // Should flush all slots between 0..=requested_flush_root
  3468. requested_flush_root
  3469. } else {
  3470. // Otherwise, if we are at the cache limit, all roots will be flushed
  3471. num_slots as Slot - 1
  3472. };
  3473. assert_eq!(
  3474. accounts_db.accounts_cache.fetch_max_flush_root(),
  3475. expected_max_flushed_root,
  3476. );
  3477. for slot in &slots {
  3478. let slot_accounts = accounts_db.scan_account_storage(
  3479. *slot as Slot,
  3480. |loaded_account| {
  3481. assert!(
  3482. !is_cache_at_limit,
  3483. "When cache is at limit, all roots should have been flushed to storage"
  3484. );
  3485. // All slots <= requested_flush_root should have been flushed, regardless
  3486. // of ongoing scans
  3487. assert!(*slot > requested_flush_root);
  3488. Some(*loaded_account.pubkey())
  3489. },
  3490. |slot_accounts: &mut HashSet<Pubkey>, stored_account, _data| {
  3491. slot_accounts.insert(*stored_account.pubkey());
  3492. if !is_cache_at_limit {
  3493. // Only true when the limit hasn't been reached and there are still
  3494. // slots left in the cache
  3495. assert!(*slot <= requested_flush_root);
  3496. }
  3497. },
  3498. ScanAccountStorageData::NoData,
  3499. );
  3500. let slot_accounts = match slot_accounts {
  3501. ScanStorageResult::Cached(slot_accounts) => {
  3502. slot_accounts.into_iter().collect::<HashSet<Pubkey>>()
  3503. }
  3504. ScanStorageResult::Stored(slot_accounts) => {
  3505. slot_accounts.into_iter().collect::<HashSet<Pubkey>>()
  3506. }
  3507. };
  3508. let expected_accounts =
  3509. if *slot >= requested_flush_root || *slot >= scan_root.unwrap_or(Slot::MAX) {
  3510. // 1) If slot > `requested_flush_root`, then either:
  3511. // a) If `is_cache_at_limit == false`, still in the cache
  3512. // b) if `is_cache_at_limit == true`, were not cleaned before being flushed to storage.
  3513. //
  3514. // In both cases all the *original* updates at index `slot` were uncleaned and thus
  3515. // should be discoverable by this scan.
  3516. //
  3517. // 2) If slot == `requested_flush_root`, the slot was not cleaned before being flushed to storage,
  3518. // so it also contains all the original updates.
  3519. //
  3520. // 3) If *slot >= scan_root, then we should not clean it either
  3521. keys[*slot as usize..]
  3522. .iter()
  3523. .cloned()
  3524. .collect::<HashSet<Pubkey>>()
  3525. } else {
// Slots less than both `requested_flush_root` and `scan_root` were cleaned in the cache
// before being flushed to storage, so they should only contain one account
  3528. std::iter::once(keys[*slot as usize]).collect::<HashSet<Pubkey>>()
  3529. };
  3530. assert_eq!(slot_accounts, expected_accounts);
  3531. }
  3532. if let Some(scan_tracker) = scan_tracker {
  3533. scan_tracker.exit().unwrap();
  3534. }
  3535. }
  3536. #[test]
  3537. fn test_accounts_db_cache_clean_max_root() {
  3538. let requested_flush_root = 5;
  3539. run_test_accounts_db_cache_clean_max_root(10, requested_flush_root, None);
  3540. }
  3541. #[test]
  3542. fn test_accounts_db_cache_clean_max_root_with_scan() {
  3543. let requested_flush_root = 5;
  3544. run_test_accounts_db_cache_clean_max_root(
  3545. 10,
  3546. requested_flush_root,
  3547. Some(requested_flush_root - 1),
  3548. );
  3549. run_test_accounts_db_cache_clean_max_root(
  3550. 10,
  3551. requested_flush_root,
  3552. Some(requested_flush_root + 1),
  3553. );
  3554. }
  3555. #[test]
  3556. fn test_accounts_db_cache_clean_max_root_with_cache_limit_hit() {
  3557. let requested_flush_root = 5;
  3558. // Test that if there are > max_cache_slots() in the cache after flush, then more roots
  3559. // will be flushed
  3560. run_test_accounts_db_cache_clean_max_root(
  3561. max_cache_slots() + requested_flush_root as usize + 2,
  3562. requested_flush_root,
  3563. None,
  3564. );
  3565. }
  3566. #[test]
  3567. fn test_accounts_db_cache_clean_max_root_with_cache_limit_hit_and_scan() {
  3568. let requested_flush_root = 5;
  3569. // Test that if there are > max_cache_slots() in the cache after flush, then more roots
  3570. // will be flushed
  3571. run_test_accounts_db_cache_clean_max_root(
  3572. max_cache_slots() + requested_flush_root as usize + 2,
  3573. requested_flush_root,
  3574. Some(requested_flush_root - 1),
  3575. );
  3576. run_test_accounts_db_cache_clean_max_root(
  3577. max_cache_slots() + requested_flush_root as usize + 2,
  3578. requested_flush_root,
  3579. Some(requested_flush_root + 1),
  3580. );
  3581. }
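/// Flushes all rooted slots via `flush_rooted_accounts_cache()` and checks that older slots are
/// cleaned before the flush only when `should_clean` is true.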
  3582. fn run_flush_rooted_accounts_cache(should_clean: bool) {
  3583. let num_slots = 10;
  3584. let (accounts_db, keys, slots, _) = setup_accounts_db_cache_clean(num_slots, None, None);
  3585. // If no cleaning is specified, then flush everything
  3586. accounts_db.flush_rooted_accounts_cache(None, should_clean);
  3587. for slot in &slots {
  3588. let ScanStorageResult::Stored(slot_accounts) = accounts_db.scan_account_storage(
  3589. *slot as Slot,
  3590. |_| Some(0),
  3591. |slot_account: &mut HashSet<Pubkey>, stored_account, _data| {
  3592. slot_account.insert(*stored_account.pubkey());
  3593. },
  3594. ScanAccountStorageData::NoData,
  3595. ) else {
  3596. panic!("All roots should have been flushed to storage");
  3597. };
  3598. let expected_accounts = if !should_clean || slot == slots.last().unwrap() {
  3599. // The slot was not cleaned before being flushed to storage,
  3600. // so it also contains all the original updates.
  3601. keys[*slot as usize..]
  3602. .iter()
  3603. .cloned()
  3604. .collect::<HashSet<Pubkey>>()
  3605. } else {
  3606. // If clean was specified, only the latest slot should have all the updates.
  3607. // All these other slots have been cleaned before flush
  3608. std::iter::once(keys[*slot as usize]).collect::<HashSet<Pubkey>>()
  3609. };
  3610. assert_eq!(slot_accounts, expected_accounts);
  3611. }
  3612. }
  3613. #[test]
  3614. fn test_flush_rooted_accounts_cache_with_clean() {
  3615. run_flush_rooted_accounts_cache(true);
  3616. }
  3617. #[test]
  3618. fn test_flush_rooted_accounts_cache_without_clean() {
  3619. run_flush_rooted_accounts_cache(false);
  3620. }
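/// Shrink slot 0 after one of its accounts was superseded, then make the slot fully dead and
/// verify clean drops its storage while the superseded account keeps the expected ref count.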
  3621. #[test]
  3622. fn test_shrink_unref() {
  3623. let db = AccountsDb::new_single_for_tests();
  3624. let epoch_schedule = EpochSchedule::default();
  3625. let account_key1 = Pubkey::new_unique();
  3626. let account_key2 = Pubkey::new_unique();
  3627. let account1 = AccountSharedData::new(1, 0, AccountSharedData::default().owner());
  3628. // Store into slot 0
  3629. db.store_for_tests((0, [(&account_key1, &account1)].as_slice()));
  3630. db.store_for_tests((0, [(&account_key2, &account1)].as_slice()));
  3631. db.add_root(0);
  3632. // Make account_key1 in slot 0 outdated by updating in rooted slot 1
  3633. db.store_for_tests((1, &[(&account_key1, &account1)][..]));
  3634. db.add_root(1);
  3635. // Flush without cleaning to avoid reclaiming account_key1 early
  3636. db.flush_rooted_accounts_cache(None, false);
  3637. // Clean to remove outdated entry from slot 0
  3638. db.clean_accounts(Some(1), false, &EpochSchedule::default());
  3639. // Shrink Slot 0
  3640. {
  3641. let mut shrink_candidate_slots = db.shrink_candidate_slots.lock().unwrap();
  3642. shrink_candidate_slots.insert(0);
  3643. }
  3644. db.shrink_candidate_slots(&epoch_schedule);
  3645. // Make slot 0 dead by updating the remaining key
  3646. db.store_for_tests((2, &[(&account_key2, &account1)][..]));
  3647. db.add_root(2);
  3648. // Flush without cleaning to avoid reclaiming account_key2 early
  3649. db.flush_rooted_accounts_cache(None, false);
  3650. // Should be one store before clean for slot 0
  3651. db.get_and_assert_single_storage(0);
  3652. db.clean_accounts(Some(2), false, &EpochSchedule::default());
  3653. // No stores should exist for slot 0 after clean
  3654. assert_no_storages_at_slot(&db, 0);
  3655. // Ref count for `account_key1` (account removed earlier by shrink)
  3656. // should be 1, since it was only stored in slot 0 and 1, and slot 0
  3657. // is now dead
  3658. db.assert_ref_count(&account_key1, 1);
  3659. }
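/// A zero-lamport update that becomes the only (single-ref) version of an account should let
/// clean drop both the original slot and the zero-lamport slot entirely.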
  3660. #[test]
  3661. fn test_clean_drop_dead_zero_lamport_single_ref_accounts() {
  3662. let accounts_db = AccountsDb::new_single_for_tests();
  3663. let epoch_schedule = EpochSchedule::default();
  3664. let key1 = Pubkey::new_unique();
  3665. let zero_account = AccountSharedData::new(0, 0, AccountSharedData::default().owner());
  3666. let one_account = AccountSharedData::new(1, 0, AccountSharedData::default().owner());
// slot 0 - store a 1-lamport account
  3668. let slot = 0;
  3669. accounts_db.store_for_tests((slot, &[(&key1, &one_account)][..]));
  3670. accounts_db.add_root(slot);
// slot 1 - store a 0-lamport account
  3672. let slot = 1;
  3673. accounts_db.store_for_tests((slot, &[(&key1, &zero_account)][..]));
  3674. accounts_db.add_root(slot);
  3675. accounts_db.flush_accounts_cache(true, None);
  3676. // run clean
  3677. accounts_db.clean_accounts(Some(1), false, &epoch_schedule);
  3678. // After clean, both slot0 and slot1 should be marked dead and dropped
  3679. // from the store map.
  3680. assert!(accounts_db.storage.get_slot_storage_entry(0).is_none());
  3681. assert!(accounts_db.storage.get_slot_storage_entry(1).is_none());
  3682. }
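/// When clean drops a dead storage, it should also detect zero-lamport accounts that became
/// single-ref as a result and queue their (still partially alive) slot for shrinking.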
  3683. #[test]
  3684. fn test_clean_drop_dead_storage_handle_zero_lamport_single_ref_accounts() {
  3685. let db = AccountsDb::new_single_for_tests();
  3686. let account_key1 = Pubkey::new_unique();
  3687. let account_key2 = Pubkey::new_unique();
  3688. let account1 = AccountSharedData::new(1, 0, AccountSharedData::default().owner());
  3689. let account0 = AccountSharedData::new(0, 0, AccountSharedData::default().owner());
  3690. // Store into slot 0
  3691. db.store_for_tests((0, [(&account_key1, &account1)].as_slice()));
  3692. db.add_root_and_flush_write_cache(0);
  3693. // Make account_key1 in slot 0 outdated by updating in rooted slot 1 with a zero lamport account
  3694. // And store one additional live account to make the store still alive after clean.
  3695. db.store_for_tests((1, &[(&account_key1, &account0)][..]));
  3696. db.store_for_tests((1, &[(&account_key2, &account1)][..]));
  3697. db.add_root(1);
  3698. // Flushes all roots
  3699. db.flush_accounts_cache(true, None);
// Clean should mark slot 0 dead and drop it. While dropping slot 0, clean
// will find that slot 1 now has a single-ref zero-lamport account and mark it.
  3702. db.clean_accounts(Some(1), false, &EpochSchedule::default());
  3703. // Assert that after clean, slot 0 is dropped.
  3704. assert!(db.storage.get_slot_storage_entry(0).is_none());
// And slot 1's single-ref zero-lamport account is marked. Because slot 1
// still has one other alive account, it is not completely dead, so it won't
// be a candidate for "clean" to drop. Instead, it becomes a candidate for
// the next round of shrinking.
  3709. db.assert_ref_count(&account_key1, 1);
  3710. assert_eq!(
  3711. db.get_and_assert_single_storage(1)
  3712. .num_zero_lamport_single_ref_accounts(),
  3713. 1
  3714. );
  3715. assert!(db.shrink_candidate_slots.lock().unwrap().contains(&1));
  3716. }
/// Tests that shrink correctly marks newly single-ref zero-lamport accounts and sends them to clean.
/// This test is still relevant with obsolete accounts enabled, but can be removed if all
/// scenarios where flush_write_cache doesn't clean are eliminated.
  3720. #[test]
  3721. fn test_shrink_unref_handle_zero_lamport_single_ref_accounts() {
  3722. let db = AccountsDb::new_single_for_tests();
  3723. let epoch_schedule = EpochSchedule::default();
  3724. let account_key1 = Pubkey::new_unique();
  3725. let account_key2 = Pubkey::new_unique();
  3726. let account1 = AccountSharedData::new(1, 0, AccountSharedData::default().owner());
  3727. let account0 = AccountSharedData::new(0, 0, AccountSharedData::default().owner());
  3728. // Store into slot 0
  3729. db.store_for_tests((0, [(&account_key1, &account1)].as_slice()));
  3730. db.store_for_tests((0, [(&account_key2, &account1)].as_slice()));
  3731. db.add_root_and_flush_write_cache(0);
  3732. // Make account_key1 in slot 0 outdated by updating in rooted slot 1 with a zero lamport account
  3733. db.store_for_tests((1, &[(&account_key1, &account0)][..]));
  3734. db.add_root(1);
  3735. // Flushes all roots without clean
  3736. db.flush_rooted_accounts_cache(None, false);
  3737. // Clean to remove outdated entry from slot 0
  3738. db.clean_accounts(Some(1), false, &EpochSchedule::default());
  3739. // Shrink Slot 0
  3740. {
  3741. let mut shrink_candidate_slots = db.shrink_candidate_slots.lock().unwrap();
  3742. shrink_candidate_slots.insert(0);
  3743. }
  3744. db.shrink_candidate_slots(&epoch_schedule);
// After shrinking slot 0, check that the zero-lamport account in slot 1
// is marked, since it has become single-ref.
  3747. db.assert_ref_count(&account_key1, 1);
  3748. assert_eq!(
  3749. db.get_and_assert_single_storage(1)
  3750. .num_zero_lamport_single_ref_accounts(),
  3751. 1
  3752. );
// Now slot 1 should be marked completely dead and added to the dirty
// stores, which handle dropping dead storage. It WON'T participate in
// shrinking in the next round.
  3756. assert!(db.dirty_stores.contains_key(&1));
  3757. assert!(!db.shrink_candidate_slots.lock().unwrap().contains(&1));
  3758. // Now, make slot 0 dead by updating the remaining key
  3759. db.store_for_tests((2, &[(&account_key2, &account1)][..]));
  3760. db.add_root(2);
  3761. // Flushes all roots
  3762. db.flush_accounts_cache(true, None);
  3763. // Should be one store before clean for slot 1
  3764. db.get_and_assert_single_storage(1);
  3765. db.clean_accounts(Some(2), false, &EpochSchedule::default());
  3766. // No stores should exist for slot 0. If obsolete accounts are enabled, slot 0 stores are
  3767. // cleaned when slot 2 is flushed. If obsolete accounts are disabled, slot 0 stores are
  3768. // cleaned during the clean_accounts function call.
  3769. assert_no_storages_at_slot(&db, 0);
// No store should exist for slot 1 either, as it has only a zero-lamport single-ref account.
  3771. assert_no_storages_at_slot(&db, 1);
  3772. // Store 2 should have a single account.
  3773. db.assert_ref_count(&account_key2, 1);
  3774. db.get_and_assert_single_storage(2);
  3775. }
  3776. define_accounts_db_test!(test_partial_clean, |db| {
  3777. let account_key1 = Pubkey::new_unique();
  3778. let account_key2 = Pubkey::new_unique();
  3779. let account1 = AccountSharedData::new(1, 0, AccountSharedData::default().owner());
  3780. let account2 = AccountSharedData::new(2, 0, AccountSharedData::default().owner());
  3781. let account3 = AccountSharedData::new(3, 0, AccountSharedData::default().owner());
  3782. let account4 = AccountSharedData::new(4, 0, AccountSharedData::default().owner());
  3783. // Store accounts into slots 0 and 1
  3784. db.store_for_tests((
  3785. 0,
  3786. [(&account_key1, &account1), (&account_key2, &account1)].as_slice(),
  3787. ));
  3788. db.store_for_tests((1, [(&account_key1, &account2)].as_slice()));
  3789. db.print_accounts_stats("pre-clean1");
// clean accounts - no accounts should be cleaned, since there are no rooted slots
//
// This checks that the uncleaned_pubkeys are not prematurely removed, so that
// when the slots are later rooted and can actually be cleaned, the delta keys
// are still there.
  3795. db.clean_accounts_for_tests();
  3796. db.print_accounts_stats("post-clean1");
  3797. // Assert that cache entries are still present
  3798. assert!(!db.accounts_cache.slot_cache(0).unwrap().is_empty());
  3799. assert!(!db.accounts_cache.slot_cache(1).unwrap().is_empty());
  3800. // root slot 0
  3801. db.add_root_and_flush_write_cache(0);
  3802. // store into slot 2
  3803. db.store_for_tests((
  3804. 2,
  3805. [(&account_key2, &account3), (&account_key1, &account3)].as_slice(),
  3806. ));
  3807. db.clean_accounts_for_tests();
  3808. db.print_accounts_stats("post-clean2");
// root slot 1
  3810. db.add_root_and_flush_write_cache(1);
  3811. db.clean_accounts_for_tests();
  3812. db.print_accounts_stats("post-clean3");
  3813. db.store_for_tests((3, [(&account_key2, &account4)].as_slice()));
  3814. db.add_root_and_flush_write_cache(3);
  3815. // Check that we can clean where max_root=3 and slot=2 is not rooted
  3816. db.clean_accounts_for_tests();
  3817. assert!(db.uncleaned_pubkeys.is_empty());
  3818. db.print_accounts_stats("post-clean4");
  3819. assert!(db.storage.is_empty_entry(0));
  3820. assert!(!db.storage.is_empty_entry(1));
  3821. });
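// Constants shared by the racy load/flush/shrink tests below: `RACY_SLEEP_MS` is the injected
// artificial load delay (and inter-store sleep) in milliseconds, and `RACE_TIME` is how long,
// in seconds, each race is allowed to run.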
  3822. const RACY_SLEEP_MS: u64 = 10;
  3823. const RACE_TIME: u64 = 5;
  3824. fn start_load_thread(
  3825. with_retry: bool,
  3826. ancestors: Ancestors,
  3827. db: Arc<AccountsDb>,
  3828. exit: Arc<AtomicBool>,
  3829. pubkey: Arc<Pubkey>,
  3830. expected_lamports: impl Fn(&(AccountSharedData, Slot)) -> u64 + Send + 'static,
  3831. ) -> JoinHandle<()> {
  3832. let load_hint = if with_retry {
  3833. LoadHint::FixedMaxRoot
  3834. } else {
  3835. LoadHint::Unspecified
  3836. };
  3837. std::thread::Builder::new()
  3838. .name("account-do-load".to_string())
  3839. .spawn(move || {
  3840. loop {
  3841. if exit.load(Ordering::Relaxed) {
  3842. return;
  3843. }
// Meddle with load_limit to cover all branches of the implementation.
// There should be absolutely no behavioral difference; the load_limit-triggered
// slow branch should only affect performance.
// Ordering::Relaxed is ok because there are no data dependencies; the modified
// field is a completely free-standing cfg(test) control-flow knob.
  3849. db.load_limit
  3850. .store(thread_rng().gen_range(0..10) as u64, Ordering::Relaxed);
  3851. // Load should never be unable to find this key
  3852. let loaded_account = db
  3853. .do_load(
  3854. &ancestors,
  3855. &pubkey,
  3856. None,
  3857. load_hint,
  3858. LOAD_ZERO_LAMPORTS_ANY_TESTS,
  3859. )
  3860. .unwrap();
  3861. // slot + 1 == account.lamports because of the account-cache-flush thread
  3862. assert_eq!(
  3863. loaded_account.0.lamports(),
  3864. expected_lamports(&loaded_account)
  3865. );
  3866. }
  3867. })
  3868. .unwrap()
  3869. }
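/// Races a loader thread against a thread that repeatedly stores, roots, and flushes the same
/// pubkey; every load must observe lamports == slot + 1.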
  3870. #[test]
  3871. fn test_load_account_and_cache_flush_race() {
  3872. agave_logger::setup();
  3873. let mut db = AccountsDb::new_single_for_tests();
  3874. db.load_delay = RACY_SLEEP_MS;
  3875. let db = Arc::new(db);
  3876. let pubkey = Arc::new(Pubkey::new_unique());
  3877. let exit = Arc::new(AtomicBool::new(false));
  3878. db.store_for_tests((
  3879. 0,
  3880. &[(
  3881. pubkey.as_ref(),
  3882. &AccountSharedData::new(1, 0, AccountSharedData::default().owner()),
  3883. )][..],
  3884. ));
  3885. db.add_root(0);
  3886. db.flush_accounts_cache(true, None);
  3887. let t_flush_accounts_cache = {
  3888. let db = db.clone();
  3889. let exit = exit.clone();
  3890. let pubkey = pubkey.clone();
  3891. let mut account = AccountSharedData::new(1, 0, AccountSharedData::default().owner());
  3892. std::thread::Builder::new()
  3893. .name("account-cache-flush".to_string())
  3894. .spawn(move || {
  3895. let mut slot: Slot = 1;
  3896. loop {
  3897. if exit.load(Ordering::Relaxed) {
  3898. return;
  3899. }
  3900. account.set_lamports(slot + 1);
  3901. db.store_for_tests((slot, &[(pubkey.as_ref(), &account)][..]));
  3902. db.add_root(slot);
  3903. sleep(Duration::from_millis(RACY_SLEEP_MS));
  3904. db.flush_accounts_cache(true, None);
  3905. slot += 1;
  3906. }
  3907. })
  3908. .unwrap()
  3909. };
  3910. let t_do_load = start_load_thread(
  3911. false,
  3912. Ancestors::default(),
  3913. db,
  3914. exit.clone(),
  3915. pubkey,
  3916. |(_, slot)| slot + 1,
  3917. );
  3918. sleep(Duration::from_secs(RACE_TIME));
  3919. exit.store(true, Ordering::Relaxed);
  3920. t_flush_accounts_cache.join().unwrap();
  3921. t_do_load.join().map_err(std::panic::resume_unwind).unwrap()
  3922. }
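/// Races a loader thread against repeated shrinking of the slot holding the account; the load
/// must always observe the stored lamports, with or without the retry load hint.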
  3923. fn do_test_load_account_and_shrink_race(with_retry: bool) {
  3924. let mut db = AccountsDb::new_single_for_tests();
  3925. let epoch_schedule = EpochSchedule::default();
  3926. db.load_delay = RACY_SLEEP_MS;
  3927. let db = Arc::new(db);
  3928. let pubkey = Arc::new(Pubkey::new_unique());
  3929. let exit = Arc::new(AtomicBool::new(false));
  3930. let slot = 1;
  3931. // Store an account
  3932. let lamports = 42;
  3933. let mut account = AccountSharedData::new(1, 0, AccountSharedData::default().owner());
  3934. account.set_lamports(lamports);
  3935. db.store_for_tests((slot, [(pubkey.as_ref(), &account)].as_slice()));
  3936. // Set the slot as a root so account loads will see the contents of this slot
  3937. db.add_root(slot);
  3938. let t_shrink_accounts = {
  3939. let db = db.clone();
  3940. let exit = exit.clone();
  3941. std::thread::Builder::new()
  3942. .name("account-shrink".to_string())
  3943. .spawn(move || loop {
  3944. if exit.load(Ordering::Relaxed) {
  3945. return;
  3946. }
  3947. // Simulate adding shrink candidates from clean_accounts()
  3948. db.shrink_candidate_slots.lock().unwrap().insert(slot);
  3949. db.shrink_candidate_slots(&epoch_schedule);
  3950. })
  3951. .unwrap()
  3952. };
  3953. let t_do_load = start_load_thread(
  3954. with_retry,
  3955. Ancestors::default(),
  3956. db,
  3957. exit.clone(),
  3958. pubkey,
  3959. move |_| lamports,
  3960. );
  3961. sleep(Duration::from_secs(RACE_TIME));
  3962. exit.store(true, Ordering::Relaxed);
  3963. t_shrink_accounts.join().unwrap();
  3964. t_do_load.join().map_err(std::panic::resume_unwind).unwrap()
  3965. }
  3966. #[test]
  3967. fn test_load_account_and_shrink_race_with_retry() {
  3968. do_test_load_account_and_shrink_race(true);
  3969. }
  3970. #[test]
  3971. fn test_load_account_and_shrink_race_without_retry() {
  3972. do_test_load_account_and_shrink_race(false);
  3973. }
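/// Races `flush_slot_cache()` against `remove_unrooted_slots()` on the same unrooted slot over
/// many trials; the test relies on both threads completing every trial without panicking.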
  3974. #[test]
  3975. fn test_cache_flush_delayed_remove_unrooted_race() {
  3976. let mut db = AccountsDb::new_single_for_tests();
  3977. db.load_delay = RACY_SLEEP_MS;
  3978. let db = Arc::new(db);
  3979. let slot = 10;
  3980. let bank_id = 10;
  3981. let lamports = 42;
  3982. let mut account = AccountSharedData::new(1, 0, AccountSharedData::default().owner());
  3983. account.set_lamports(lamports);
  3984. // Start up a thread to flush the accounts cache
  3985. let (flush_trial_start_sender, flush_trial_start_receiver) = crossbeam_channel::unbounded();
  3986. let (flush_done_sender, flush_done_receiver) = crossbeam_channel::unbounded();
  3987. let t_flush_cache = {
  3988. let db = db.clone();
  3989. std::thread::Builder::new()
  3990. .name("account-cache-flush".to_string())
  3991. .spawn(move || loop {
  3992. // Wait for the signal to start a trial
  3993. if flush_trial_start_receiver.recv().is_err() {
  3994. return;
  3995. }
  3996. db.flush_slot_cache(10);
  3997. flush_done_sender.send(()).unwrap();
  3998. })
  3999. .unwrap()
  4000. };
// Start up a thread to remove the slot
  4002. let (remove_trial_start_sender, remove_trial_start_receiver) = crossbeam_channel::unbounded();
  4003. let (remove_done_sender, remove_done_receiver) = crossbeam_channel::unbounded();
  4004. let t_remove = {
  4005. let db = db.clone();
  4006. std::thread::Builder::new()
  4007. .name("account-remove".to_string())
  4008. .spawn(move || loop {
  4009. // Wait for the signal to start a trial
  4010. if remove_trial_start_receiver.recv().is_err() {
  4011. return;
  4012. }
  4013. db.remove_unrooted_slots(&[(slot, bank_id)]);
  4014. remove_done_sender.send(()).unwrap();
  4015. })
  4016. .unwrap()
  4017. };
  4018. let num_trials = 10;
  4019. for _ in 0..num_trials {
  4020. let pubkey = Pubkey::new_unique();
  4021. db.store_for_tests((slot, &[(&pubkey, &account)][..]));
// Signal both threads to start a trial, then wait for both to finish
  4023. flush_trial_start_sender.send(()).unwrap();
  4024. remove_trial_start_sender.send(()).unwrap();
  4025. let _ = flush_done_receiver.recv();
  4026. let _ = remove_done_receiver.recv();
  4027. }
  4028. drop(flush_trial_start_sender);
  4029. drop(remove_trial_start_sender);
  4030. t_flush_cache.join().unwrap();
  4031. t_remove.join().unwrap();
  4032. }
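/// Races cache flushing against `remove_unrooted_slots()` across many slots: dumped slots must
/// vanish from the cache, storage, and index, while kept slots must remain loadable.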
  4033. #[test]
  4034. fn test_cache_flush_remove_unrooted_race_multiple_slots() {
  4035. let db = AccountsDb::new_single_for_tests();
  4036. let db = Arc::new(db);
  4037. let num_cached_slots = 100;
  4038. let num_trials = 100;
  4039. let (new_trial_start_sender, new_trial_start_receiver) = crossbeam_channel::unbounded();
  4040. let (flush_done_sender, flush_done_receiver) = crossbeam_channel::unbounded();
  4041. // Start up a thread to flush the accounts cache
  4042. let t_flush_cache = {
  4043. let db = db.clone();
  4044. std::thread::Builder::new()
  4045. .name("account-cache-flush".to_string())
  4046. .spawn(move || loop {
  4047. // Wait for the signal to start a trial
  4048. if new_trial_start_receiver.recv().is_err() {
  4049. return;
  4050. }
  4051. for slot in 0..num_cached_slots {
  4052. db.flush_slot_cache(slot);
  4053. }
  4054. flush_done_sender.send(()).unwrap();
  4055. })
  4056. .unwrap()
  4057. };
  4058. let exit = Arc::new(AtomicBool::new(false));
  4059. let t_spurious_signal = {
  4060. let db = db.clone();
  4061. let exit = exit.clone();
  4062. std::thread::Builder::new()
  4063. .name("account-cache-flush".to_string())
  4064. .spawn(move || loop {
  4065. if exit.load(Ordering::Relaxed) {
  4066. return;
  4067. }
  4068. // Simulate spurious wake-up that can happen, but is too rare to
  4069. // otherwise depend on in tests.
  4070. db.remove_unrooted_slots_synchronization.signal.notify_all();
  4071. })
  4072. .unwrap()
  4073. };
  4074. // Run multiple trials. Has the added benefit of rewriting the same slots after we've
  4075. // dumped them in previous trials.
  4076. for _ in 0..num_trials {
  4077. // Store an account
  4078. let lamports = 42;
  4079. let mut account = AccountSharedData::new(1, 0, AccountSharedData::default().owner());
  4080. account.set_lamports(lamports);
  4081. // Pick random 50% of the slots to pass to `remove_unrooted_slots()`
  4082. let mut all_slots: Vec<(Slot, BankId)> = (0..num_cached_slots)
  4083. .map(|slot| {
  4084. let bank_id = slot + 1;
  4085. (slot, bank_id)
  4086. })
  4087. .collect();
  4088. all_slots.shuffle(&mut rand::thread_rng());
  4089. let slots_to_dump = &all_slots[0..num_cached_slots as usize / 2];
  4090. let slots_to_keep = &all_slots[num_cached_slots as usize / 2..];
// Set up one account per slot across many different slots, tracking which
// pubkey was stored in each slot.
  4093. let slot_to_pubkey_map: HashMap<Slot, Pubkey> = (0..num_cached_slots)
  4094. .map(|slot| {
  4095. let pubkey = Pubkey::new_unique();
  4096. db.store_for_tests((slot, &[(&pubkey, &account)][..]));
  4097. (slot, pubkey)
  4098. })
  4099. .collect();
// Signal the flushing thread to start flushing
  4101. new_trial_start_sender.send(()).unwrap();
  4102. // Here we want to test both:
  4103. // 1) Flush thread starts flushing a slot before we try dumping it.
  4104. // 2) Flushing thread trying to flush while/after we're trying to dump the slot,
  4105. // in which case flush should ignore/move past the slot to be dumped
  4106. //
// Hence, we split the slots to dump into chunks so that the dumping of each chunk races
// with the flushes. If we were to dump all of the slots at once, that would reduce the
// possibility of the flush occurring first, since the dumping logic immediately reserves
// all the slots it's about to dump.
  4111. for chunks in slots_to_dump.chunks(slots_to_dump.len() / 2) {
  4112. db.remove_unrooted_slots(chunks);
  4113. }
  4114. // Check that all the slots in `slots_to_dump` were completely removed from the
  4115. // cache, storage, and index
  4116. for (slot, _) in slots_to_dump {
  4117. assert_no_storages_at_slot(&db, *slot);
  4118. assert!(db.accounts_cache.slot_cache(*slot).is_none());
  4119. let account_in_slot = slot_to_pubkey_map[slot];
  4120. assert!(!db.accounts_index.contains(&account_in_slot));
  4121. }
  4122. // Wait for flush to finish before starting next trial
  4123. flush_done_receiver.recv().unwrap();
  4124. for (slot, bank_id) in slots_to_keep {
  4125. let account_in_slot = slot_to_pubkey_map[slot];
  4126. assert!(db
  4127. .load(
  4128. &Ancestors::from(vec![(*slot, 0)]),
  4129. &account_in_slot,
  4130. LoadHint::FixedMaxRoot
  4131. )
  4132. .is_some());
  4133. // Clear for next iteration so that `assert!(self.storage.get_slot_storage_entry(purged_slot).is_none());`
  4134. // in `purge_slot_pubkeys()` doesn't trigger
  4135. db.remove_unrooted_slots(&[(*slot, *bank_id)]);
  4136. }
  4137. }
  4138. exit.store(true, Ordering::Relaxed);
  4139. drop(new_trial_start_sender);
  4140. t_flush_cache.join().unwrap();
  4141. t_spurious_signal.join().unwrap();
  4142. }
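/// `collect_uncleaned_slots_up_to_slot()` should return every uncleaned slot at or below the
/// given slot, and nothing above it.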
  4143. #[test]
  4144. fn test_collect_uncleaned_slots_up_to_slot() {
  4145. agave_logger::setup();
  4146. let db = AccountsDb::new_single_for_tests();
  4147. let slot1 = 11;
  4148. let slot2 = 222;
  4149. let slot3 = 3333;
  4150. let pubkey1 = Pubkey::new_unique();
  4151. let pubkey2 = Pubkey::new_unique();
  4152. let pubkey3 = Pubkey::new_unique();
  4153. db.uncleaned_pubkeys.insert(slot1, vec![pubkey1]);
  4154. db.uncleaned_pubkeys.insert(slot2, vec![pubkey2]);
  4155. db.uncleaned_pubkeys.insert(slot3, vec![pubkey3]);
  4156. let mut uncleaned_slots1 = db.collect_uncleaned_slots_up_to_slot(slot1);
  4157. let mut uncleaned_slots2 = db.collect_uncleaned_slots_up_to_slot(slot2);
  4158. let mut uncleaned_slots3 = db.collect_uncleaned_slots_up_to_slot(slot3);
  4159. uncleaned_slots1.sort_unstable();
  4160. uncleaned_slots2.sort_unstable();
  4161. uncleaned_slots3.sort_unstable();
  4162. assert_eq!(uncleaned_slots1, [slot1]);
  4163. assert_eq!(uncleaned_slots2, [slot1, slot2]);
  4164. assert_eq!(uncleaned_slots3, [slot1, slot2, slot3]);
  4165. }
  4166. #[test]
  4167. fn test_remove_uncleaned_slots_and_collect_pubkeys_up_to_slot() {
  4168. agave_logger::setup();
  4169. let db = AccountsDb::new_single_for_tests();
  4170. let slot1 = 11;
  4171. let slot2 = 222;
  4172. let slot3 = 3333;
  4173. let pubkey1 = Pubkey::new_unique();
  4174. let pubkey2 = Pubkey::new_unique();
  4175. let pubkey3 = Pubkey::new_unique();
  4176. let account1 = AccountSharedData::new(0, 0, &pubkey1);
  4177. let account2 = AccountSharedData::new(0, 0, &pubkey2);
  4178. let account3 = AccountSharedData::new(0, 0, &pubkey3);
  4179. db.store_for_tests((slot1, [(&pubkey1, &account1)].as_slice()));
  4180. db.store_for_tests((slot2, [(&pubkey2, &account2)].as_slice()));
  4181. db.store_for_tests((slot3, [(&pubkey3, &account3)].as_slice()));
  4182. // slot 1 is _not_ a root on purpose
  4183. db.add_root(slot2);
  4184. db.add_root(slot3);
  4185. db.uncleaned_pubkeys.insert(slot1, vec![pubkey1]);
  4186. db.uncleaned_pubkeys.insert(slot2, vec![pubkey2]);
  4187. db.uncleaned_pubkeys.insert(slot3, vec![pubkey3]);
  4188. let num_bins = db.accounts_index.bins();
  4189. let candidates: Box<_> =
  4190. std::iter::repeat_with(|| RwLock::new(HashMap::<Pubkey, CleaningInfo>::new()))
  4191. .take(num_bins)
  4192. .collect();
  4193. db.remove_uncleaned_slots_up_to_slot_and_move_pubkeys(slot3, &candidates);
  4194. let candidates_contain = |pubkey: &Pubkey| {
  4195. candidates
  4196. .iter()
  4197. .any(|bin| bin.read().unwrap().contains(pubkey))
  4198. };
  4199. assert!(candidates_contain(&pubkey1));
  4200. assert!(candidates_contain(&pubkey2));
  4201. assert!(candidates_contain(&pubkey3));
  4202. }
  4203. #[test_case(StorageAccess::Mmap)]
  4204. #[test_case(StorageAccess::File)]
  4205. fn test_shrink_productive(storage_access: StorageAccess) {
  4206. agave_logger::setup();
  4207. let path = Path::new("");
  4208. let file_size = 100;
  4209. let slot = 11;
  4210. let store = Arc::new(AccountStorageEntry::new(
  4211. path,
  4212. slot,
  4213. slot as AccountsFileId,
  4214. file_size,
  4215. AccountsFileProvider::AppendVec,
  4216. storage_access,
  4217. ));
  4218. store.add_account(file_size as usize);
  4219. assert!(!AccountsDb::is_shrinking_productive(&store));
  4220. let store = Arc::new(AccountStorageEntry::new(
  4221. path,
  4222. slot,
  4223. slot as AccountsFileId,
  4224. file_size,
  4225. AccountsFileProvider::AppendVec,
  4226. storage_access,
  4227. ));
  4228. store.add_account(file_size as usize / 2);
  4229. store.add_account(file_size as usize / 4);
  4230. store.remove_accounts(file_size as usize / 4, 1);
  4231. assert!(AccountsDb::is_shrinking_productive(&store));
  4232. store.add_account(file_size as usize / 2);
  4233. assert!(!AccountsDb::is_shrinking_productive(&store));
  4234. }
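/// Exercises `is_candidate_for_shrink()` around the alive-bytes threshold for both the
/// `TotalSpace` and `IndividualStore` shrink ratios.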
  4235. #[test_case(StorageAccess::Mmap)]
  4236. #[test_case(StorageAccess::File)]
  4237. fn test_is_candidate_for_shrink(storage_access: StorageAccess) {
  4238. agave_logger::setup();
  4239. let mut accounts = AccountsDb::new_single_for_tests();
  4240. let common_store_path = Path::new("");
  4241. let store_file_size = 100_000;
  4242. let entry = Arc::new(AccountStorageEntry::new(
  4243. common_store_path,
  4244. 0,
  4245. 1,
  4246. store_file_size,
  4247. AccountsFileProvider::AppendVec,
  4248. storage_access,
  4249. ));
  4250. match accounts.shrink_ratio {
  4251. AccountShrinkThreshold::TotalSpace { shrink_ratio } => {
  4252. assert_eq!(
  4253. (DEFAULT_ACCOUNTS_SHRINK_RATIO * 100.) as u64,
  4254. (shrink_ratio * 100.) as u64
  4255. )
  4256. }
  4257. AccountShrinkThreshold::IndividualStore { shrink_ratio: _ } => {
  4258. panic!("Expect the default to be TotalSpace")
  4259. }
  4260. }
  4261. entry
  4262. .alive_bytes
  4263. .store(store_file_size as usize - 1, Ordering::Release);
  4264. assert!(accounts.is_candidate_for_shrink(&entry));
  4265. entry
  4266. .alive_bytes
  4267. .store(store_file_size as usize, Ordering::Release);
  4268. assert!(!accounts.is_candidate_for_shrink(&entry));
  4269. let shrink_ratio = 0.3;
  4270. let file_size_shrink_limit = (store_file_size as f64 * shrink_ratio) as usize;
  4271. entry
  4272. .alive_bytes
  4273. .store(file_size_shrink_limit + 1, Ordering::Release);
  4274. accounts.shrink_ratio = AccountShrinkThreshold::TotalSpace { shrink_ratio };
  4275. assert!(accounts.is_candidate_for_shrink(&entry));
  4276. accounts.shrink_ratio = AccountShrinkThreshold::IndividualStore { shrink_ratio };
  4277. assert!(!accounts.is_candidate_for_shrink(&entry));
  4278. }
  4279. define_accounts_db_test!(test_calculate_storage_count_and_alive_bytes, |accounts| {
  4280. accounts.accounts_index.set_startup(Startup::Startup);
  4281. let shared_key = solana_pubkey::new_rand();
  4282. let account = AccountSharedData::new(1, 1, AccountSharedData::default().owner());
  4283. let slot0 = 0;
  4284. accounts.accounts_index.set_startup(Startup::Startup);
  4285. let storage = accounts.create_and_insert_store(slot0, 4_000, "flush_slot_cache");
  4286. storage
  4287. .accounts
  4288. .write_accounts(&(slot0, &[(&shared_key, &account)][..]), 0);
  4289. let storage = accounts.storage.get_slot_storage_entry(slot0).unwrap();
  4290. let storage_info = StorageSizeAndCountMap::default();
  4291. let mut reader = append_vec::new_scan_accounts_reader();
  4292. accounts.generate_index_for_slot(&mut reader, &storage, slot0, 0, &storage_info);
  4293. assert_eq!(storage_info.len(), 1);
  4294. for entry in storage_info.iter() {
  4295. let expected_stored_size =
  4296. if accounts.accounts_file_provider == AccountsFileProvider::HotStorage {
  4297. 33
  4298. } else {
  4299. 144
  4300. };
  4301. assert_eq!(
  4302. (entry.key(), entry.value().count, entry.value().stored_size),
  4303. (&0, 1, expected_stored_size)
  4304. );
  4305. }
  4306. accounts.accounts_index.set_startup(Startup::Normal);
  4307. });
  4308. define_accounts_db_test!(
  4309. test_calculate_storage_count_and_alive_bytes_0_accounts,
  4310. |accounts| {
  4311. // empty store
  4312. let storage = accounts.create_and_insert_store(0, 1, "test");
  4313. let storage_info = StorageSizeAndCountMap::default();
  4314. let mut reader = append_vec::new_scan_accounts_reader();
  4315. accounts.generate_index_for_slot(&mut reader, &storage, 0, 0, &storage_info);
  4316. assert!(storage_info.is_empty());
  4317. }
  4318. );
  4319. define_accounts_db_test!(
  4320. test_calculate_storage_count_and_alive_bytes_2_accounts,
  4321. |accounts| {
  4322. let keys = [
  4323. solana_pubkey::Pubkey::from([0; 32]),
  4324. solana_pubkey::Pubkey::from([255; 32]),
  4325. ];
  4326. accounts.accounts_index.set_startup(Startup::Startup);
  4327. // make sure accounts are in 2 different bins
  4328. assert!(
  4329. (accounts.accounts_index.bins() == 1)
  4330. ^ (accounts
  4331. .accounts_index
  4332. .bin_calculator
  4333. .bin_from_pubkey(&keys[0])
  4334. != accounts
  4335. .accounts_index
  4336. .bin_calculator
  4337. .bin_from_pubkey(&keys[1]))
  4338. );
  4339. let account = AccountSharedData::new(1, 1, AccountSharedData::default().owner());
  4340. let account_big = AccountSharedData::new(1, 1000, AccountSharedData::default().owner());
  4341. let slot0 = 0;
  4342. let storage = accounts.create_and_insert_store(slot0, 4_000, "flush_slot_cache");
  4343. storage.accounts.write_accounts(
  4344. &(slot0, &[(&keys[0], &account), (&keys[1], &account_big)][..]),
  4345. 0,
  4346. );
  4347. let storage_info = StorageSizeAndCountMap::default();
  4348. let mut reader = append_vec::new_scan_accounts_reader();
  4349. accounts.generate_index_for_slot(&mut reader, &storage, 0, 0, &storage_info);
  4350. assert_eq!(storage_info.len(), 1);
  4351. for entry in storage_info.iter() {
  4352. let expected_stored_size =
  4353. if accounts.accounts_file_provider == AccountsFileProvider::HotStorage {
  4354. 1065
  4355. } else {
  4356. 1280
  4357. };
  4358. assert_eq!(
  4359. (entry.key(), entry.value().count, entry.value().stored_size),
  4360. (&0, 2, expected_stored_size)
  4361. );
  4362. }
  4363. accounts.accounts_index.set_startup(Startup::Normal);
  4364. }
  4365. );
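/// Index generation should skip accounts marked obsolete: the reported count and stored size
/// must cover only the accounts that were kept.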
  4366. #[test_case(8)]
  4367. #[test_case(5)]
  4368. #[test_case(0)]
  4369. fn test_calculate_storage_count_and_alive_bytes_obsolete_account(
  4370. num_accounts_to_mark_obsolete: usize,
  4371. ) {
  4372. let accounts = AccountsDb::new_single_for_tests();
  4373. accounts.accounts_index.set_startup(Startup::Startup);
  4374. let account_sizes = [1, 5, 10, 50, 100, 500, 1000, 2000];
  4375. // Make sure we have enough accounts to mark obsolete. If this fails, just add more
  4376. // entries to account_sizes
  4377. assert!(account_sizes.len() >= num_accounts_to_mark_obsolete);
  4378. let account_list: Vec<_> = account_sizes
  4379. .into_iter()
  4380. .map(|size| {
  4381. (
  4382. Pubkey::new_unique(),
  4383. AccountSharedData::new(1, size, AccountSharedData::default().owner()),
  4384. )
  4385. })
  4386. .collect();
  4387. let slot0 = 0;
  4388. let storage = accounts.create_and_insert_store(slot0, 10_000, "");
  4389. let offsets = storage
  4390. .accounts
  4391. .write_accounts(&(slot0, &account_list[..]), 0);
  4392. let offsets = offsets.unwrap().offsets;
  4393. let data_lens = storage.accounts.get_account_data_lens(&offsets);
  4394. let mut offsets: Vec<_> = offsets.into_iter().zip(data_lens).collect();
  4395. // Randomize the accounts that get marked obsolete
  4396. let mut rng = rand::thread_rng();
  4397. offsets.shuffle(&mut rng);
  4398. let (accounts_to_mark_obsolete, accounts_to_keep) =
  4399. offsets.split_at(num_accounts_to_mark_obsolete);
  4400. storage
  4401. .obsolete_accounts
  4402. .write()
  4403. .unwrap()
  4404. .mark_accounts_obsolete(accounts_to_mark_obsolete.iter().cloned(), slot0 + 1);
  4405. let storage_info = StorageSizeAndCountMap::default();
  4406. let mut reader = append_vec::new_scan_accounts_reader();
  4407. let info = accounts.generate_index_for_slot(&mut reader, &storage, 0, 0, &storage_info);
  4408. assert_eq!(
  4409. info.num_obsolete_accounts_skipped,
  4410. num_accounts_to_mark_obsolete as u64
  4411. );
  4412. assert_eq!(storage_info.len(), 1);
  4413. for entry in storage_info.iter() {
// Sum up the stored size of all non-obsolete accounts
  4415. let expected_stored_size: usize = accounts_to_keep
  4416. .iter()
  4417. .map(|(_, data_len)| storage.accounts.calculate_stored_size(*data_len))
  4418. .sum();
  4419. assert_eq!(
  4420. (entry.key(), entry.value().count, entry.value().stored_size),
  4421. (&0, accounts_to_keep.len(), expected_stored_size)
  4422. );
  4423. }
  4424. accounts.accounts_index.set_startup(Startup::Normal);
  4425. }
  4426. define_accounts_db_test!(test_set_storage_count_and_alive_bytes, |accounts| {
  4427. // make sure we have storage 0
  4428. let shared_key = solana_pubkey::new_rand();
  4429. let account = AccountSharedData::new(1, 1, AccountSharedData::default().owner());
  4430. let slot0 = 0;
  4431. accounts.store_for_tests((slot0, [(&shared_key, &account)].as_slice()));
  4432. accounts.add_root_and_flush_write_cache(slot0);
  4433. // fake out the store count to avoid the assert
  4434. for (_, store) in accounts.storage.iter() {
  4435. store.alive_bytes.store(0, Ordering::Release);
  4436. store.count.store(0, Ordering::Release);
  4437. }
  4438. // count needs to be <= approx stored count in store.
  4439. // approx stored count is 1 in store since we added a single account.
  4440. let count = 1;
  4441. // populate based on made up hash data
  4442. let dashmap = DashMap::default();
  4443. dashmap.insert(
  4444. 0,
  4445. StorageSizeAndCount {
  4446. stored_size: 2,
  4447. count,
  4448. },
  4449. );
  4450. for (_, store) in accounts.storage.iter() {
  4451. assert_eq!(store.count(), 0);
  4452. assert_eq!(store.alive_bytes(), 0);
  4453. }
  4454. accounts.set_storage_count_and_alive_bytes(dashmap, &mut GenerateIndexTimings::default());
  4455. assert_eq!(accounts.storage.len(), 1);
  4456. for (_, store) in accounts.storage.iter() {
  4457. assert_eq!(store.id(), 0);
  4458. assert_eq!(store.count(), count);
  4459. assert_eq!(store.alive_bytes(), 2);
  4460. }
  4461. });
  4462. define_accounts_db_test!(test_purge_alive_unrooted_slots_after_clean, |accounts| {
  4463. // Key shared between rooted and nonrooted slot
  4464. let shared_key = solana_pubkey::new_rand();
  4465. // Key to keep the storage entry for the unrooted slot alive
  4466. let unrooted_key = solana_pubkey::new_rand();
  4467. let slot0 = 0;
  4468. let slot1 = 1;
  4469. // Store accounts with greater than 0 lamports
  4470. let account = AccountSharedData::new(1, 1, AccountSharedData::default().owner());
  4471. accounts.store_for_tests((slot0, [(&shared_key, &account)].as_slice()));
  4472. accounts.store_for_tests((slot0, [(&unrooted_key, &account)].as_slice()));
// Simulate adding dirty pubkeys on bank freeze. Note this is not a rooted slot.
// On the next *rooted* slot, update the `shared_key` account to zero lamports
  4476. let zero_lamport_account = AccountSharedData::new(0, 0, AccountSharedData::default().owner());
  4477. accounts.store_for_tests((slot1, [(&shared_key, &zero_lamport_account)].as_slice()));
  4478. // Simulate adding dirty pubkeys on bank freeze, set root
  4479. accounts.add_root_and_flush_write_cache(slot1);
  4480. // The later rooted zero-lamport update to `shared_key` cannot be cleaned
  4481. // because it is kept alive by the unrooted slot.
  4482. accounts.clean_accounts_for_tests();
  4483. assert!(accounts.accounts_index.contains(&shared_key));
// Simulate a purge_slot() call from AccountsBackgroundService
  4485. accounts.purge_slot(slot0, 0, true);
  4486. // Now clean should clean up the remaining key
  4487. accounts.clean_accounts_for_tests();
  4488. assert!(!accounts.accounts_index.contains(&shared_key));
  4489. assert_no_storages_at_slot(&accounts, slot0);
  4490. });
  4491. /// asserts that not only are there 0 append vecs, but there is not even an entry in the storage map for 'slot'
  4492. fn assert_no_storages_at_slot(db: &AccountsDb, slot: Slot) {
  4493. assert!(db.storage.get_slot_storage_entry(slot).is_none());
  4494. }
  4495. // Test to make sure `clean_accounts()` works properly with `latest_full_snapshot_slot`
  4496. //
  4497. // Basically:
  4498. //
  4499. // - slot 1: set Account1's balance to non-zero
  4500. // - slot 2: set Account1's balance to a different non-zero amount
  4501. // - slot 3: set Account1's balance to zero
  4502. // - call `clean_accounts()` with `max_clean_root` set to 2
  4503. // - ensure Account1 has *not* been purged
  4504. // - ensure the store from slot 1 is cleaned up
  4505. // - call `clean_accounts()` with `latest_full_snapshot_slot` set to 2
  4506. // - ensure Account1 has *not* been purged
  4507. // - call `clean_accounts()` with `latest_full_snapshot_slot` set to 3
  4508. // - ensure Account1 *has* been purged
  4509. define_accounts_db_test!(
  4510. test_clean_accounts_with_latest_full_snapshot_slot,
  4511. |accounts_db| {
  4512. let pubkey = solana_pubkey::new_rand();
  4513. let owner = solana_pubkey::new_rand();
  4514. let space = 0;
  4515. let slot1: Slot = 1;
  4516. let account = AccountSharedData::new(111, space, &owner);
  4517. accounts_db.store_for_tests((slot1, &[(&pubkey, &account)][..]));
  4518. accounts_db.add_root_and_flush_write_cache(slot1);
  4519. let slot2: Slot = 2;
  4520. let account = AccountSharedData::new(222, space, &owner);
  4521. accounts_db.store_for_tests((slot2, &[(&pubkey, &account)][..]));
  4522. accounts_db.add_root_and_flush_write_cache(slot2);
  4523. let slot3: Slot = 3;
  4524. let account = AccountSharedData::new(0, space, &owner);
  4525. accounts_db.store_for_tests((slot3, &[(&pubkey, &account)][..]));
  4526. accounts_db.add_root_and_flush_write_cache(slot3);
  4527. accounts_db.assert_ref_count(&pubkey, 3);
  4528. accounts_db.set_latest_full_snapshot_slot(slot2);
  4529. accounts_db.clean_accounts(Some(slot2), false, &EpochSchedule::default());
  4530. accounts_db.assert_ref_count(&pubkey, 2);
  4531. accounts_db.set_latest_full_snapshot_slot(slot2);
  4532. accounts_db.clean_accounts(None, false, &EpochSchedule::default());
  4533. accounts_db.assert_ref_count(&pubkey, 1);
  4534. accounts_db.set_latest_full_snapshot_slot(slot3);
  4535. accounts_db.clean_accounts(None, false, &EpochSchedule::default());
  4536. accounts_db.assert_ref_count(&pubkey, 0);
  4537. }
  4538. );
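/// Zero-lamport clean candidates must only be filtered out when the latest full snapshot is
/// older than the zero-lamport slot and the max clean root extends past that snapshot (or is
/// unspecified).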
  4539. #[test]
  4540. fn test_filter_zero_lamport_clean_for_incremental_snapshots() {
  4541. agave_logger::setup();
  4542. let slot = 10;
  4543. struct TestParameters {
  4544. latest_full_snapshot_slot: Option<Slot>,
  4545. max_clean_root: Option<Slot>,
  4546. should_contain: bool,
  4547. }
  4548. let do_test = |test_params: TestParameters| {
  4549. let account_info = AccountInfo::new(StorageLocation::AppendVec(42, 128), true);
  4550. let pubkey = solana_pubkey::new_rand();
  4551. let mut key_set = HashSet::default();
  4552. key_set.insert(pubkey);
  4553. let store_count = 0;
  4554. let mut store_counts = HashMap::default();
  4555. store_counts.insert(slot, (store_count, key_set));
  4556. let mut candidates = [HashMap::new()];
  4557. candidates[0].insert(
  4558. pubkey,
  4559. CleaningInfo {
  4560. slot_list: SlotList::from([(slot, account_info)]),
  4561. ref_count: 1,
  4562. ..Default::default()
  4563. },
  4564. );
  4565. let accounts_db = AccountsDb::new_single_for_tests();
  4566. if let Some(latest_full_snapshot_slot) = test_params.latest_full_snapshot_slot {
  4567. accounts_db.set_latest_full_snapshot_slot(latest_full_snapshot_slot);
  4568. }
  4569. accounts_db.filter_zero_lamport_clean_for_incremental_snapshots(
  4570. test_params.max_clean_root,
  4571. &store_counts,
  4572. &mut candidates,
  4573. );
  4574. assert_eq!(
  4575. candidates[0].contains_key(&pubkey),
  4576. test_params.should_contain
  4577. );
  4578. };
  4579. // Scenario 1: last full snapshot is NONE
  4580. // In this scenario incremental snapshots are OFF, so always purge
  4581. {
  4582. let latest_full_snapshot_slot = None;
  4583. do_test(TestParameters {
  4584. latest_full_snapshot_slot,
  4585. max_clean_root: Some(slot),
  4586. should_contain: true,
  4587. });
  4588. do_test(TestParameters {
  4589. latest_full_snapshot_slot,
  4590. max_clean_root: None,
  4591. should_contain: true,
  4592. });
  4593. }
  4594. // Scenario 2: last full snapshot is GREATER THAN zero lamport account slot
  4595. // In this scenario always purge, and just test the various permutations of
  4596. // `should_filter_for_incremental_snapshots` based on `max_clean_root`.
  4597. {
  4598. let latest_full_snapshot_slot = Some(slot + 1);
  4599. do_test(TestParameters {
  4600. latest_full_snapshot_slot,
  4601. max_clean_root: latest_full_snapshot_slot,
  4602. should_contain: true,
  4603. });
  4604. do_test(TestParameters {
  4605. latest_full_snapshot_slot,
  4606. max_clean_root: latest_full_snapshot_slot.map(|s| s + 1),
  4607. should_contain: true,
  4608. });
  4609. do_test(TestParameters {
  4610. latest_full_snapshot_slot,
  4611. max_clean_root: None,
  4612. should_contain: true,
  4613. });
  4614. }
  4615. // Scenario 3: last full snapshot is EQUAL TO zero lamport account slot
  4616. // In this scenario always purge, as it's the same as Scenario 2.
  4617. {
  4618. let latest_full_snapshot_slot = Some(slot);
  4619. do_test(TestParameters {
  4620. latest_full_snapshot_slot,
  4621. max_clean_root: latest_full_snapshot_slot,
  4622. should_contain: true,
  4623. });
  4624. do_test(TestParameters {
  4625. latest_full_snapshot_slot,
  4626. max_clean_root: latest_full_snapshot_slot.map(|s| s + 1),
  4627. should_contain: true,
  4628. });
  4629. do_test(TestParameters {
  4630. latest_full_snapshot_slot,
  4631. max_clean_root: None,
  4632. should_contain: true,
  4633. });
  4634. }
  4635. // Scenario 4: last full snapshot is LESS THAN zero lamport account slot
  4636. // In this scenario do *not* purge, except when `should_filter_for_incremental_snapshots`
  4637. // is false
  4638. {
  4639. let latest_full_snapshot_slot = Some(slot - 1);
  4640. do_test(TestParameters {
  4641. latest_full_snapshot_slot,
  4642. max_clean_root: latest_full_snapshot_slot,
  4643. should_contain: true,
  4644. });
  4645. do_test(TestParameters {
  4646. latest_full_snapshot_slot,
  4647. max_clean_root: latest_full_snapshot_slot.map(|s| s + 1),
  4648. should_contain: false,
  4649. });
  4650. do_test(TestParameters {
  4651. latest_full_snapshot_slot,
  4652. max_clean_root: None,
  4653. should_contain: false,
  4654. });
  4655. }
  4656. }
  4657. impl AccountsDb {
/// helper function to test unref_accounts and clean_dead_slots_from_accounts_index
  4659. fn test_unref(
  4660. &self,
  4661. call_clean: bool,
  4662. purged_slot_pubkeys: HashSet<(Slot, Pubkey)>,
  4663. purged_stored_account_slots: &mut AccountSlots,
  4664. pubkeys_removed_from_accounts_index: &PubkeysRemovedFromAccountsIndex,
  4665. ) {
  4666. self.unref_accounts(
  4667. purged_slot_pubkeys,
  4668. purged_stored_account_slots,
  4669. pubkeys_removed_from_accounts_index,
  4670. );
  4671. if call_clean {
  4672. let empty_vec = Vec::default();
  4673. self.clean_dead_slots_from_accounts_index(empty_vec.iter());
  4674. }
  4675. }
  4676. }
  4677. #[test]
  4678. /// test 'unref' parameter 'pubkeys_removed_from_accounts_index'
  4679. fn test_unref_pubkeys_removed_from_accounts_index() {
  4680. let slot1 = 1;
  4681. let pk1 = Pubkey::from([1; 32]);
  4682. for already_removed in [false, true] {
  4683. let mut pubkeys_removed_from_accounts_index = PubkeysRemovedFromAccountsIndex::default();
  4684. if already_removed {
  4685. pubkeys_removed_from_accounts_index.insert(pk1);
  4686. }
  4687. // pk1 in slot1, purge it
  4688. let db = AccountsDb::new_single_for_tests();
  4689. let mut purged_slot_pubkeys = HashSet::default();
  4690. purged_slot_pubkeys.insert((slot1, pk1));
  4691. let mut reclaims = ReclaimsSlotList::default();
  4692. db.accounts_index.upsert(
  4693. slot1,
  4694. slot1,
  4695. &pk1,
  4696. &AccountSharedData::default(),
  4697. &AccountSecondaryIndexes::default(),
  4698. AccountInfo::default(),
  4699. &mut reclaims,
  4700. UpsertReclaim::IgnoreReclaims,
  4701. );
  4702. let mut purged_stored_account_slots = AccountSlots::default();
  4703. db.test_unref(
  4704. false,
  4705. purged_slot_pubkeys,
  4706. &mut purged_stored_account_slots,
  4707. &pubkeys_removed_from_accounts_index,
  4708. );
  4709. assert_eq!(
  4710. vec![(pk1, vec![slot1].into_iter().collect::<IntSet<_>>())],
  4711. purged_stored_account_slots.into_iter().collect::<Vec<_>>()
  4712. );
  4713. let expected = RefCount::from(already_removed);
  4714. db.assert_ref_count(&pk1, expected);
  4715. }
  4716. }
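/// Unreffing purged (slot, pubkey) pairs should decrement ref counts and report exactly which
/// slots each pubkey was removed from.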
  4717. #[test]
  4718. fn test_unref_accounts() {
  4719. let pubkeys_removed_from_accounts_index = PubkeysRemovedFromAccountsIndex::default();
  4720. for call_clean in [true, false] {
  4721. {
  4722. let db = AccountsDb::new_single_for_tests();
  4723. let mut purged_stored_account_slots = AccountSlots::default();
  4724. db.test_unref(
  4725. call_clean,
  4726. HashSet::default(),
  4727. &mut purged_stored_account_slots,
  4728. &pubkeys_removed_from_accounts_index,
  4729. );
  4730. assert!(purged_stored_account_slots.is_empty());
  4731. }
  4732. let slot1 = 1;
  4733. let slot2 = 2;
  4734. let pk1 = Pubkey::from([1; 32]);
  4735. let pk2 = Pubkey::from([2; 32]);
  4736. {
  4737. // pk1 in slot1, purge it
  4738. let db = AccountsDb::new_single_for_tests();
  4739. let mut purged_slot_pubkeys = HashSet::default();
  4740. purged_slot_pubkeys.insert((slot1, pk1));
  4741. let mut reclaims = ReclaimsSlotList::default();
  4742. db.accounts_index.upsert(
  4743. slot1,
  4744. slot1,
  4745. &pk1,
  4746. &AccountSharedData::default(),
  4747. &AccountSecondaryIndexes::default(),
  4748. AccountInfo::default(),
  4749. &mut reclaims,
  4750. UpsertReclaim::IgnoreReclaims,
  4751. );
  4752. let mut purged_stored_account_slots = AccountSlots::default();
  4753. db.test_unref(
  4754. call_clean,
  4755. purged_slot_pubkeys,
  4756. &mut purged_stored_account_slots,
  4757. &pubkeys_removed_from_accounts_index,
  4758. );
  4759. assert_eq!(
  4760. vec![(pk1, vec![slot1].into_iter().collect::<IntSet<_>>())],
  4761. purged_stored_account_slots.into_iter().collect::<Vec<_>>()
  4762. );
  4763. db.assert_ref_count(&pk1, 0);
  4764. }
  4765. {
  4766. let db = AccountsDb::new_single_for_tests();
  4767. let mut purged_stored_account_slots = AccountSlots::default();
  4768. let mut purged_slot_pubkeys = HashSet::default();
  4769. let mut reclaims = ReclaimsSlotList::default();
  4770. // pk1 and pk2 both in slot1 and slot2, so each has refcount of 2
  4771. for slot in [slot1, slot2] {
  4772. for pk in [pk1, pk2] {
  4773. db.accounts_index.upsert(
  4774. slot,
  4775. slot,
  4776. &pk,
  4777. &AccountSharedData::default(),
  4778. &AccountSecondaryIndexes::default(),
  4779. AccountInfo::default(),
  4780. &mut reclaims,
  4781. UpsertReclaim::IgnoreReclaims,
  4782. );
  4783. }
  4784. }
  4785. // purge pk1 from both 1 and 2 and pk2 from slot 1
  4786. let purges = vec![(slot1, pk1), (slot1, pk2), (slot2, pk1)];
  4787. purges.into_iter().for_each(|(slot, pk)| {
  4788. purged_slot_pubkeys.insert((slot, pk));
  4789. });
  4790. db.test_unref(
  4791. call_clean,
  4792. purged_slot_pubkeys,
  4793. &mut purged_stored_account_slots,
  4794. &pubkeys_removed_from_accounts_index,
  4795. );
  4796. for (pk, slots) in [(pk1, vec![slot1, slot2]), (pk2, vec![slot1])] {
  4797. let result = purged_stored_account_slots.remove(&pk).unwrap();
  4798. assert_eq!(result, slots.into_iter().collect::<IntSet<_>>());
  4799. }
  4800. assert!(purged_stored_account_slots.is_empty());
  4801. db.assert_ref_count(&pk1, 0);
  4802. db.assert_ref_count(&pk2, 1);
  4803. }
  4804. }
  4805. }
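// unref a single pubkey stored in more than UNREF_ACCOUNTS_BATCH_SIZE slots so that
// unref_accounts must process more than one batch, and verify the ref count drops to 0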
  4806. define_accounts_db_test!(test_many_unrefs, |db| {
  4807. let mut purged_stored_account_slots = AccountSlots::default();
  4808. let mut reclaims = ReclaimsSlotList::default();
  4809. let pk1 = Pubkey::from([1; 32]);
  4810. // make sure we have > 1 batch. Bigger numbers cost more in test time here.
  4811. let n = (UNREF_ACCOUNTS_BATCH_SIZE + 1) as Slot;
  4812. // put the pubkey into the acct idx in 'n' slots
  4813. let purged_slot_pubkeys = (0..n)
  4814. .map(|slot| {
  4815. db.accounts_index.upsert(
  4816. slot,
  4817. slot,
  4818. &pk1,
  4819. &AccountSharedData::default(),
  4820. &AccountSecondaryIndexes::default(),
  4821. AccountInfo::default(),
  4822. &mut reclaims,
  4823. UpsertReclaim::IgnoreReclaims,
  4824. );
  4825. (slot, pk1)
  4826. })
  4827. .collect::<HashSet<_>>();
  4828. assert_eq!(
  4829. db.accounts_index.ref_count_from_storage(&pk1),
  4830. n as RefCount,
  4831. );
  4832. // unref all 'n' slots
  4833. db.unref_accounts(
  4834. purged_slot_pubkeys,
  4835. &mut purged_stored_account_slots,
  4836. &HashSet::default(),
  4837. );
  4838. assert_eq!(db.accounts_index.ref_count_from_storage(&pk1), 0);
  4839. });
  4840. define_accounts_db_test!(test_mark_dirty_dead_stores_empty, |db| {
  4841. let slot = 0;
  4842. for add_dirty_stores in [false, true] {
  4843. let dead_storages = db.mark_dirty_dead_stores(slot, add_dirty_stores, None, false);
  4844. assert!(dead_storages.is_empty());
  4845. assert!(db.dirty_stores.is_empty());
  4846. }
  4847. });
  4848. #[test]
  4849. fn test_mark_dirty_dead_stores_no_shrink_in_progress() {
  4850. // None for shrink_in_progress, 1 existing store at the slot
  4851. // There should be no more append vecs at that slot after the call to mark_dirty_dead_stores.
  4852. // This tests the case where this slot was combined into an ancient append vec from an older slot and
  4853. // there is no longer an append vec at this slot.
  4854. for add_dirty_stores in [false, true] {
  4855. let slot = 0;
  4856. let db = AccountsDb::new_single_for_tests();
  4857. let size = 1;
  4858. let existing_store = db.create_and_insert_store(slot, size, "test");
  4859. let old_id = existing_store.id();
  4860. let dead_storages = db.mark_dirty_dead_stores(slot, add_dirty_stores, None, false);
  4861. assert!(db.storage.get_slot_storage_entry(slot).is_none());
  4862. assert_eq!(dead_storages.len(), 1);
  4863. assert_eq!(dead_storages.first().unwrap().id(), old_id);
  4864. if add_dirty_stores {
  4865. assert_eq!(1, db.dirty_stores.len());
  4866. let dirty_store = db.dirty_stores.get(&slot).unwrap();
  4867. assert_eq!(dirty_store.id(), old_id);
  4868. } else {
  4869. assert!(db.dirty_stores.is_empty());
  4870. }
  4871. assert!(db.storage.is_empty_entry(slot));
  4872. }
  4873. }
  4874. #[test]
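/// with a shrink in progress, mark_dirty_dead_stores returns the old store as dead (and
/// optionally adds it to dirty_stores) while the shrink's new store remains at the slot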
  4875. fn test_mark_dirty_dead_stores() {
  4876. let slot = 0;
  4877. // use shrink_in_progress to cause us to drop the initial store
  4878. for add_dirty_stores in [false, true] {
  4879. let db = AccountsDb::new_single_for_tests();
  4880. let size = 1;
  4881. let old_store = db.create_and_insert_store(slot, size, "test");
  4882. let old_id = old_store.id();
  4883. let shrink_in_progress = db.get_store_for_shrink(slot, 100);
  4884. let dead_storages =
  4885. db.mark_dirty_dead_stores(slot, add_dirty_stores, Some(shrink_in_progress), false);
  4886. assert!(db.storage.get_slot_storage_entry(slot).is_some());
  4887. assert_eq!(dead_storages.len(), 1);
  4888. assert_eq!(dead_storages.first().unwrap().id(), old_id);
  4889. if add_dirty_stores {
  4890. assert_eq!(1, db.dirty_stores.len());
  4891. let dirty_store = db.dirty_stores.get(&slot).unwrap();
  4892. assert_eq!(dirty_store.id(), old_id);
  4893. } else {
  4894. assert!(db.dirty_stores.is_empty());
  4895. }
  4896. assert!(db.storage.get_slot_storage_entry(slot).is_some());
  4897. }
  4898. }
  4899. define_accounts_db_test!(test_add_uncleaned_pubkeys_after_shrink, |db| {
  4900. let slot = 0;
  4901. let pubkey = Pubkey::from([1; 32]);
  4902. db.add_uncleaned_pubkeys_after_shrink(slot, vec![pubkey].into_iter());
  4903. assert_eq!(&*db.uncleaned_pubkeys.get(&slot).unwrap(), &vec![pubkey]);
  4904. });
  4905. #[test]
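/// with an ancient_append_vec_offset at least a full epoch in the future, the oldest
/// non-ancient slot is always capped at the max root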
  4906. fn test_sweep_get_oldest_non_ancient_slot_max() {
  4907. let epoch_schedule = EpochSchedule::default();
  4908. // way into future
  4909. for ancient_append_vec_offset in [
  4910. epoch_schedule.slots_per_epoch,
  4911. epoch_schedule.slots_per_epoch + 1,
  4912. epoch_schedule.slots_per_epoch * 2,
  4913. ] {
  4914. let db = AccountsDb::new_with_config(
  4915. Vec::new(),
  4916. AccountsDbConfig {
  4917. ancient_append_vec_offset: Some(ancient_append_vec_offset as i64),
  4918. ..ACCOUNTS_DB_CONFIG_FOR_TESTING
  4919. },
  4920. None,
  4921. Arc::default(),
  4922. );
  4923. // before any roots are added, we expect the oldest non-ancient slot to be 0
  4924. assert_eq!(0, db.get_oldest_non_ancient_slot(&epoch_schedule));
  4925. for max_root_inclusive in [
  4926. 0,
  4927. epoch_schedule.slots_per_epoch,
  4928. epoch_schedule.slots_per_epoch * 2,
  4929. epoch_schedule.slots_per_epoch * 10,
  4930. ] {
  4931. db.add_root(max_root_inclusive);
  4932. // oldest non-ancient will never exceed max_root_inclusive, even if the offset is so large it would mathematically move ancient PAST the newest root
  4933. assert_eq!(
  4934. max_root_inclusive,
  4935. db.get_oldest_non_ancient_slot(&epoch_schedule)
  4936. );
  4937. }
  4938. }
  4939. }
  4940. #[test]
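/// walk max roots across the slots_per_epoch / ancient_append_vec_offset boundary and check
/// where get_oldest_non_ancient_slot stops saturating at 0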
  4941. fn test_sweep_get_oldest_non_ancient_slot() {
  4942. let epoch_schedule = EpochSchedule::default();
  4943. let ancient_append_vec_offset = 50_000;
  4944. let db = AccountsDb::new_with_config(
  4945. Vec::new(),
  4946. AccountsDbConfig {
  4947. ancient_append_vec_offset: Some(ancient_append_vec_offset),
  4948. ..ACCOUNTS_DB_CONFIG_FOR_TESTING
  4949. },
  4950. None,
  4951. Arc::default(),
  4952. );
  4953. // before any roots are added, we expect the oldest non-ancient slot to be 0
  4954. assert_eq!(0, db.get_oldest_non_ancient_slot(&epoch_schedule));
4955. // adding roots up to slots_per_epoch +/- ancient_append_vec_offset should still saturate the oldest non-ancient slot to 0
  4956. let max_root_inclusive = AccountsDb::apply_offset_to_slot(0, ancient_append_vec_offset - 1);
  4957. db.add_root(max_root_inclusive);
  4958. // oldest non-ancient will never exceed max_root_inclusive
  4959. assert_eq!(0, db.get_oldest_non_ancient_slot(&epoch_schedule));
  4960. for offset in 0..3u64 {
  4961. let max_root_inclusive = ancient_append_vec_offset as u64 + offset;
  4962. db.add_root(max_root_inclusive);
  4963. assert_eq!(
  4964. 0,
  4965. db.get_oldest_non_ancient_slot(&epoch_schedule),
  4966. "offset: {offset}"
  4967. );
  4968. }
  4969. for offset in 0..3u64 {
  4970. let max_root_inclusive = AccountsDb::apply_offset_to_slot(
  4971. epoch_schedule.slots_per_epoch - 1,
  4972. -ancient_append_vec_offset,
  4973. ) + offset;
  4974. db.add_root(max_root_inclusive);
  4975. assert_eq!(
  4976. offset,
  4977. db.get_oldest_non_ancient_slot(&epoch_schedule),
  4978. "offset: {offset}, max_root_inclusive: {max_root_inclusive}"
  4979. );
  4980. }
  4981. }
  4982. #[test]
  4983. fn test_sweep_get_oldest_non_ancient_slot2() {
  4984. // note that this test has to worry about saturation at 0 as we subtract `slots_per_epoch` and `ancient_append_vec_offset`
  4985. let epoch_schedule = EpochSchedule::default();
  4986. for ancient_append_vec_offset in [-10_000i64, 50_000] {
  4987. // at `starting_slot_offset`=0, with a negative `ancient_append_vec_offset`, we expect saturation to 0
  4988. // big enough to avoid all saturation issues.
  4989. let avoid_saturation = 1_000_000;
  4990. assert!(
  4991. avoid_saturation
  4992. > epoch_schedule.slots_per_epoch + ancient_append_vec_offset.unsigned_abs()
  4993. );
  4994. for starting_slot_offset in [0, avoid_saturation] {
  4995. let db = AccountsDb::new_with_config(
  4996. Vec::new(),
  4997. AccountsDbConfig {
  4998. ancient_append_vec_offset: Some(ancient_append_vec_offset),
  4999. ..ACCOUNTS_DB_CONFIG_FOR_TESTING
  5000. },
  5001. None,
  5002. Arc::default(),
  5003. );
  5004. // before any roots are added, we expect the oldest non-ancient slot to be 0
  5005. assert_eq!(0, db.get_oldest_non_ancient_slot(&epoch_schedule));
  5006. let ancient_append_vec_offset = db.ancient_append_vec_offset.unwrap();
  5007. assert_ne!(ancient_append_vec_offset, 0);
  5008. // try a few values to simulate a real validator
  5009. for inc in [0, 1, 2, 3, 4, 5, 8, 10, 10, 11, 200, 201, 1_000] {
  5010. // oldest non-ancient slot is 1 greater than first ancient slot
  5011. let completed_slot = epoch_schedule.slots_per_epoch + inc + starting_slot_offset;
  5012. // test get_oldest_non_ancient_slot, which is based off the largest root
  5013. db.add_root(completed_slot);
  5014. let expected_oldest_non_ancient_slot = AccountsDb::apply_offset_to_slot(
  5015. AccountsDb::apply_offset_to_slot(
  5016. completed_slot,
  5017. -((epoch_schedule.slots_per_epoch as i64).saturating_sub(1)),
  5018. ),
  5019. ancient_append_vec_offset,
  5020. );
  5021. assert_eq!(
  5022. expected_oldest_non_ancient_slot,
  5023. db.get_oldest_non_ancient_slot(&epoch_schedule)
  5024. );
  5025. }
  5026. }
  5027. }
  5028. }
  5029. define_accounts_db_test!(test_get_sorted_potential_ancient_slots, |db| {
  5030. let ancient_append_vec_offset = db.ancient_append_vec_offset.unwrap();
  5031. let epoch_schedule = EpochSchedule::default();
  5032. let oldest_non_ancient_slot = db.get_oldest_non_ancient_slot(&epoch_schedule);
  5033. assert!(db
  5034. .get_sorted_potential_ancient_slots(oldest_non_ancient_slot)
  5035. .is_empty());
  5036. let root1 = DEFAULT_MAX_ANCIENT_STORAGES as u64 + ancient_append_vec_offset as u64 + 1;
  5037. db.add_root(root1);
  5038. let root2 = root1 + 1;
  5039. db.add_root(root2);
  5040. let oldest_non_ancient_slot = db.get_oldest_non_ancient_slot(&epoch_schedule);
  5041. assert!(db
  5042. .get_sorted_potential_ancient_slots(oldest_non_ancient_slot)
  5043. .is_empty());
  5044. let completed_slot = epoch_schedule.slots_per_epoch;
  5045. db.accounts_index.add_root(AccountsDb::apply_offset_to_slot(
  5046. completed_slot,
  5047. ancient_append_vec_offset,
  5048. ));
  5049. let oldest_non_ancient_slot = db.get_oldest_non_ancient_slot(&epoch_schedule);
  5050. // get_sorted_potential_ancient_slots uses 'less than' as opposed to 'less or equal'
  5051. // so, we need to get more than an epoch away to get the first valid root
  5052. assert!(db
  5053. .get_sorted_potential_ancient_slots(oldest_non_ancient_slot)
  5054. .is_empty());
  5055. let completed_slot = epoch_schedule.slots_per_epoch + root1;
  5056. db.accounts_index.add_root(AccountsDb::apply_offset_to_slot(
  5057. completed_slot,
  5058. ancient_append_vec_offset,
  5059. ));
  5060. let oldest_non_ancient_slot = db.get_oldest_non_ancient_slot(&epoch_schedule);
  5061. assert_eq!(
  5062. db.get_sorted_potential_ancient_slots(oldest_non_ancient_slot),
  5063. vec![root1, root2]
  5064. );
  5065. let completed_slot = epoch_schedule.slots_per_epoch + root2;
  5066. db.accounts_index.add_root(AccountsDb::apply_offset_to_slot(
  5067. completed_slot,
  5068. ancient_append_vec_offset,
  5069. ));
  5070. let oldest_non_ancient_slot = db.get_oldest_non_ancient_slot(&epoch_schedule);
  5071. assert_eq!(
  5072. db.get_sorted_potential_ancient_slots(oldest_non_ancient_slot),
  5073. vec![root1, root2]
  5074. );
  5075. db.accounts_index
  5076. .roots_tracker
  5077. .write()
  5078. .unwrap()
  5079. .alive_roots
  5080. .remove(&root1);
  5081. let oldest_non_ancient_slot = db.get_oldest_non_ancient_slot(&epoch_schedule);
  5082. assert_eq!(
  5083. db.get_sorted_potential_ancient_slots(oldest_non_ancient_slot),
  5084. vec![root2]
  5085. );
  5086. });
  5087. #[test]
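/// sweep shrink_collect over combinations of lamports, data size, aliveness, and account count
/// (including counts around SHRINK_COLLECT_CHUNK_SIZE), optionally appending one account with
/// the opposite aliveness and one with the opposite zero-lamport state, and verify the collected
/// alive accounts, pubkeys to unref, byte totals, capacity, and the all_are_zero_lamports flag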
  5088. fn test_shrink_collect_simple() {
  5089. agave_logger::setup();
  5090. let account_counts = [
  5091. 1,
  5092. SHRINK_COLLECT_CHUNK_SIZE,
  5093. SHRINK_COLLECT_CHUNK_SIZE + 1,
  5094. SHRINK_COLLECT_CHUNK_SIZE * 2,
  5095. ];
  5096. // 2 = append_opposite_alive_account + append_opposite_zero_lamport_account
  5097. let max_appended_accounts = 2;
  5098. let max_num_accounts = *account_counts.iter().max().unwrap();
  5099. let pubkeys = (0..(max_num_accounts + max_appended_accounts))
  5100. .map(|_| solana_pubkey::new_rand())
  5101. .collect::<Vec<_>>();
  5102. // write accounts, maybe remove from index
  5103. // check shrink_collect results
  5104. for lamports in [0, 1] {
  5105. for space in [0, 8] {
  5106. if lamports == 0 && space != 0 {
  5107. // illegal - zero lamport accounts are written with 0 space
  5108. continue;
  5109. }
  5110. for alive in [false, true] {
  5111. for append_opposite_alive_account in [false, true] {
  5112. for append_opposite_zero_lamport_account in [true, false] {
  5113. for mut account_count in account_counts {
  5114. let mut normal_account_count = account_count;
  5115. let mut pubkey_opposite_zero_lamports = None;
  5116. if append_opposite_zero_lamport_account {
  5117. pubkey_opposite_zero_lamports = Some(&pubkeys[account_count]);
  5118. normal_account_count += 1;
  5119. account_count += 1;
  5120. }
  5121. let mut pubkey_opposite_alive = None;
  5122. if append_opposite_alive_account {
  5123. // this needs to happen AFTER append_opposite_zero_lamport_account
  5124. pubkey_opposite_alive = Some(&pubkeys[account_count]);
  5125. account_count += 1;
  5126. }
  5127. debug!(
  5128. "space: {space}, lamports: {lamports}, alive: {alive}, \
  5129. account_count: {account_count}, append_opposite_alive_account: \
  5130. {append_opposite_alive_account}, \
  5131. append_opposite_zero_lamport_account: \
  5132. {append_opposite_zero_lamport_account}, normal_account_count: \
  5133. {normal_account_count}"
  5134. );
  5135. let db = AccountsDb::new_single_for_tests();
  5136. let slot5 = 5;
  5137. // don't do special zero lamport account handling
  5138. db.set_latest_full_snapshot_slot(0);
  5139. let mut account = AccountSharedData::new(
  5140. lamports,
  5141. space,
  5142. AccountSharedData::default().owner(),
  5143. );
  5144. let mut to_purge = Vec::default();
  5145. for pubkey in pubkeys.iter().take(account_count) {
  5146. // store in append vec and index
  5147. let old_lamports = account.lamports();
  5148. if Some(pubkey) == pubkey_opposite_zero_lamports {
  5149. account.set_lamports(u64::from(old_lamports == 0));
  5150. }
  5151. db.store_for_tests((slot5, [(pubkey, &account)].as_slice()));
  5152. account.set_lamports(old_lamports);
  5153. let mut alive = alive;
  5154. if append_opposite_alive_account
  5155. && Some(pubkey) == pubkey_opposite_alive
  5156. {
  5157. // invert this for one special pubkey
  5158. alive = !alive;
  5159. }
  5160. if !alive {
  5161. // remove from index so pubkey is 'dead'
  5162. to_purge.push(*pubkey);
  5163. }
  5164. }
  5165. db.add_root_and_flush_write_cache(slot5);
  5166. to_purge.iter().for_each(|pubkey| {
  5167. db.accounts_index.purge_exact(
  5168. pubkey,
  5169. [slot5].into_iter().collect::<HashSet<_>>(),
  5170. &mut ReclaimsSlotList::new(),
  5171. );
  5172. });
  5173. let storage = db.get_storage_for_slot(slot5).unwrap();
  5174. let mut unique_accounts = db
  5175. .get_unique_accounts_from_storage_for_shrink(
  5176. &storage,
  5177. &ShrinkStats::default(),
  5178. );
  5179. let shrink_collect = db.shrink_collect::<AliveAccounts<'_>>(
  5180. &storage,
  5181. &mut unique_accounts,
  5182. &ShrinkStats::default(),
  5183. );
  5184. let expect_single_opposite_alive_account =
  5185. if append_opposite_alive_account {
  5186. vec![*pubkey_opposite_alive.unwrap()]
  5187. } else {
  5188. vec![]
  5189. };
  5190. let expected_alive_accounts = if alive {
  5191. pubkeys[..normal_account_count]
  5192. .iter()
  5193. .filter(|p| Some(p) != pubkey_opposite_alive.as_ref())
  5194. .sorted()
  5195. .cloned()
  5196. .collect::<Vec<_>>()
  5197. } else {
  5198. expect_single_opposite_alive_account.clone()
  5199. };
  5200. let expected_unrefed = if alive {
  5201. expect_single_opposite_alive_account.clone()
  5202. } else {
  5203. pubkeys[..normal_account_count]
  5204. .iter()
  5205. .sorted()
  5206. .cloned()
  5207. .collect::<Vec<_>>()
  5208. };
  5209. assert_eq!(shrink_collect.slot, slot5);
  5210. assert_eq!(
  5211. shrink_collect
  5212. .alive_accounts
  5213. .accounts
  5214. .iter()
  5215. .map(|account| *account.pubkey())
  5216. .sorted()
  5217. .collect::<Vec<_>>(),
  5218. expected_alive_accounts
  5219. );
  5220. assert_eq!(
  5221. shrink_collect
  5222. .pubkeys_to_unref
  5223. .iter()
  5224. .sorted()
  5225. .cloned()
  5226. .cloned()
  5227. .collect::<Vec<_>>(),
  5228. expected_unrefed
  5229. );
  5230. let alive_total_one_account = 136 + space;
  5231. if alive {
  5232. let mut expected_alive_total_bytes =
  5233. alive_total_one_account * normal_account_count;
  5234. if append_opposite_zero_lamport_account {
  5235. // zero lamport accounts store size=0 data
  5236. expected_alive_total_bytes -= space;
  5237. }
  5238. assert_eq!(
  5239. shrink_collect.alive_total_bytes,
  5240. expected_alive_total_bytes
  5241. );
  5242. } else if append_opposite_alive_account {
  5243. assert_eq!(
  5244. shrink_collect.alive_total_bytes,
  5245. alive_total_one_account
  5246. );
  5247. } else {
  5248. assert_eq!(shrink_collect.alive_total_bytes, 0);
  5249. }
  5250. // expected_capacity is determined by what size append vec gets created when the write cache is flushed to an append vec.
  5251. let mut expected_capacity =
  5252. (account_count * aligned_stored_size(space)) as u64;
  5253. if append_opposite_zero_lamport_account && space != 0 {
  5254. // zero lamport accounts always write space = 0
  5255. expected_capacity -= space as u64;
  5256. }
  5257. assert_eq!(shrink_collect.capacity, expected_capacity);
  5258. assert_eq!(shrink_collect.total_starting_accounts, account_count);
  5259. let mut expected_all_are_zero_lamports = lamports == 0;
  5260. if !append_opposite_alive_account {
  5261. expected_all_are_zero_lamports |= !alive;
  5262. }
  5263. if append_opposite_zero_lamport_account && lamports == 0 && alive {
  5264. expected_all_are_zero_lamports = !expected_all_are_zero_lamports;
  5265. }
  5266. assert_eq!(
  5267. shrink_collect.all_are_zero_lamports,
  5268. expected_all_are_zero_lamports
  5269. );
  5270. }
  5271. }
  5272. }
  5273. }
  5274. }
  5275. }
  5276. }
  5277. #[test]
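/// shrink_collect on a storage that mixes regular, zero-lamport, obsolete, and index-purged
/// accounts: obsolete accounts must not appear in pubkeys_to_unref, and only regular accounts
/// that were neither purged nor marked obsolete remain in the alive list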
  5278. fn test_shrink_collect_with_obsolete_accounts() {
  5279. agave_logger::setup();
  5280. let account_count = 100;
  5281. let pubkeys: Vec<_> = iter::repeat_with(Pubkey::new_unique)
  5282. .take(account_count)
  5283. .collect();
  5284. let db = AccountsDb::new_single_for_tests();
  5285. let slot = 5;
  5286. let mut account = AccountSharedData::new(
  5287. 100, // lamports
  5288. 128, // space
  5289. AccountSharedData::default().owner(),
  5290. );
  5291. let mut regular_pubkeys = Vec::new();
  5292. let mut obsolete_pubkeys = Vec::new();
  5293. let mut zero_lamport_pubkeys = Vec::new();
  5294. let mut unref_pubkeys = Vec::new();
  5295. for (i, pubkey) in pubkeys.iter().enumerate() {
  5296. if i % 3 == 0 {
5297. // Mark every third account as zero-lamport
5298. // These will be removed during shrink
  5299. account.set_lamports(0);
  5300. zero_lamport_pubkeys.push(*pubkey);
  5301. } else {
  5302. // Regular accounts that should be kept
  5303. account.set_lamports(200);
  5304. regular_pubkeys.push(*pubkey);
  5305. }
  5306. db.store_for_tests((slot, [(pubkey, &account)].as_slice()));
  5307. }
  5308. // Flush the cache
  5309. db.add_root_and_flush_write_cache(slot);
  5310. let storage = db.get_and_assert_single_storage(slot);
  5311. for (i, pubkey) in pubkeys.iter().enumerate() {
5312. // Mark some accounts obsolete. These will include both zero-lamport and non-zero-lamport accounts
  5313. if i % 5 == 0 {
  5314. // Lookup the pubkey in the database and find the AccountInfo
  5315. db.accounts_index
  5316. .get_with_and_then(pubkey, None, None, false, |account_info| {
  5317. db.remove_dead_accounts(
  5318. [account_info].iter(),
  5319. None,
  5320. MarkAccountsObsolete::Yes(slot),
  5321. );
  5322. });
  5323. obsolete_pubkeys.push(*pubkey);
  5324. } else if i % 4 == 0 {
  5325. // Purge accounts via clean and ensure that they will be unreffed.
  5326. db.accounts_index.purge_exact(
  5327. pubkey,
  5328. [slot].into_iter().collect::<HashSet<_>>(),
  5329. &mut ReclaimsSlotList::new(),
  5330. );
  5331. unref_pubkeys.push(*pubkey);
  5332. }
  5333. }
  5334. let mut unique_accounts =
  5335. db.get_unique_accounts_from_storage_for_shrink(&storage, &ShrinkStats::default());
  5336. let shrink_collect = db.shrink_collect::<AliveAccounts<'_>>(
  5337. &storage,
  5338. &mut unique_accounts,
  5339. &ShrinkStats::default(),
  5340. );
  5341. assert_eq!(shrink_collect.slot, slot);
5342. // Ensure that the pubkeys to unref do not include the obsolete accounts and only include the accounts purged from the index
  5343. assert_eq!(
  5344. shrink_collect
  5345. .pubkeys_to_unref
  5346. .into_iter()
  5347. .collect::<HashSet<_>>(),
5348. unref_pubkeys.iter().collect::<HashSet<_>>()
  5349. );
  5350. // Ensure that the obsolete accounts and accounts to unref are not in the alive list
  5351. assert_eq!(
  5352. shrink_collect
  5353. .alive_accounts
  5354. .accounts
  5355. .into_iter()
  5356. .map(|account| *account.pubkey())
  5357. .sorted()
  5358. .collect::<Vec<Pubkey>>(),
  5359. regular_pubkeys
  5360. .into_iter()
  5361. .filter(|account| !unref_pubkeys.contains(account))
  5362. .filter(|account| !obsolete_pubkeys.contains(account))
  5363. .sorted()
  5364. .collect::<Vec<Pubkey>>()
  5365. );
  5366. }
  5367. pub(crate) const CAN_RANDOMLY_SHRINK_FALSE: bool = false;
  5368. define_accounts_db_test!(test_combine_ancient_slots_empty, |db| {
  5369. // empty slots
  5370. db.combine_ancient_slots_packed(Vec::default(), CAN_RANDOMLY_SHRINK_FALSE);
  5371. });
  5372. #[test]
  5373. fn test_combine_ancient_slots_simple() {
  5374. _ = get_one_ancient_append_vec_and_others(0);
  5375. }
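/// collect (pubkey, account) pairs from the given storages via scan_accounts, also asserting
/// that scan_pubkeys visits the same keys in the same order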
  5376. fn get_all_accounts_from_storages<'a>(
  5377. storages: impl Iterator<Item = &'a Arc<AccountStorageEntry>>,
  5378. ) -> Vec<(Pubkey, AccountSharedData)> {
  5379. let mut reader = append_vec::new_scan_accounts_reader();
  5380. storages
  5381. .flat_map(|storage| {
  5382. let mut vec = Vec::default();
  5383. storage
  5384. .accounts
  5385. .scan_accounts(&mut reader, |_offset, account| {
  5386. vec.push((*account.pubkey(), create_account_shared_data(&account)));
  5387. })
  5388. .expect("must scan accounts storage");
  5389. // make sure scan_pubkeys results match
5390. // Note that we assume both traversals visit accounts in the same order, though this isn't guaranteed.
  5391. let mut compare = Vec::default();
  5392. storage
  5393. .accounts
  5394. .scan_pubkeys(|k| {
  5395. compare.push(*k);
  5396. })
  5397. .expect("must scan accounts storage");
  5398. assert_eq!(compare, vec.iter().map(|(k, _)| *k).collect::<Vec<_>>());
  5399. vec
  5400. })
  5401. .collect::<Vec<_>>()
  5402. }
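/// collect all accounts from the storages of the given slots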
  5403. pub(crate) fn get_all_accounts(
  5404. db: &AccountsDb,
  5405. slots: impl Iterator<Item = Slot>,
  5406. ) -> Vec<(Pubkey, AccountSharedData)> {
  5407. slots
  5408. .filter_map(|slot| {
  5409. let storage = db.storage.get_slot_storage_entry(slot);
  5410. storage.map(|storage| get_all_accounts_from_storages(std::iter::once(&storage)))
  5411. })
  5412. .flatten()
  5413. .collect::<Vec<_>>()
  5414. }
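/// assert that `one` and `two` contain the same (pubkey, account) pairs, ignoring order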
  5415. #[track_caller]
  5416. pub(crate) fn compare_all_accounts(
  5417. one: &[(Pubkey, AccountSharedData)],
  5418. two: &[(Pubkey, AccountSharedData)],
  5419. ) {
  5420. let mut failures = 0;
  5421. let mut two_indexes = (0..two.len()).collect::<Vec<_>>();
  5422. one.iter().for_each(|(pubkey, account)| {
  5423. for i in 0..two_indexes.len() {
  5424. let pubkey2 = two[two_indexes[i]].0;
  5425. if pubkey2 == *pubkey {
  5426. if !accounts_equal(account, &two[two_indexes[i]].1) {
  5427. failures += 1;
  5428. }
  5429. two_indexes.remove(i);
  5430. break;
  5431. }
  5432. }
  5433. });
  5434. // helper method to reduce the volume of logged data to help identify differences
  5435. // modify this when you hit a failure
  5436. let clean = |accounts: &[(Pubkey, AccountSharedData)]| {
  5437. accounts
  5438. .iter()
  5439. .map(|(_pubkey, account)| account.lamports())
  5440. .collect::<Vec<_>>()
  5441. };
  5442. assert_eq!(
  5443. failures,
  5444. 0,
  5445. "one: {:?}, two: {:?}, two_indexes: {:?}",
  5446. clean(one),
  5447. clean(two),
  5448. two_indexes,
  5449. );
  5450. assert!(
  5451. two_indexes.is_empty(),
  5452. "one: {one:?}, two: {two:?}, two_indexes: {two_indexes:?}"
  5453. );
  5454. }
  5455. pub fn get_account_from_account_from_storage(
  5456. account: &AccountFromStorage,
  5457. db: &AccountsDb,
  5458. slot: Slot,
  5459. ) -> AccountSharedData {
  5460. let storage = db
  5461. .storage
  5462. .get_slot_storage_entry_shrinking_in_progress_ok(slot)
  5463. .unwrap();
  5464. storage
  5465. .accounts
  5466. .get_account_shared_data(account.index_info.offset())
  5467. .unwrap()
  5468. }
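/// scan the storages for the given slots and upsert every account found into the accounts index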
  5469. fn populate_index(db: &AccountsDb, slots: Range<Slot>) {
  5470. slots.into_iter().for_each(|slot| {
  5471. if let Some(storage) = db.get_storage_for_slot(slot) {
  5472. let mut reader = append_vec::new_scan_accounts_reader();
  5473. storage
  5474. .accounts
  5475. .scan_accounts(&mut reader, |offset, account| {
  5476. let info = AccountInfo::new(
  5477. StorageLocation::AppendVec(storage.id(), offset),
  5478. account.is_zero_lamport(),
  5479. );
  5480. db.accounts_index.upsert(
  5481. slot,
  5482. slot,
  5483. account.pubkey(),
  5484. &account,
  5485. &AccountSecondaryIndexes::default(),
  5486. info,
  5487. &mut ReclaimsSlotList::new(),
  5488. UpsertReclaim::IgnoreReclaims,
  5489. );
  5490. })
  5491. .expect("must scan accounts storage");
  5492. }
  5493. })
  5494. }
  5495. pub(crate) fn remove_account_for_tests(storage: &AccountStorageEntry, num_bytes: usize) {
  5496. storage.remove_accounts(num_bytes, 1);
  5497. }
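/// create `num_slots` single-account storages starting at `starting_slot` and, when `alive`,
/// add their accounts to the index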
  5498. pub(crate) fn create_storages_and_update_index(
  5499. db: &AccountsDb,
  5500. tf: Option<&TempFile>,
  5501. starting_slot: Slot,
  5502. num_slots: usize,
  5503. alive: bool,
  5504. account_data_size: Option<u64>,
  5505. ) {
  5506. if num_slots == 0 {
  5507. return;
  5508. }
  5509. let local_tf = (tf.is_none()).then(|| {
  5510. crate::append_vec::test_utils::get_append_vec_path("create_storages_and_update_index")
  5511. });
  5512. let tf = tf.unwrap_or_else(|| local_tf.as_ref().unwrap());
  5513. let starting_id = db
  5514. .storage
  5515. .iter()
  5516. .map(|storage| storage.1.id())
  5517. .max()
  5518. .unwrap_or(999);
  5519. for i in 0..num_slots {
  5520. let id = starting_id + (i as AccountsFileId);
  5521. let pubkey1 = solana_pubkey::new_rand();
  5522. let storage = sample_storage_with_entries_id(
  5523. tf,
  5524. starting_slot + (i as Slot),
  5525. &pubkey1,
  5526. id,
  5527. alive,
  5528. account_data_size,
  5529. db.storage_access(),
  5530. );
  5531. insert_store(db, Arc::clone(&storage));
  5532. }
  5533. let storage = db.get_storage_for_slot(starting_slot).unwrap();
  5534. let created_accounts = db.get_unique_accounts_from_storage(&storage);
  5535. assert_eq!(created_accounts.stored_accounts.len(), 1);
  5536. if alive {
  5537. populate_index(db, starting_slot..(starting_slot + (num_slots as Slot) + 1));
  5538. }
  5539. }
  5540. pub(crate) fn create_db_with_storages_and_index(
  5541. alive: bool,
  5542. num_slots: usize,
  5543. account_data_size: Option<u64>,
  5544. ) -> (AccountsDb, Slot) {
  5545. agave_logger::setup();
  5546. let db = AccountsDb::new_single_for_tests();
  5547. // create a single append vec with a single account in a slot
  5548. // add the pubkey to index if alive
  5549. // call combine_ancient_slots with the slot
  5550. // verify we create an ancient appendvec that has alive accounts and does not have dead accounts
  5551. let slot1 = 1;
  5552. create_storages_and_update_index(&db, None, slot1, num_slots, alive, account_data_size);
  5553. let slot1 = slot1 as Slot;
  5554. (db, slot1)
  5555. }
  5556. fn get_one_ancient_append_vec_and_others_with_account_size(
  5557. num_normal_slots: usize,
  5558. account_data_size: Option<u64>,
  5559. ) -> (AccountsDb, Slot) {
5560. // We used to test 'alive = false' with the old shrinking algorithm, but
5561. // not any more with the new shrinking algorithm. 'alive = false' means
5562. // that we would have account entries that are in the storages but not in
5563. // the accounts-db index. This violates an assumption in accounts-db that
5564. // the new shrinking algorithm depends on. Therefore, we don't test
5565. // 'alive = false'.
  5566. let alive = true;
  5567. let (db, slot1) =
  5568. create_db_with_storages_and_index(alive, num_normal_slots + 1, account_data_size);
  5569. let storage = db.get_storage_for_slot(slot1).unwrap();
  5570. let created_accounts = db.get_unique_accounts_from_storage(&storage);
  5571. db.combine_ancient_slots_packed(vec![slot1], CAN_RANDOMLY_SHRINK_FALSE);
  5572. let after_store = db.get_storage_for_slot(slot1).unwrap();
  5573. let GetUniqueAccountsResult {
  5574. stored_accounts: after_stored_accounts,
  5575. capacity: after_capacity,
  5576. ..
  5577. } = db.get_unique_accounts_from_storage(&after_store);
  5578. assert!(created_accounts.capacity <= after_capacity);
  5579. assert_eq!(created_accounts.stored_accounts.len(), 1);
  5580. // always 1 account: either we leave the append vec alone if it is all dead
5581. // or we create a new one and copy into it if the account is alive
  5582. assert_eq!(after_stored_accounts.len(), 1);
  5583. (db, slot1)
  5584. }
  5585. fn get_one_ancient_append_vec_and_others(num_normal_slots: usize) -> (AccountsDb, Slot) {
  5586. get_one_ancient_append_vec_and_others_with_account_size(num_normal_slots, None)
  5587. }
  5588. #[test]
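/// handle_dropped_roots_for_ancient removes the dropped slots from the alive roots in the
/// accounts index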
  5589. fn test_handle_dropped_roots_for_ancient() {
  5590. agave_logger::setup();
  5591. let db = AccountsDb::new_single_for_tests();
  5592. db.handle_dropped_roots_for_ancient(std::iter::empty::<Slot>());
  5593. let slot0 = 0;
  5594. let dropped_roots = vec![slot0];
  5595. db.accounts_index.add_root(slot0);
  5596. assert!(db.accounts_index.is_alive_root(slot0));
  5597. db.handle_dropped_roots_for_ancient(dropped_roots.into_iter());
  5598. assert!(!db.accounts_index.is_alive_root(slot0));
  5599. }
  5600. fn insert_store(db: &AccountsDb, append_vec: Arc<AccountStorageEntry>) {
  5601. db.storage.insert(append_vec.slot(), append_vec);
  5602. }
  5603. #[test_case(StorageAccess::Mmap)]
  5604. #[test_case(StorageAccess::File)]
  5605. #[should_panic(expected = "self.storage.remove")]
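/// handle_dropped_roots_for_ancient should panic if a dropped slot still has a storage entry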
  5606. fn test_handle_dropped_roots_for_ancient_assert(storage_access: StorageAccess) {
  5607. agave_logger::setup();
  5608. let common_store_path = Path::new("");
  5609. let store_file_size = 10_000;
  5610. let entry = Arc::new(AccountStorageEntry::new(
  5611. common_store_path,
  5612. 0,
  5613. 1,
  5614. store_file_size,
  5615. AccountsFileProvider::AppendVec,
  5616. storage_access,
  5617. ));
  5618. let db = AccountsDb::new_single_for_tests();
  5619. let slot0 = 0;
  5620. let dropped_roots = vec![slot0];
  5621. insert_store(&db, entry);
  5622. db.handle_dropped_roots_for_ancient(dropped_roots.into_iter());
  5623. }
  5624. /// Test that `clean` reclaims old accounts when cleaning old storages
  5625. ///
  5626. /// When `clean` constructs candidates from old storages, pubkeys in these storages may have other
  5627. /// newer versions of the accounts in other newer storages *not* explicitly marked to be visited by
  5628. /// `clean`. In this case, `clean` should still reclaim the old versions of these accounts.
  5629. #[test]
  5630. fn test_clean_old_storages_with_reclaims_rooted() {
5631. // This test exercises clean behavior that is specific to obsolete accounts being disabled,
5632. // so only run it with obsolete accounts disabled
  5633. let accounts_db = AccountsDb::new_with_config(
  5634. Vec::new(),
  5635. AccountsDbConfig {
  5636. mark_obsolete_accounts: MarkObsoleteAccounts::Disabled,
  5637. ..ACCOUNTS_DB_CONFIG_FOR_TESTING
  5638. },
  5639. None,
  5640. Arc::default(),
  5641. );
  5642. let pubkey = Pubkey::new_unique();
  5643. let old_slot = 11;
  5644. let new_slot = 22;
  5645. let slots = [old_slot, new_slot];
  5646. for &slot in &slots {
  5647. let account = AccountSharedData::new(slot, 0, &Pubkey::new_unique());
  5648. // store `pubkey` into multiple slots, and also store another unique pubkey
  5649. // to prevent the whole storage from being marked as dead by `clean`.
  5650. accounts_db.store_for_tests((
  5651. slot,
  5652. [(&pubkey, &account), (&Pubkey::new_unique(), &account)].as_slice(),
  5653. ));
  5654. accounts_db.add_root_and_flush_write_cache(slot);
  5655. accounts_db.uncleaned_pubkeys.remove(&slot);
  5656. // ensure this slot is *not* in the dirty_stores nor uncleaned_pubkeys, because we want to
  5657. // test cleaning *old* storages, i.e. when they aren't explicitly marked for cleaning
  5658. assert!(!accounts_db.dirty_stores.contains_key(&slot));
  5659. assert!(!accounts_db.uncleaned_pubkeys.contains_key(&slot));
  5660. }
  5661. // add `old_slot` to the dirty stores list to mimic it being picked up as old
  5662. let old_storage = accounts_db
  5663. .storage
  5664. .get_slot_storage_entry_shrinking_in_progress_ok(old_slot)
  5665. .unwrap();
  5666. accounts_db.dirty_stores.insert(old_slot, old_storage);
  5667. // ensure the slot list for `pubkey` has both the old and new slots
  5668. let slot_list = accounts_db
  5669. .accounts_index
  5670. .get_bin(&pubkey)
  5671. .slot_list_mut(&pubkey, |slot_list| slot_list.clone_list())
  5672. .unwrap();
  5673. assert_eq!(slot_list.len(), slots.len());
  5674. assert!(slot_list.iter().map(|(slot, _)| slot).eq(slots.iter()));
  5675. // `clean` should now reclaim the account in `old_slot`, even though `new_slot` is not
  5676. // explicitly being cleaned
  5677. accounts_db.clean_accounts_for_tests();
  5678. // ensure we've reclaimed the account in `old_slot`
  5679. let slot_list = accounts_db
  5680. .accounts_index
  5681. .get_bin(&pubkey)
  5682. .slot_list_mut(&pubkey, |slot_list| slot_list.clone_list())
  5683. .unwrap();
  5684. assert_eq!(slot_list.len(), 1);
  5685. assert!(slot_list
  5686. .iter()
  5687. .map(|(slot, _)| slot)
  5688. .eq(iter::once(&new_slot)));
  5689. }
  5690. /// Test that `clean` respects rooted vs unrooted slots w.r.t. reclaims
  5691. ///
  5692. /// When an account is in multiple slots, and the latest is unrooted, `clean` should *not* reclaim
  5693. /// all the rooted versions.
  5694. #[test]
  5695. fn test_clean_old_storages_with_reclaims_unrooted() {
  5696. let accounts_db = AccountsDb::new_single_for_tests();
  5697. let pubkey = Pubkey::new_unique();
  5698. let old_slot = 11;
  5699. let new_slot = 22;
  5700. let slots = [old_slot, new_slot];
  5701. for &slot in &slots {
  5702. let account = AccountSharedData::new(slot, 0, &Pubkey::new_unique());
  5703. // store `pubkey` into multiple slots, and also store another unique pubkey
  5704. // to prevent the whole storage from being marked as dead by `clean`.
  5705. accounts_db.store_for_tests((
  5706. slot,
  5707. [(&pubkey, &account), (&Pubkey::new_unique(), &account)].as_slice(),
  5708. ));
  5709. }
  5710. // only `old_slot` should be rooted, not `new_slot`
  5711. accounts_db.add_root_and_flush_write_cache(old_slot);
  5712. assert!(accounts_db.accounts_index.is_alive_root(old_slot));
  5713. assert!(!accounts_db.accounts_index.is_alive_root(new_slot));
  5714. // ensure `old_slot` is in uncleaned_pubkeys (but not dirty_stores) so it'll be cleaned
  5715. assert!(accounts_db.uncleaned_pubkeys.contains_key(&old_slot));
  5716. assert!(!accounts_db.dirty_stores.contains_key(&old_slot));
  5717. // and `new_slot` should be in neither
  5718. assert!(!accounts_db.uncleaned_pubkeys.contains_key(&new_slot));
  5719. assert!(!accounts_db.dirty_stores.contains_key(&new_slot));
  5720. // ensure the slot list for `pubkey` has both the old and new slots
  5721. let slot_list = accounts_db
  5722. .accounts_index
  5723. .get_bin(&pubkey)
  5724. .slot_list_mut(&pubkey, |slot_list| slot_list.clone_list())
  5725. .unwrap();
  5726. assert_eq!(slot_list.len(), slots.len());
  5727. assert!(slot_list.iter().map(|(slot, _)| slot).eq(slots.iter()));
  5728. // `clean` should *not* reclaim the account in `old_slot` because `new_slot` is not a root
  5729. accounts_db.clean_accounts_for_tests();
  5730. // ensure we have NOT reclaimed the account in `old_slot`
  5731. let slot_list = accounts_db
  5732. .accounts_index
  5733. .get_bin(&pubkey)
  5734. .slot_list_mut(&pubkey, |slot_list| slot_list.clone_list())
  5735. .unwrap();
  5736. assert_eq!(slot_list.len(), slots.len());
  5737. assert!(slot_list.iter().map(|(slot, _)| slot).eq(slots.iter()));
  5738. }
5739. /// Ensure that calculating capitalization produces the correct value
  5740. #[test]
  5741. fn test_calculate_capitalization_simple() {
  5742. let accounts_db = AccountsDb::new_single_for_tests();
  5743. accounts_db.store_for_tests((
  5744. 0,
  5745. [(
  5746. &Pubkey::new_unique(),
  5747. &AccountSharedData::new(123, 0, &Pubkey::default()),
  5748. )]
  5749. .as_slice(),
  5750. ));
  5751. accounts_db.store_for_tests((
  5752. 1,
  5753. [(
  5754. &Pubkey::new_unique(),
  5755. &AccountSharedData::new(456, 0, &Pubkey::default()),
  5756. )]
  5757. .as_slice(),
  5758. ));
  5759. assert_eq!(
  5760. accounts_db.calculate_capitalization_at_startup_from_index(&Ancestors::from(vec![0, 1]), 1),
  5761. 123 + 456,
  5762. );
  5763. }
5764. /// Ensure that calculating capitalization panics if there is an overflow
5765. /// while summing balances within a single slot.
  5766. #[test]
  5767. #[should_panic(expected = "capitalization cannot overflow")]
  5768. fn test_calculate_capitalization_overflow_intra_slot() {
  5769. let accounts_db = AccountsDb::new_single_for_tests();
  5770. let account = AccountSharedData::new(u64::MAX - 1, 0, &Pubkey::default());
  5771. accounts_db.store_for_tests((0, [(&Pubkey::new_unique(), &account)].as_slice()));
  5772. accounts_db.store_for_tests((0, [(&Pubkey::new_unique(), &account)].as_slice()));
  5773. accounts_db.calculate_capitalization_at_startup_from_index(&Ancestors::from(vec![0]), 0);
  5774. }
5775. /// Ensure that calculating capitalization panics if there is an overflow
5776. /// while summing balances across multiple slots.
  5777. #[test]
  5778. #[should_panic(expected = "capitalization cannot overflow")]
  5779. fn test_calculate_capitalization_overflow_inter_slot() {
  5780. let accounts_db = AccountsDb::new_single_for_tests();
  5781. let account = AccountSharedData::new(u64::MAX - 1, 0, &Pubkey::default());
  5782. accounts_db.store_for_tests((0, [(&Pubkey::new_unique(), &account)].as_slice()));
  5783. accounts_db.store_for_tests((1, [(&Pubkey::new_unique(), &account)].as_slice()));
  5784. accounts_db.calculate_capitalization_at_startup_from_index(&Ancestors::from(vec![0, 1]), 1);
  5785. }
  5786. #[test]
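/// mark_obsolete_accounts_at_startup with no duplicate pubkeys marks nothing obsolete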
  5787. fn test_mark_obsolete_accounts_at_startup_none() {
  5788. let (_accounts_dirs, paths) = get_temp_accounts_paths(2).unwrap();
  5789. let accounts_db = AccountsDb::new_for_tests(paths);
  5790. let slots = 0;
  5791. let pubkeys_with_duplicates_by_bin = vec![];
  5792. let obsolete_stats =
  5793. accounts_db.mark_obsolete_accounts_at_startup(slots, pubkeys_with_duplicates_by_bin);
  5794. assert_eq!(
  5795. obsolete_stats.accounts_marked_obsolete, 0,
  5796. "No accounts should be reclaimed for empty bin"
  5797. );
  5798. }
  5799. #[test]
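/// when a pubkey has versions in multiple slots, mark_obsolete_accounts_at_startup marks the
/// older versions obsolete and purges any slot left with only obsolete accounts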
  5800. fn test_mark_obsolete_accounts_at_startup_purge_slot() {
  5801. let (_accounts_dirs, paths) = get_temp_accounts_paths(2).unwrap();
  5802. let accounts_db = AccountsDb::new_for_tests(paths);
  5803. let slots = 2;
  5804. let pubkey1 = Pubkey::new_unique();
  5805. let pubkey2 = Pubkey::new_unique();
  5806. let account = AccountSharedData::new(100, 0, &Pubkey::default());
  5807. // Store the same pubkey in multiple slots
5808. // Store another pubkey in slot 0 to ensure that slot is not purged
  5809. accounts_db.store_for_tests((0, [(&pubkey1, &account), (&pubkey2, &account)].as_slice()));
  5810. accounts_db.flush_accounts_cache_slot_for_tests(0);
  5811. accounts_db.store_for_tests((1, [(&pubkey1, &account)].as_slice()));
  5812. accounts_db.flush_accounts_cache_slot_for_tests(1);
  5813. accounts_db.store_for_tests((2, [(&pubkey1, &account)].as_slice()));
  5814. accounts_db.flush_accounts_cache_slot_for_tests(2);
  5815. let pubkeys_with_duplicates_by_bin = vec![vec![pubkey1]];
  5816. let obsolete_stats =
  5817. accounts_db.mark_obsolete_accounts_at_startup(slots, pubkeys_with_duplicates_by_bin);
  5818. // Verify that slot 0 has not been purged
  5819. assert!(accounts_db.storage.get_slot_storage_entry(0).is_some());
  5820. // Verify that slot 1 has been purged
  5821. assert!(accounts_db.storage.get_slot_storage_entry(1).is_none());
5822. // Verify that pubkey1's ref count is 1
  5823. accounts_db.assert_ref_count(&pubkey1, 1);
  5824. assert_eq!(obsolete_stats.accounts_marked_obsolete, 2);
  5825. }
  5826. #[test]
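/// duplicate pubkeys split across multiple index bins are all processed: older versions are
/// marked obsolete and fully-obsolete slots are removed, with stats accumulated across bins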
  5827. fn test_mark_obsolete_accounts_at_startup_multiple_bins() {
  5828. let (_accounts_dirs, paths) = get_temp_accounts_paths(2).unwrap();
  5829. let accounts_db = AccountsDb::new_for_tests(paths);
  5830. let pubkey1 = Pubkey::from([0; 32]); // Ensure pubkey1 is in bin 0
  5831. let pubkey2 = Pubkey::from([255; 32]); // Ensure pubkey2 is in a different bin
  5832. let account = AccountSharedData::new(100, 0, &Pubkey::default());
  5833. for slot in 0..2 {
  5834. accounts_db.store_for_tests((
  5835. slot,
  5836. [(&pubkey1, &account), (&pubkey2, &account)].as_slice(),
  5837. ));
  5838. accounts_db.flush_accounts_cache_slot_for_tests(slot);
  5839. }
  5840. let pubkeys_with_duplicates_by_bin = vec![vec![pubkey1], vec![pubkey2]];
  5841. let obsolete_stats =
  5842. accounts_db.mark_obsolete_accounts_at_startup(2, pubkeys_with_duplicates_by_bin);
  5843. // Verify that slot 0 has been purged
  5844. assert!(accounts_db.storage.get_slot_storage_entry(0).is_none());
5845. // Verify that slot 1 has not been purged
  5846. assert!(accounts_db.storage.get_slot_storage_entry(1).is_some());
  5847. // Verify that both pubkeys ref_counts are 1
  5848. accounts_db.assert_ref_count(&pubkey1, 1);
  5849. accounts_db.assert_ref_count(&pubkey2, 1);
  5850. // Ensure that stats were accumulated correctly
  5851. assert_eq!(obsolete_stats.accounts_marked_obsolete, 2);
  5852. assert_eq!(obsolete_stats.slots_removed, 1);
  5853. }
  5854. #[test]
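/// batch_insert_zero_lamport_single_ref_account_offsets returns the number of newly inserted
/// offsets, ignoring duplicates and empty input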
  5855. fn test_batch_insert_zero_lamport_single_ref_account_offsets() {
  5856. let accounts = AccountsDb::new_single_for_tests();
  5857. let storage = accounts.create_and_insert_store(1, 100, "test");
  5858. // Test inserting new offsets
  5859. let offsets1 = vec![10, 20, 30];
  5860. let count1 = storage.batch_insert_zero_lamport_single_ref_account_offsets(&offsets1);
  5861. assert_eq!(count1, 3, "Should insert all 3 new offsets");
  5862. assert_eq!(storage.num_zero_lamport_single_ref_accounts(), 3);
  5863. // Test inserting some duplicate and some new offsets
  5864. let offsets2 = vec![20, 30, 40, 50]; // 20,30 are duplicates, 40,50 are new
  5865. let count2 = storage.batch_insert_zero_lamport_single_ref_account_offsets(&offsets2);
  5866. assert_eq!(count2, 2, "Should insert only 2 new offsets (40, 50)");
  5867. assert_eq!(storage.num_zero_lamport_single_ref_accounts(), 5);
  5868. // Test inserting all duplicates
  5869. let offsets3 = vec![10, 20];
  5870. let count3 = storage.batch_insert_zero_lamport_single_ref_account_offsets(&offsets3);
  5871. assert_eq!(count3, 0, "Should not insert any duplicates");
  5872. assert_eq!(storage.num_zero_lamport_single_ref_accounts(), 5);
  5873. // Test inserting empty slice
  5874. let empty_offsets: Vec<usize> = vec![];
  5875. let count4 = storage.batch_insert_zero_lamport_single_ref_account_offsets(&empty_offsets);
  5876. assert_eq!(count4, 0, "Should handle empty slice");
  5877. assert_eq!(storage.num_zero_lamport_single_ref_accounts(), 5);
  5878. // Test inserting large batch with mixed duplicates
  5879. let offsets5 = vec![10, 60, 20, 70, 30, 80, 40]; // 10,20,30,40 duplicates, 60,70,80 new
  5880. let count5 = storage.batch_insert_zero_lamport_single_ref_account_offsets(&offsets5);
  5881. assert_eq!(count5, 3, "Should insert only 3 new offsets (60, 70, 80)");
  5882. assert_eq!(storage.num_zero_lamport_single_ref_accounts(), 8);
  5883. }