//-------------------------------------------------------------------------------------------------------
// Copyright (C) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE.txt file in the project root for full license information.
//-------------------------------------------------------------------------------------------------------
#include "CommonMemoryPch.h"

#ifdef _M_AMD64
#include "amd64.h"
#endif
#ifdef _M_ARM
#include "arm.h"
#endif
#ifdef _M_ARM64
#include "arm64.h"
#endif

#include "Core/BinaryFeatureControl.h"
#include "Common/ThreadService.h"
#include "Memory/AutoAllocatorObjectPtr.h"

DEFINE_RECYCLER_TRACKER_PERF_COUNTER(RecyclerWeakReferenceBase);
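
// The two empty structs below exist only to provide distinct typeid()s: the alloc
// tracker uses them as sentinel "types" to label the unallocated tail of a
// bump-allocated block and explicitly free-listed objects in its statistics.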
#ifdef PROFILE_RECYCLER_ALLOC
struct UnallocatedPortionOfBumpAllocatedBlock
{
};
struct ExplicitFreeListedObject
{
};
Recycler::TrackerData Recycler::TrackerData::EmptyData(&typeid(UnallocatedPortionOfBumpAllocatedBlock), false);
Recycler::TrackerData Recycler::TrackerData::ExplicitFreeListObjectData(&typeid(ExplicitFreeListedObject), false);
#endif
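
// Activation kinds reported in GC ETW events. Values 0-3 identify how a collection was
// requested; values 11-15 identify individual phases of a concurrent collection.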
enum ETWEventGCActivationKind : unsigned
{
    ETWEvent_GarbageCollect = 0,    // force in-thread GC
    ETWEvent_ThreadCollect = 1,     // thread GC with wait
    ETWEvent_ConcurrentCollect = 2,
    ETWEvent_PartialCollect = 3,

    ETWEvent_ConcurrentMark = 11,
    ETWEvent_ConcurrentRescan = 12,
    ETWEvent_ConcurrentSweep = 13,
    ETWEvent_ConcurrentTransferSwept = 14,
    ETWEvent_ConcurrentFinishMark = 15,
};
DefaultRecyclerCollectionWrapper DefaultRecyclerCollectionWrapper::Instance;

inline bool
DefaultRecyclerCollectionWrapper::IsCollectionDisabled(Recycler * recycler)
{
    // GC shouldn't be triggered during heap enum, unless we missed a case where it allocates memory (which
    // shouldn't happen during heap enum), or for the case where we explicitly allow allocation.
    // REVIEW: isHeapEnumInProgress should have been a collection state and checked before, to avoid a check here.
    // Collection will be disabled in VarDispEx because it could be called from projection re-entrance, as ASTA allows
    // QI/AddRef/Release to come back.
    bool collectionDisabled = recycler->IsCollectionDisabled();
#if DBG
    if (collectionDisabled)
    {
        // Disabled collection should only happen if we allowed allocation during heap enum
        if (recycler->IsHeapEnumInProgress())
        {
            Assert(recycler->AllowAllocationDuringHeapEnum());
        }
        else
        {
#ifdef ENABLE_PROJECTION
            Assert(recycler->IsInRefCountTrackingForProjection());
#else
            Assert(false);
#endif
        }
    }
#endif
    return collectionDisabled;
}
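
// Dispatches a collection request through the wrapper: bail out if collection is
// currently disabled, otherwise invoke the requested Recycler member function with the
// given flags. The NO_EXCEPTION guard keeps C++ exceptions from escaping the GC callback.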
BOOL DefaultRecyclerCollectionWrapper::ExecuteRecyclerCollectionFunction(Recycler * recycler, CollectionFunction function, CollectionFlags flags)
{
    if (IsCollectionDisabled(recycler))
    {
        return FALSE;
    }

    BOOL ret = FALSE;
    BEGIN_NO_EXCEPTION
    {
        ret = (recycler->*(function))(flags);
    }
    END_NO_EXCEPTION;
    return ret;
}

void
DefaultRecyclerCollectionWrapper::DisposeObjects(Recycler * recycler)
{
    if (IsCollectionDisabled(recycler))
    {
        return;
    }

    BEGIN_NO_EXCEPTION
    {
        recycler->DisposeObjects();
    }
    END_NO_EXCEPTION;
}
static void* GetStackBase();
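
// Explicit instantiations of the hot allocation paths for the common <NoBit, false>
// case, ensuring their (force-inlined) definitions are emitted in this translation unit.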
template _ALWAYSINLINE char * Recycler::AllocWithAttributesInlined<NoBit, false>(size_t size);
template _ALWAYSINLINE char* Recycler::RealAlloc<NoBit, false>(HeapInfo* heap, size_t size);
template _ALWAYSINLINE _Ret_notnull_ void * __cdecl operator new<Recycler>(size_t byteSize, Recycler * alloc, char * (Recycler::*AllocFunc)(size_t));
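
// Recycler constructor. C++ runs member initializers in declaration order, so the list
// below mirrors the member declarations in the header. Most members start out in their
// inert state (no collection in progress, concurrent/partial features off) and are
// enabled later via Initialize() and the Set* configuration methods.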
Recycler::Recycler(AllocationPolicyManager * policyManager, IdleDecommitPageAllocator * pageAllocator, void (*outOfMemoryFunc)(), Js::ConfigFlagsTable& configFlagsTable) :
    collectionState(CollectionStateNotCollecting),
    recyclerFlagsTable(configFlagsTable),
    recyclerPageAllocator(this, policyManager, configFlagsTable, RecyclerHeuristic::Instance.DefaultMaxFreePageCount, RecyclerHeuristic::Instance.DefaultMaxAllocPageCount),
    recyclerLargeBlockPageAllocator(this, policyManager, configFlagsTable, RecyclerHeuristic::Instance.DefaultMaxFreePageCount),
    threadService(nullptr),
#ifdef RECYCLER_WRITE_BARRIER_ALLOC_SEPARATE_PAGE
    recyclerWithBarrierPageAllocator(this, policyManager, configFlagsTable, RecyclerHeuristic::Instance.DefaultMaxFreePageCount, PageAllocator::DefaultMaxAllocPageCount, true),
#endif
    threadPageAllocator(pageAllocator),
    markPagePool(configFlagsTable),
    parallelMarkPagePool1(configFlagsTable),
    parallelMarkPagePool2(configFlagsTable),
    parallelMarkPagePool3(configFlagsTable),
    markContext(this, &this->markPagePool),
    parallelMarkContext1(this, &this->parallelMarkPagePool1),
    parallelMarkContext2(this, &this->parallelMarkPagePool2),
    parallelMarkContext3(this, &this->parallelMarkPagePool3),
#if ENABLE_PARTIAL_GC
    clientTrackedObjectAllocator(_u("CTO-List"), GetPageAllocator(), Js::Throw::OutOfMemory),
#endif
    outOfMemoryFunc(outOfMemoryFunc),
#ifdef RECYCLER_TEST_SUPPORT
    checkFn(NULL),
#endif
    externalRootMarker(NULL),
    externalRootMarkerContext(NULL),
    recyclerSweep(nullptr),
    inEndMarkOnLowMemory(false),
    enableScanInteriorPointers(CUSTOM_CONFIG_FLAG(configFlagsTable, RecyclerForceMarkInterior)),
    enableScanImplicitRoots(false),
    disableCollectOnAllocationHeuristics(false),
    skipStack(false),
    mainThreadHandle(NULL),
#if ENABLE_CONCURRENT_GC
    backgroundFinishMarkCount(0),
    hasPendingUnpinnedObject(false),
    hasPendingConcurrentFindRoot(false),
    queueTrackedObject(false),
    enableConcurrentMark(false), // Default to non-concurrent
    enableParallelMark(false),
    enableConcurrentSweep(false),
    concurrentThread(NULL),
    concurrentWorkReadyEvent(NULL),
    concurrentWorkDoneEvent(NULL),
    parallelThread1(this, &Recycler::ParallelWorkFunc<0>),
    parallelThread2(this, &Recycler::ParallelWorkFunc<1>),
    priorityBoost(false),
    isAborting(false),
#if DBG
    concurrentThreadExited(true),
    isProcessingTrackedObjects(false),
    hasIncompleteDoCollect(false),
    isConcurrentGCOnIdle(false),
    isFinishGCOnIdle(false),
#endif
#ifdef IDLE_DECOMMIT_ENABLED
    concurrentIdleDecommitEvent(nullptr),
#endif
#endif
#if DBG
    isExternalStackSkippingGC(false),
    isProcessingRescan(false),
#endif
#if ENABLE_PARTIAL_GC
    inPartialCollectMode(false),
    scanPinnedObjectMap(false),
    partialUncollectedAllocBytes(0),
    uncollectedNewPageCountPartialCollect((size_t)-1),
#if ENABLE_CONCURRENT_GC
    partialConcurrentNextCollection(false),
#endif
#ifdef RECYCLER_STRESS
    forcePartialScanStack(false),
#endif
#endif
#if defined(RECYCLER_DUMP_OBJECT_GRAPH) || defined(LEAK_REPORT) || defined(CHECK_MEMORY_LEAK)
    isPrimaryMarkContextInitialized(false),
#endif
    allowDispose(false),
    inDisposeWrapper(false),
    hasDisposableObject(false),
    tickCountNextDispose(0),
    hasPendingTransferDisposedObjects(false),
    transientPinnedObject(nullptr),
    pinnedObjectMap(1024, HeapAllocator::GetNoMemProtectInstance()),
    weakReferenceMap(1024, HeapAllocator::GetNoMemProtectInstance()),
    weakReferenceCleanupId(0),
    collectionWrapper(&DefaultRecyclerCollectionWrapper::Instance),
    isScriptActive(false),
    isInScript(false),
    isShuttingDown(false),
    inExhaustiveCollection(false),
    hasExhaustiveCandidate(false),
    inDecommitNowCollection(false),
    inCacheCleanupCollection(false),
    hasPendingDeleteGuestArena(false),
    needOOMRescan(false),
#if ENABLE_CONCURRENT_GC && ENABLE_PARTIAL_GC
    hasBackgroundFinishPartial(false),
#endif
    decommitOnFinish(false)
#ifdef PROFILE_EXEC
    , profiler(nullptr)
    , backgroundProfiler(nullptr)
    , backgroundProfilerPageAllocator(nullptr, configFlagsTable, PageAllocatorType_GCThread)
    , backgroundProfilerArena()
#endif
#ifdef PROFILE_MEM
    , memoryData(nullptr)
#endif
#ifdef RECYCLER_DUMP_OBJECT_GRAPH
    , objectGraphDumper(nullptr)
    , dumpObjectOnceOnCollect(false)
#endif
#ifdef PROFILE_RECYCLER_ALLOC
    , trackerDictionary(nullptr)
#endif
#ifdef HEAP_ENUMERATION_VALIDATION
    , pfPostHeapEnumScanCallback(nullptr)
#endif
#ifdef NTBUILD
    , telemetryBlock(&localTelemetryBlock)
#endif
#ifdef ENABLE_JS_ETW
    , bulkFreeMemoryWrittenCount(0)
#endif
#ifdef RECYCLER_PAGE_HEAP
    , isPageHeapEnabled(false)
    , capturePageHeapAllocStack(false)
    , capturePageHeapFreeStack(false)
#endif
    , objectBeforeCollectCallbackMap(nullptr)
    , objectBeforeCollectCallbackState(ObjectBeforeCollectCallback_None)
#if GLOBAL_ENABLE_WRITE_BARRIER
    , pendingWriteBarrierBlockMap(&HeapAllocator::Instance)
#endif
#ifdef PROFILE_RECYCLER_ALLOC
    , trackerCriticalSection(nullptr)
#endif
{
#ifdef RECYCLER_MARK_TRACK
    this->markMap = NoCheckHeapNew(MarkMap, &NoCheckHeapAllocator::Instance, 163, &markMapCriticalSection);
    markContext.SetMarkMap(markMap);
    parallelMarkContext1.SetMarkMap(markMap);
    parallelMarkContext2.SetMarkMap(markMap);
    parallelMarkContext3.SetMarkMap(markMap);
#endif
#ifdef RECYCLER_MEMORY_VERIFY
    verifyPad = GetRecyclerFlagsTable().RecyclerVerifyPadSize;
    verifyEnabled = GetRecyclerFlagsTable().IsEnabled(Js::RecyclerVerifyFlag);
    if (verifyEnabled)
    {
        ForEachPageAllocator([](IdleDecommitPageAllocator* pageAlloc)
        {
            pageAlloc->EnableVerify();
        });
    }
#endif
#ifdef RECYCLER_NO_PAGE_REUSE
    if (GetRecyclerFlagsTable().RecyclerNoPageReuse)
    {
        ForEachPageAllocator([](IdleDecommitPageAllocator* pageAlloc)
        {
            pageAlloc->DisablePageReuse();
        });
    }
#endif
    this->inDispose = false;
#if DBG
    this->heapBlockCount = 0;
    this->collectionCount = 0;
    this->disableThreadAccessCheck = false;
#if ENABLE_CONCURRENT_GC
    this->disableConcurrentThreadExitedCheck = false;
#endif
#endif
#if DBG || defined RECYCLER_TRACE
    this->inResolveExternalWeakReferences = false;
#endif
#if DBG || defined(RECYCLER_STATS)
    isForceSweeping = false;
#endif
#ifdef RECYCLER_FINALIZE_CHECK
    collectionStats.finalizeCount = 0;
#endif
    RecyclerMemoryTracking::ReportRecyclerCreate(this);
#if DBG_DUMP
    forceTraceMark = false;
    recyclerPageAllocator.debugName = _u("Recycler");
    recyclerLargeBlockPageAllocator.debugName = _u("RecyclerLargeBlock");
#ifdef RECYCLER_WRITE_BARRIER_ALLOC_SEPARATE_PAGE
    recyclerWithBarrierPageAllocator.debugName = _u("RecyclerWithBarrier");
#endif
#endif
    isHeapEnumInProgress = false;
    isCollectionDisabled = false;
#if DBG
    allowAllocationDuringRenentrance = false;
    allowAllocationDuringHeapEnum = false;
#ifdef ENABLE_PROJECTION
    isInRefCountTrackingForProjection = false;
#endif
#endif
    ScheduleNextCollection();
#if defined(RECYCLER_DUMP_OBJECT_GRAPH) || defined(LEAK_REPORT) || defined(CHECK_MEMORY_LEAK)
    this->inDllCanUnloadNow = false;
    this->inDetachProcess = false;
#endif
#ifdef NTBUILD
    memset(&localTelemetryBlock, 0, sizeof(localTelemetryBlock));
#endif
#ifdef ENABLE_DEBUG_CONFIG_OPTIONS
    // The recycler requires at least Recycler::PrimaryMarkStackReservedPageCount pages for the main mark context to function properly
    this->markContext.SetMaxPageCount(max(static_cast<size_t>(GetRecyclerFlagsTable().MaxMarkStackPageCount), static_cast<size_t>(Recycler::PrimaryMarkStackReservedPageCount)));
    this->parallelMarkContext1.SetMaxPageCount(GetRecyclerFlagsTable().MaxMarkStackPageCount);
    this->parallelMarkContext2.SetMaxPageCount(GetRecyclerFlagsTable().MaxMarkStackPageCount);
    this->parallelMarkContext3.SetMaxPageCount(GetRecyclerFlagsTable().MaxMarkStackPageCount);
    if (GetRecyclerFlagsTable().IsEnabled(Js::GCMemoryThresholdFlag))
    {
        // Note: we can't do this in the constructor of RecyclerHeuristic::Instance because it runs before the config is processed
        RecyclerHeuristic::Instance.ConfigureBaseFactor(GetRecyclerFlagsTable().GCMemoryThreshold);
    }
#endif
}
#if DBG
void
Recycler::SetDisableThreadAccessCheck()
{
    recyclerPageAllocator.SetDisableThreadAccessCheck();
    recyclerLargeBlockPageAllocator.SetDisableThreadAccessCheck();
#ifdef RECYCLER_WRITE_BARRIER_ALLOC_SEPARATE_PAGE
    recyclerWithBarrierPageAllocator.SetDisableThreadAccessCheck();
#endif
    disableThreadAccessCheck = true;
}
#endif
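
// MemProtect mode configures the recycler for conservative, heap-wide protection:
// interior-pointer scanning and implicit-root scanning are enabled, and the normal
// collect-on-allocation heuristics are disabled.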
void
Recycler::SetMemProtectMode()
{
    this->enableScanInteriorPointers = true;
    this->enableScanImplicitRoots = true;
    this->disableCollectOnAllocationHeuristics = true;
#ifdef RECYCLER_STRESS
    this->recyclerStress = GetRecyclerFlagsTable().MemProtectHeapStress;
#if ENABLE_CONCURRENT_GC
    this->recyclerBackgroundStress = GetRecyclerFlagsTable().MemProtectHeapBackgroundStress;
    this->recyclerConcurrentStress = GetRecyclerFlagsTable().MemProtectHeapConcurrentStress;
    this->recyclerConcurrentRepeatStress = GetRecyclerFlagsTable().MemProtectHeapConcurrentRepeatStress;
#endif
#if ENABLE_PARTIAL_GC
    this->recyclerPartialStress = GetRecyclerFlagsTable().MemProtectHeapPartialStress;
#endif
#endif
}
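
// Logs the MemProtect heap size via ETW by aggregating the used, reserved, and
// committed byte counts (and segment counts) across all of the recycler's page
// allocators.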
void
Recycler::LogMemProtectHeapSize(bool fromGC)
{
    Assert(IsMemProtectMode());
#ifdef ENABLE_JS_ETW
    if (IS_JS_ETW(EventEnabledMEMPROTECT_GC_HEAP_SIZE()))
    {
        IdleDecommitPageAllocator* recyclerPageAllocator = GetRecyclerPageAllocator();
        IdleDecommitPageAllocator* recyclerLeafPageAllocator = GetRecyclerLeafPageAllocator();
        IdleDecommitPageAllocator* recyclerLargeBlockPageAllocator = GetRecyclerLargeBlockPageAllocator();
#ifdef RECYCLER_WRITE_BARRIER_ALLOC_SEPARATE_PAGE
        IdleDecommitPageAllocator* recyclerWithBarrierPageAllocator = GetRecyclerWithBarrierPageAllocator();
#endif
        size_t usedBytes = (recyclerPageAllocator->usedBytes + recyclerLeafPageAllocator->usedBytes +
            recyclerLargeBlockPageAllocator->usedBytes);
        size_t reservedBytes = (recyclerPageAllocator->reservedBytes + recyclerLeafPageAllocator->reservedBytes +
            recyclerLargeBlockPageAllocator->reservedBytes);
        size_t committedBytes = (recyclerPageAllocator->committedBytes + recyclerLeafPageAllocator->committedBytes +
            recyclerLargeBlockPageAllocator->committedBytes);
        size_t numberOfSegments = (recyclerPageAllocator->numberOfSegments +
            recyclerLeafPageAllocator->numberOfSegments +
            recyclerLargeBlockPageAllocator->numberOfSegments);
#ifdef RECYCLER_WRITE_BARRIER_ALLOC_SEPARATE_PAGE
        usedBytes += recyclerWithBarrierPageAllocator->usedBytes;
        reservedBytes += recyclerWithBarrierPageAllocator->reservedBytes;
        committedBytes += recyclerWithBarrierPageAllocator->committedBytes;
        numberOfSegments += recyclerWithBarrierPageAllocator->numberOfSegments;
#endif
        JS_ETW(EventWriteMEMPROTECT_GC_HEAP_SIZE(this, usedBytes, reservedBytes, committedBytes, numberOfSegments, fromGC));
    }
#endif
}
#if DBG
void
Recycler::SetDisableConcurrentThreadExitedCheck()
{
#if ENABLE_CONCURRENT_GC
    disableConcurrentThreadExitedCheck = true;
#endif
#ifdef RECYCLER_STRESS
    this->recyclerStress = false;
#if ENABLE_CONCURRENT_GC
    this->recyclerBackgroundStress = false;
    this->recyclerConcurrentStress = false;
    this->recyclerConcurrentRepeatStress = false;
#endif
#if ENABLE_PARTIAL_GC
    this->recyclerPartialStress = false;
#endif
#endif
}
#endif
#if DBG
void
Recycler::ResetThreadId()
{
    // Transfer all the page allocators to the current thread id
    ForEachPageAllocator([](IdleDecommitPageAllocator* pageAlloc)
    {
        pageAlloc->ClearConcurrentThreadId();
    });
#if ENABLE_CONCURRENT_GC
    if (this->IsConcurrentEnabled())
    {
        markContext.GetPageAllocator()->ClearConcurrentThreadId();
    }
#endif
#if defined(DBG) && defined(PROFILE_EXEC)
    this->backgroundProfilerPageAllocator.ClearConcurrentThreadId();
#endif
}
#endif
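
// Destructor teardown order: stop further collection, report leaks, clean up any
// leftover concurrent sweep state, close the page allocators, release the mark
// contexts, and finally null out every outstanding weak reference so finalizers that
// run during shutdown simply see their targets as already collected.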
Recycler::~Recycler()
{
#if ENABLE_CONCURRENT_GC
    Assert(!this->isAborting);
#endif
#if DBG && GLOBAL_ENABLE_WRITE_BARRIER
    recyclerListLock.Enter();
    if (recyclerList == this)
    {
        recyclerList = this->next;
    }
    else if (recyclerList)
    {
        Recycler* list = recyclerList;
        while (list->next != this)
        {
            list = list->next;
        }
        list->next = this->next;
    }
    recyclerListLock.Leave();
#endif
    // Stop any further collection
    this->isShuttingDown = true;
#if DBG
    this->ResetThreadId();
#endif
#ifdef ENABLE_JS_ETW
    FlushFreeRecord();
#endif
    ClearObjectBeforeCollectCallbacks();
#ifdef RECYCLER_DUMP_OBJECT_GRAPH
    if (GetRecyclerFlagsTable().DumpObjectGraphOnExit)
    {
        // Always skip stack here, as we may be running the dtor on another thread.
        RecyclerObjectGraphDumper::Param param = { 0 };
        param.skipStack = true;
        this->DumpObjectGraph(&param);
    }
#endif
    AUTO_LEAK_REPORT_SECTION(this->GetRecyclerFlagsTable(), _u("Recycler (%p): %s"), this, this->IsInDllCanUnloadNow() ? _u("DllCanUnloadNow") :
        this->IsInDetachProcess() ? _u("DetachProcess") : _u("Destructor"));
#ifdef LEAK_REPORT
    ReportLeaks();
#endif
#ifdef CHECK_MEMORY_LEAK
    CheckLeaks(this->IsInDllCanUnloadNow() ? _u("DllCanUnloadNow") : this->IsInDetachProcess() ? _u("DetachProcess") : _u("Destructor"));
#endif
    AUTO_LEAK_REPORT_SECTION_0(this->GetRecyclerFlagsTable(), _u("Skipped finalizers"));
#if ENABLE_CONCURRENT_GC
    Assert(concurrentThread == nullptr);
    // We only sometimes clean up the state after aborting a concurrent collection,
    // so we still need to delete any heap blocks held by the recyclerSweep
    if (recyclerSweep != nullptr)
    {
        recyclerSweep->ShutdownCleanup();
        recyclerSweep = nullptr;
    }
    if (mainThreadHandle != nullptr)
    {
        CloseHandle(mainThreadHandle);
    }
#endif
    recyclerPageAllocator.Close();
    recyclerLargeBlockPageAllocator.Close();
#ifdef RECYCLER_WRITE_BARRIER_ALLOC_SEPARATE_PAGE
    recyclerWithBarrierPageAllocator.Close();
#endif
    markContext.Release();
    parallelMarkContext1.Release();
    parallelMarkContext2.Release();
    parallelMarkContext3.Release();

    // Clean up the weak reference map so that
    // objects being finalized can safely refer to weak references
    // (this could otherwise become a problem for weak references held
    // to large objects, since their block would be destroyed before
    // the finalizer was run).
    // When the recycler is shutting down, all objects are going to be reclaimed,
    // so null out the weak references so that anyone relying on weak
    // references simply thinks the object has been reclaimed.
    weakReferenceMap.Map([](RecyclerWeakReferenceBase * weakRef) -> bool
    {
        weakRef->strongRef = nullptr;
        // Put in a dummy heap block so that we can still do the isPendingConcurrentSweep check first.
        weakRef->strongRefHeapBlock = &CollectedRecyclerWeakRefHeapBlock::Instance;
        // Remove
        return false;
    });
#if ENABLE_PARTIAL_GC
    clientTrackedObjectList.Clear(&this->clientTrackedObjectAllocator);
#endif
#ifdef PROFILE_RECYCLER_ALLOC
    if (trackerDictionary != nullptr)
    {
        this->trackerDictionary->Map([](type_info const *, TrackerItem * item)
        {
            NoCheckHeapDelete(item);
        });
        NoCheckHeapDelete(this->trackerDictionary);
        this->trackerDictionary = nullptr;
        delete (trackerCriticalSection);
    }
#endif
#ifdef RECYCLER_MARK_TRACK
    NoCheckHeapDelete(this->markMap);
    this->markMap = nullptr;
#endif
#if DBG
    // Disable idle decommit asserts
    ForEachPageAllocator([](IdleDecommitPageAllocator* pageAlloc)
    {
        pageAlloc->ShutdownIdleDecommit();
    });
#endif
    Assert(this->collectionState == CollectionStateExit || this->collectionState == CollectionStateNotCollecting);
#if ENABLE_CONCURRENT_GC
    Assert(this->disableConcurrentThreadExitedCheck || this->concurrentThreadExited == true);
#endif
}
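
// Binds the recycler to the current thread. GetCurrentThread() returns only a
// pseudo-handle, so DuplicateHandle is used to obtain a real handle to the main thread;
// the stack base is recorded so the GC can scan this thread's stack for roots.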
void
Recycler::SetIsThreadBound()
{
    Assert(mainThreadHandle == nullptr);
    ::DuplicateHandle(::GetCurrentProcess(), ::GetCurrentThread(), ::GetCurrentProcess(), &mainThreadHandle,
        0, FALSE, DUPLICATE_SAME_ACCESS);
    stackBase = GetStackBase();
}
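
// Pins an object as an explicit GC root. transientPinnedObject is a one-slot cache
// holding the most recently pinned object; it is flushed into pinnedObjectMap (with a
// proper refcount) only when the next object is pinned, and obj takes its place here.
// A hedged usage sketch for a host embedding the recycler:
//
//     recycler->RootAddRef(obj, nullptr);   // keep obj alive across potential GCs
//     /* ... work that may allocate and trigger a collection ... */
//     recycler->RootRelease(obj, nullptr);  // unpin; obj becomes collectible again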
void
Recycler::RootAddRef(void* obj, uint *count)
{
    Assert(this->IsValidObject(obj));

    if (transientPinnedObject)
    {
        PinRecord& refCount = pinnedObjectMap.GetReference(transientPinnedObject);
        ++refCount;
        if (refCount == 1)
        {
            this->scanPinnedObjectMap = true;
            RECYCLER_PERF_COUNTER_INC(PinnedObject);
        }
#if defined(CHECK_MEMORY_LEAK) || defined(LEAK_REPORT)
#ifdef STACK_BACK_TRACE
        if (GetRecyclerFlagsTable().LeakStackTrace)
        {
            StackBackTraceNode::Prepend(&NoCheckHeapAllocator::Instance, refCount.stackBackTraces,
                transientPinnedObjectStackBackTrace);
        }
#endif
#endif
    }

    if (count != nullptr)
    {
        PinRecord* refCount = pinnedObjectMap.TryGetReference(obj);
        *count = (refCount != nullptr) ? (*refCount + 1) : 1;
    }

    transientPinnedObject = obj;
#if defined(CHECK_MEMORY_LEAK) || defined(LEAK_REPORT)
#ifdef STACK_BACK_TRACE
    if (GetRecyclerFlagsTable().LeakStackTrace)
    {
        transientPinnedObjectStackBackTrace = StackBackTrace::Capture(&NoCheckHeapAllocator::Instance);
    }
#endif
#endif
}
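// Note on the transient pin above: the most recent RootAddRef does not bump the
// pin count immediately. Instead the object is parked in transientPinnedObject,
// and its count is only materialized in pinnedObjectMap when the next RootAddRef
// displaces it; a matching RootRelease of the transient object just clears the
// slot. This keeps the common AddRef/Release-in-quick-succession pattern from
// churning the pinned-object map. (Descriptive note; behavior inferred from the
// code above and RootRelease below.)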
void
Recycler::RootRelease(void* obj, uint *count)
{
    Assert(this->IsValidObject(obj));

    if (transientPinnedObject == obj)
    {
        transientPinnedObject = nullptr;
        if (count != nullptr)
        {
            PinRecord *refCount = pinnedObjectMap.TryGetReference(obj);
            *count = (refCount != nullptr) ? *refCount : 0;
        }
#if defined(CHECK_MEMORY_LEAK) || defined(LEAK_REPORT)
#ifdef STACK_BACK_TRACE
        if (GetRecyclerFlagsTable().LeakStackTrace)
        {
            transientPinnedObjectStackBackTrace->Delete(&NoCheckHeapAllocator::Instance);
        }
#endif
#endif
    }
    else
    {
        PinRecord *refCount = pinnedObjectMap.TryGetReference(obj);
        if (refCount == nullptr)
        {
            if (count != nullptr)
            {
                *count = (uint)-1;
            }
            // REVIEW: throw if not found
            Assert(false);
            return;
        }

        uint newRefCount = (--(*refCount));
        if (count != nullptr)
        {
            *count = newRefCount;
        }
        if (newRefCount != 0)
        {
#if defined(CHECK_MEMORY_LEAK) || defined(LEAK_REPORT)
#ifdef STACK_BACK_TRACE
            if (GetRecyclerFlagsTable().LeakStackTrace)
            {
                StackBackTraceNode::Prepend(&NoCheckHeapAllocator::Instance, refCount->stackBackTraces,
                    StackBackTrace::Capture(&NoCheckHeapAllocator::Instance));
            }
#endif
#endif
            return;
        }

#if defined(CHECK_MEMORY_LEAK) || defined(LEAK_REPORT)
#ifdef STACK_BACK_TRACE
        StackBackTraceNode::DeleteAll(&NoCheckHeapAllocator::Instance, refCount->stackBackTraces);
        refCount->stackBackTraces = nullptr;
#endif
#endif

#if ENABLE_CONCURRENT_GC
        // Don't delete the entry if we are in the concurrent find-root state;
        // we will delete it later during in-thread find root.
        if (this->hasPendingConcurrentFindRoot)
        {
            this->hasPendingUnpinnedObject = true;
        }
        else
#endif
        {
            pinnedObjectMap.Remove(obj);
        }
        RECYCLER_PERF_COUNTER_DEC(PinnedObject);
    }

    // Any time a root is removed during a GC, an exhaustive collection is likely
    // to have work to do, so trigger an exhaustive candidate GC to record this fact.
    this->CollectNow<CollectExhaustiveCandidate>();
}
#if DBG && GLOBAL_ENABLE_WRITE_BARRIER
Recycler* Recycler::recyclerList = nullptr;
CriticalSection Recycler::recyclerListLock;
#endif
void
Recycler::Initialize(const bool forceInThread, JsUtil::ThreadService *threadService, const bool deferThreadStartup
#ifdef RECYCLER_PAGE_HEAP
    , PageHeapMode pageheapmode
    , bool captureAllocCallStack
    , bool captureFreeCallStack
#endif
    )
{
#ifdef PROFILE_RECYCLER_ALLOC
    this->InitializeProfileAllocTracker();
#endif

#ifdef ENABLE_DEBUG_CONFIG_OPTIONS
    this->disableCollection = CUSTOM_PHASE_OFF1(GetRecyclerFlagsTable(), Js::RecyclerPhase);
#endif

#if ENABLE_CONCURRENT_GC
    this->skipStack = false;
#endif

#if ENABLE_PARTIAL_GC
#if ENABLE_DEBUG_CONFIG_OPTIONS
    this->enablePartialCollect = !CUSTOM_PHASE_OFF1(GetRecyclerFlagsTable(), Js::PartialCollectPhase);
#else
    this->enablePartialCollect = true;
#endif
#endif

#ifdef PROFILE_MEM
    this->memoryData = MemoryProfiler::GetRecyclerMemoryData();
#endif

#if DBG || DBG_DUMP || defined(RECYCLER_TRACE)
    mainThreadId = GetCurrentThreadContextId();
#endif
#ifdef RECYCLER_TRACE
    collectionParam.domCollect = false;
#endif

#if defined(PROFILE_RECYCLER_ALLOC) || defined(RECYCLER_MEMORY_VERIFY) || defined(MEMSPECT_TRACKING) || defined(ETW_MEMORY_TRACKING)
    bool dontNeedDetailedTracking = false;
#if defined(PROFILE_RECYCLER_ALLOC)
    dontNeedDetailedTracking = dontNeedDetailedTracking || this->trackerDictionary == nullptr;
#endif
#if defined(RECYCLER_MEMORY_VERIFY)
    dontNeedDetailedTracking = dontNeedDetailedTracking || !this->verifyEnabled;
#endif
    // If we need detailed tracking we force the allocation fast path in the JIT to fail and go to the helper,
    // so there is no need for the TrackNativeAllocatedMemoryBlock callback.
    if (dontNeedDetailedTracking)
    {
        autoHeap.Initialize(this, TrackNativeAllocatedMemoryBlock
#ifdef RECYCLER_PAGE_HEAP
            , pageheapmode
            , captureAllocCallStack
            , captureFreeCallStack
#endif
            );
    }
    else
    {
        autoHeap.Initialize(this
#ifdef RECYCLER_PAGE_HEAP
            , pageheapmode
            , captureAllocCallStack
            , captureFreeCallStack
#endif
            );
    }
#else
    autoHeap.Initialize(this
#ifdef RECYCLER_PAGE_HEAP
        , pageheapmode
        , captureAllocCallStack
        , captureFreeCallStack
#endif
        );
#endif

    markContext.Init(Recycler::PrimaryMarkStackReservedPageCount);
#if defined(RECYCLER_DUMP_OBJECT_GRAPH) || defined(LEAK_REPORT) || defined(CHECK_MEMORY_LEAK)
    isPrimaryMarkContextInitialized = true;
#endif

#ifdef RECYCLER_PAGE_HEAP
    isPageHeapEnabled = autoHeap.IsPageHeapEnabled();
    if (IsPageHeapEnabled())
    {
        capturePageHeapAllocStack = autoHeap.captureAllocCallStack;
        capturePageHeapFreeStack = autoHeap.captureFreeCallStack;
    }
#endif

#ifdef RECYCLER_STRESS
#if ENABLE_PARTIAL_GC
    if (GetRecyclerFlagsTable().RecyclerTrackStress)
    {
        // Disable partial if we are doing track stress, since partial relies on ClientTracked processing
        // and track stress doesn't support this.
        this->enablePartialCollect = false;
    }
#endif
    this->recyclerStress = GetRecyclerFlagsTable().RecyclerStress;
#if ENABLE_CONCURRENT_GC
    this->recyclerBackgroundStress = GetRecyclerFlagsTable().RecyclerBackgroundStress;
    this->recyclerConcurrentStress = GetRecyclerFlagsTable().RecyclerConcurrentStress;
    this->recyclerConcurrentRepeatStress = GetRecyclerFlagsTable().RecyclerConcurrentRepeatStress;
#endif
#if ENABLE_PARTIAL_GC
    this->recyclerPartialStress = GetRecyclerFlagsTable().RecyclerPartialStress;
#endif
#endif

    bool needWriteWatch = false;
#if ENABLE_CONCURRENT_GC
    // Default to non-concurrent
    uint numProcs = (uint)AutoSystemInfo::Data.GetNumberOfPhysicalProcessors();
    this->maxParallelism = (numProcs > 4) || CUSTOM_PHASE_FORCE1(GetRecyclerFlagsTable(), Js::ParallelMarkPhase) ? 4 : numProcs;
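    // (Note: the cap of 4 matches the number of mark contexts the recycler owns --
    // the primary markContext plus parallelMarkContext1..3 -- so marking can fan
    // out to at most four workers. Descriptive note inferred from the mark
    // contexts initialized above, not a documented invariant.)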
    if (forceInThread)
    {
        // Requested a non-concurrent recycler
        this->disableConcurrent = true;
    }
#if ENABLE_DEBUG_CONFIG_OPTIONS
    else if (CUSTOM_PHASE_OFF1(GetRecyclerFlagsTable(), Js::ConcurrentCollectPhase))
    {
        // Concurrent collection disabled
        this->disableConcurrent = true;
    }
    else if (CUSTOM_PHASE_OFF1(GetRecyclerFlagsTable(), Js::ConcurrentMarkPhase) &&
        CUSTOM_PHASE_OFF1(GetRecyclerFlagsTable(), Js::ParallelMarkPhase) &&
        CUSTOM_PHASE_OFF1(GetRecyclerFlagsTable(), Js::ConcurrentSweepPhase))
    {
        // All concurrent collection phases disabled
        this->disableConcurrent = true;
    }
#endif
    else
    {
        this->disableConcurrent = false;
        if (deferThreadStartup || EnableConcurrent(threadService, false))
        {
#ifdef RECYCLER_WRITE_WATCH
            needWriteWatch = true;
#endif
        }
    }
#endif // ENABLE_CONCURRENT_GC

#if ENABLE_PARTIAL_GC
    if (this->enablePartialCollect)
    {
#ifdef RECYCLER_WRITE_WATCH
        needWriteWatch = true;
#endif
    }
#endif

#if ENABLE_CONCURRENT_GC
#ifdef RECYCLER_WRITE_WATCH
    if (!CONFIG_FLAG(ForceSoftwareWriteBarrier))
    {
        if (needWriteWatch)
        {
            // Need write watch to support concurrent and/or partial collection
            recyclerPageAllocator.EnableWriteWatch();
            recyclerLargeBlockPageAllocator.EnableWriteWatch();
        }
    }
#endif
#else
    Assert(!needWriteWatch);
#endif

#if DBG && GLOBAL_ENABLE_WRITE_BARRIER
    recyclerListLock.Enter();
    this->next = recyclerList;
    recyclerList = this;
    recyclerListLock.Leave();
#endif
}
BOOL
Recycler::CollectionInProgress() const
{
    return collectionState != CollectionStateNotCollecting;
}

BOOL
Recycler::IsExiting() const
{
    return (collectionState == Collection_Exit);
}

BOOL
Recycler::IsSweeping() const
{
    return ((collectionState & Collection_Sweep) == Collection_Sweep);
}

void
Recycler::SetIsScriptActive(bool isScriptActive)
{
    Assert(this->isInScript);
    Assert(this->isScriptActive != isScriptActive);
    this->isScriptActive = isScriptActive;
    if (isScriptActive)
    {
        this->tickCountNextDispose = ::GetTickCount() + RecyclerHeuristic::TickCountFinishCollection;
    }
}

void
Recycler::SetIsInScript(bool isInScript)
{
    Assert(this->isInScript != isInScript);
    this->isInScript = isInScript;
}
bool
Recycler::NeedOOMRescan() const
{
    return this->needOOMRescan;
}

void
Recycler::SetNeedOOMRescan()
{
    this->needOOMRescan = true;
}

void
Recycler::ClearNeedOOMRescan()
{
    this->needOOMRescan = false;
    markContext.GetPageAllocator()->ResetDisableAllocationOutOfMemory();
    parallelMarkContext1.GetPageAllocator()->ResetDisableAllocationOutOfMemory();
    parallelMarkContext2.GetPageAllocator()->ResetDisableAllocationOutOfMemory();
    parallelMarkContext3.GetPageAllocator()->ResetDisableAllocationOutOfMemory();
}
bool
Recycler::IsMemProtectMode()
{
    return this->enableScanImplicitRoots;
}

size_t
Recycler::GetUsedBytes()
{
    size_t usedBytes = threadPageAllocator->usedBytes;
#ifdef RECYCLER_WRITE_BARRIER_ALLOC_SEPARATE_PAGE
    usedBytes += recyclerWithBarrierPageAllocator.usedBytes;
#endif
    usedBytes += recyclerPageAllocator.usedBytes;
    usedBytes += recyclerLargeBlockPageAllocator.usedBytes;
#if GLOBAL_ENABLE_WRITE_BARRIER
    if (CONFIG_FLAG(ForceSoftwareWriteBarrier))
    {
        Assert(recyclerPageAllocator.usedBytes == 0);
    }
#endif
    return usedBytes;
}
IdleDecommitPageAllocator*
Recycler::GetRecyclerPageAllocator()
{
    // TODO: (SWB) This is for finalizable leaf allocations, for which we haven't implemented
    // a leaf bucket; remove this once the finalizable leaf bucket is implemented.
#if GLOBAL_ENABLE_WRITE_BARRIER
    if (CONFIG_FLAG(ForceSoftwareWriteBarrier))
    {
        return &this->recyclerWithBarrierPageAllocator;
    }
    else
#endif
    {
#if defined(RECYCLER_WRITE_WATCH) || !defined(RECYCLER_WRITE_BARRIER_ALLOC_SEPARATE_PAGE)
        return &this->recyclerPageAllocator;
#else
        return &this->recyclerWithBarrierPageAllocator;
#endif
    }
}

IdleDecommitPageAllocator*
Recycler::GetRecyclerLargeBlockPageAllocator()
{
    return &this->recyclerLargeBlockPageAllocator;
}

IdleDecommitPageAllocator*
Recycler::GetRecyclerLeafPageAllocator()
{
    return this->threadPageAllocator;
}

#ifdef RECYCLER_WRITE_BARRIER_ALLOC_SEPARATE_PAGE
IdleDecommitPageAllocator*
Recycler::GetRecyclerWithBarrierPageAllocator()
{
    return &this->recyclerWithBarrierPageAllocator;
}
#endif
#if DBG
BOOL
Recycler::IsFreeObject(void * candidate)
{
    HeapBlock * heapBlock = this->FindHeapBlock(candidate);
    if (heapBlock != NULL)
    {
        return heapBlock->IsFreeObject(candidate);
    }
    return false;
}
#endif

BOOL
Recycler::IsValidObject(void* candidate, size_t minimumSize)
{
    HeapBlock * heapBlock = this->FindHeapBlock(candidate);
    if (heapBlock != NULL)
    {
        return heapBlock->IsValidObject(candidate) && (minimumSize == 0 || heapBlock->GetObjectSize(candidate) >= minimumSize);
    }
    return false;
}
void
Recycler::Prime()
{
#ifdef ENABLE_DEBUG_CONFIG_OPTIONS
    if (GetRecyclerFlagsTable().IsEnabled(Js::ForceFragmentAddressSpaceFlag))
    {
        // Never prime the recycler if we are forced to fragment address space
        return;
    }
#endif
    ForEachPageAllocator([](IdleDecommitPageAllocator* pageAlloc)
    {
        pageAlloc->Prime(RecyclerPageAllocator::DefaultPrimePageCount);
    });
}
void
Recycler::AddExternalMemoryUsage(size_t size)
{
    this->autoHeap.uncollectedAllocBytes += size;
    this->autoHeap.uncollectedExternalBytes += size;

    // Generally a normal GC can clean up uncollectedAllocBytes, but if external components
    // do fast large allocations in a row, a normal GC might not kick in. Force the GC
    // here if we need to collect anyhow.
    CollectNow<CollectOnAllocation>();
}

bool Recycler::RequestExternalMemoryAllocation(size_t size)
{
    return recyclerPageAllocator.RequestAlloc(size);
}

void Recycler::ReportExternalMemoryFailure(size_t size)
{
    recyclerPageAllocator.ReportFailure(size);
}

void Recycler::ReportExternalMemoryFree(size_t size)
{
    recyclerPageAllocator.ReportFree(size);
}
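// Illustrative sketch of the external-memory accounting protocol implied above
// (hypothetical caller; HostAlloc is a placeholder, not an API in this file):
//
//     if (recycler->RequestExternalMemoryAllocation(bytes))   // reserve budget
//     {
//         void* p = HostAlloc(bytes);
//         if (p == nullptr)
//         {
//             recycler->ReportExternalMemoryFailure(bytes);   // give the budget back
//         }
//         else
//         {
//             recycler->AddExternalMemoryUsage(bytes);        // feed GC heuristics
//         }
//     }
//     // ... later, when the host releases the memory:
//     recycler->ReportExternalMemoryFree(bytes);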
/*------------------------------------------------------------------------------------------------
 * Idle Decommit
 *------------------------------------------------------------------------------------------------*/
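// Overview (descriptive note): Enter/LeaveIdleDecommit bracket a window in which
// the page allocators may defer decommitting freed pages. On leave, each
// allocator reports the strongest signal it needs (none / timer / immediate
// signal), the max is taken across all allocators, and the concurrent thread's
// event is set only when strictly necessary, keeping cross-thread signaling cheap.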
void
Recycler::EnterIdleDecommit()
{
    ForEachPageAllocator([](IdleDecommitPageAllocator* pageAlloc)
    {
        pageAlloc->EnterIdleDecommit();
    });

#ifdef IDLE_DECOMMIT_ENABLED
    ::InterlockedCompareExchange(&needIdleDecommitSignal, IdleDecommitSignal_None, IdleDecommitSignal_NeedTimer);
#endif
}

void
Recycler::LeaveIdleDecommit()
{
#ifdef IDLE_DECOMMIT_ENABLED
    bool allowTimer = (this->concurrentIdleDecommitEvent != nullptr);
    IdleDecommitSignal idleDecommitSignalRecycler = recyclerPageAllocator.LeaveIdleDecommit(allowTimer);
    IdleDecommitSignal idleDecommitSignalRecyclerLargeBlock = recyclerLargeBlockPageAllocator.LeaveIdleDecommit(allowTimer);
    IdleDecommitSignal idleDecommitSignal = max(idleDecommitSignalRecycler, idleDecommitSignalRecyclerLargeBlock);
    IdleDecommitSignal idleDecommitSignalThread = threadPageAllocator->LeaveIdleDecommit(allowTimer);
    idleDecommitSignal = max(idleDecommitSignal, idleDecommitSignalThread);
#ifdef RECYCLER_WRITE_BARRIER_ALLOC_SEPARATE_PAGE
    IdleDecommitSignal idleDecommitSignalRecyclerWithBarrier = recyclerWithBarrierPageAllocator.LeaveIdleDecommit(allowTimer);
    idleDecommitSignal = max(idleDecommitSignal, idleDecommitSignalRecyclerWithBarrier);
#endif
    if (idleDecommitSignal != IdleDecommitSignal_None)
    {
        Assert(allowTimer);
        // Reduce the number of times we need to signal the background thread
        // by detecting whether the thread is waiting on a timeout or not
        if (idleDecommitSignal == IdleDecommitSignal_NeedSignal ||
            ::InterlockedCompareExchange(&needIdleDecommitSignal, IdleDecommitSignal_NeedTimer, IdleDecommitSignal_None) == IdleDecommitSignal_NeedSignal)
        {
#if DBG
            if (GetRecyclerFlagsTable().Trace.IsEnabled(Js::IdleDecommitPhase))
            {
                Output::Print(_u("Recycler Thread IdleDecommit Need Signal\n"));
                Output::Flush();
            }
#endif
            SetEvent(this->concurrentIdleDecommitEvent);
        }
    }
#else
    ForEachPageAllocator([](IdleDecommitPageAllocator* pageAlloc)
    {
        pageAlloc->LeaveIdleDecommit(false);
    });
#endif
}
/*------------------------------------------------------------------------------------------------
 * Freeing
 *------------------------------------------------------------------------------------------------*/
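// Usage sketch (hypothetical caller): explicit free returns a buffer to the
// recycler's free lists without waiting for a GC, and reports whether it did so.
//
//     // buf was previously allocated from this recycler as leaf memory of 'size' bytes
//     if (!recycler->ExplicitFreeLeaf(buf, size))
//     {
//         // Could not free eagerly (e.g. during sweep/shutdown or in page-heap mode);
//         // the buffer simply stays alive until a collection reclaims it.
//     }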
bool Recycler::ExplicitFreeLeaf(void* buffer, size_t size)
{
    return ExplicitFreeInternalWrapper<ObjectInfoBits::LeafBit>(buffer, size);
}

bool Recycler::ExplicitFreeNonLeaf(void* buffer, size_t size)
{
    return ExplicitFreeInternalWrapper<ObjectInfoBits::NoBit>(buffer, size);
}

size_t Recycler::GetAllocSize(size_t size)
{
    size_t allocSize = size;
#ifdef RECYCLER_MEMORY_VERIFY
    if (this->VerifyEnabled())
    {
        allocSize += verifyPad + sizeof(size_t);
        Assert(allocSize > size);
    }
#endif
    return allocSize;
}
template <typename TBlockAttributes>
void Recycler::SetExplicitFreeBitOnSmallBlock(HeapBlock* heapBlock, size_t sizeCat, void* buffer, ObjectInfoBits attributes)
{
    Assert(!heapBlock->IsLargeHeapBlock());
    Assert(heapBlock->GetObjectSize(buffer) == sizeCat);
    SmallHeapBlockT<TBlockAttributes>* smallBlock = (SmallHeapBlockT<TBlockAttributes>*)heapBlock;

    if ((attributes & ObjectInfoBits::LeafBit) == LeafBit)
    {
        Assert(smallBlock->IsLeafBlock());
    }
    else
    {
        Assert(smallBlock->IsAnyNormalBlock());
    }

#ifdef RECYCLER_MEMORY_VERIFY
    smallBlock->SetExplicitFreeBitForObject(buffer);
#endif
}
template <ObjectInfoBits attributes>
bool Recycler::ExplicitFreeInternalWrapper(void* buffer, size_t size)
{
    Assert(buffer != nullptr);
    Assert(size > 0);
#ifdef ENABLE_DEBUG_CONFIG_OPTIONS
    if (CUSTOM_PHASE_OFF1(GetRecyclerFlagsTable(), Js::ExplicitFreePhase))
    {
        return false;
    }
#endif

    size_t allocSize = GetAllocSize(size);
    if (HeapInfo::IsSmallObject(allocSize))
    {
        return ExplicitFreeInternal<attributes, SmallAllocationBlockAttributes>(buffer, size, HeapInfo::GetAlignedSizeNoCheck(allocSize));
    }
    if (HeapInfo::IsMediumObject(allocSize))
    {
        return ExplicitFreeInternal<attributes, MediumAllocationBlockAttributes>(buffer, size, HeapInfo::GetMediumObjectAlignedSizeNoCheck(allocSize));
    }
    return false;
}
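// (Descriptive note: only small and medium objects are eligible for explicit
// free; a large allocation falls through and returns false, leaving it for the
// next collection. The size is first padded via GetAllocSize so that verify-mode
// padding routes the buffer to the same size class it was allocated from.)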
template <ObjectInfoBits attributes, typename TBlockAttributes>
bool Recycler::ExplicitFreeInternal(void* buffer, size_t size, size_t sizeCat)
{
    // If the GC is in sweep state while FreeInternal is called, we might be executing a finalizer
    // which called Free, which would cause a "sweepable" buffer to be free-listed. Don't allow this.
    // Also don't allow freeing while we're shutting down the recycler, since finalizers get executed
    // at that stage too.
    if (this->IsSweeping() || this->IsExiting())
    {
        return false;
    }

#if ENABLE_CONCURRENT_GC
    // We shouldn't be freeing objects while we are running GC in thread
    Assert(this->IsConcurrentState() || !this->CollectionInProgress() || this->IsAllocatableCallbackState());
#else
    Assert(!this->CollectionInProgress() || this->IsAllocatableCallbackState());
#endif

    DebugOnly(RecyclerHeapObjectInfo info);
    Assert(this->FindHeapObject(buffer, FindHeapObjectFlags_NoFreeBitVerify, info));
    Assert((info.GetAttributes() & ~ObjectInfoBits::LeafBit) == 0); // Only NoBit or LeafBit

#if DBG || defined(RECYCLER_MEMORY_VERIFY) || defined(RECYCLER_PAGE_HEAP)
    // Either the mainThreadHandle is null (we're not thread bound)
    // or we should be calling this function on the main script thread
    Assert(this->mainThreadHandle == NULL ||
        ::GetCurrentThreadId() == ::GetThreadId(this->mainThreadHandle));

    HeapBlock* heapBlock = this->FindHeapBlock(buffer);
    Assert(heapBlock != nullptr);

#ifdef RECYCLER_PAGE_HEAP
    if (this->IsPageHeapEnabled())
    {
#ifdef STACK_BACK_TRACE
        if (this->ShouldCapturePageHeapFreeStack())
        {
            if (heapBlock->IsLargeHeapBlock())
            {
                LargeHeapBlock* largeHeapBlock = (LargeHeapBlock*)heapBlock;
                if (largeHeapBlock->InPageHeapMode())
                {
                    largeHeapBlock->CapturePageHeapFreeStack();
                }
            }
        }
#endif
        // Don't do an actual explicit free in page heap mode
        return false;
    }
#endif

    SetExplicitFreeBitOnSmallBlock<TBlockAttributes>(heapBlock, sizeCat, buffer, attributes);
#endif

    if (TBlockAttributes::IsMediumBlock)
    {
        autoHeap.FreeMediumObject<attributes>(buffer, sizeCat);
    }
    else
    {
        autoHeap.FreeSmallObject<attributes>(buffer, sizeCat);
    }
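    // The first sizeof(FreeObject) bytes of the payload are reused as the
    // free-list link, so only the remainder of the buffer needs to be scrubbed
    // below. (Descriptive note; see FreeObject for the actual layout.)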
    if (size > sizeof(FreeObject) || TBlockAttributes::IsMediumBlock)
    {
        // Do this on the background somehow?
        byte expectedFill = 0;
        size_t fillSize = size - sizeof(FreeObject);
#ifdef RECYCLER_MEMORY_VERIFY
        if (this->VerifyEnabled())
        {
            expectedFill = Recycler::VerifyMemFill;
        }
#endif
        memset(((char*)buffer) + sizeof(FreeObject), expectedFill, fillSize);
    }

#ifdef PROFILE_RECYCLER_ALLOC
    if (this->trackerDictionary != nullptr)
    {
        this->SetTrackerData(buffer, &TrackerData::ExplicitFreeListObjectData);
    }
#endif

    return true;
}
/*------------------------------------------------------------------------------------------------
 * Allocation
 *------------------------------------------------------------------------------------------------*/
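// TryLargeAlloc (below) makes one pass at satisfying a large allocation:
// try the newest existing large block, run the on-allocation GC heuristic,
// try page-heap if enabled, then fall back to adding a fresh large heap block.
// LargeAlloc wraps it with a forced in-thread collection retry before giving up.
// (Descriptive summary of the code that follows.)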
char *
Recycler::TryLargeAlloc(HeapInfo * heap, size_t size, ObjectInfoBits attributes, bool nothrow)
{
    Assert((attributes & InternalObjectInfoBitMask) == attributes);
    Assert(size != 0);

    size_t sizeCat = HeapInfo::GetAlignedSizeNoCheck(size);
    if (sizeCat == 0)
    {
        // Overflow scenario:
        // if nothrow is false, throw out of memory;
        // otherwise, return null
        if (nothrow == false)
        {
            this->OutOfMemory();
        }
        return nullptr;
    }

    char * memBlock;
    if (heap->largeObjectBucket.largeBlockList != nullptr)
    {
        memBlock = heap->largeObjectBucket.largeBlockList->Alloc(sizeCat, attributes);
        if (memBlock != nullptr)
        {
#ifdef RECYCLER_ZERO_MEM_CHECK
            VerifyZeroFill(memBlock, sizeCat);
#endif
            return memBlock;
        }
    }

    // We don't care whether a GC happened here or not, because we are not reusing freed
    // large objects. We might try to allocate from an existing block if we implement
    // large object reuse.
    if (!this->disableCollectOnAllocationHeuristics)
    {
        CollectNow<CollectOnAllocation>();
    }

#ifdef RECYCLER_PAGE_HEAP
    if (IsPageHeapEnabled())
    {
        if (heap->largeObjectBucket.IsPageHeapEnabled(attributes))
        {
            memBlock = heap->largeObjectBucket.PageHeapAlloc(this, sizeCat, size, (ObjectInfoBits)attributes, autoHeap.pageHeapMode, nothrow);
            if (memBlock != nullptr)
            {
#ifdef RECYCLER_ZERO_MEM_CHECK
                VerifyZeroFill(memBlock, size);
#endif
                return memBlock;
            }
        }
    }
#endif

    LargeHeapBlock * heapBlock = heap->AddLargeHeapBlock(sizeCat);
    if (heapBlock == nullptr)
    {
        return nullptr;
    }
    memBlock = heapBlock->Alloc(sizeCat, attributes);
    Assert(memBlock != nullptr);
#ifdef RECYCLER_ZERO_MEM_CHECK
    VerifyZeroFill(memBlock, sizeCat);
#endif
    return memBlock;
}
template <bool nothrow>
char*
Recycler::LargeAlloc(HeapInfo* heap, size_t size, ObjectInfoBits attributes)
{
    Assert((attributes & InternalObjectInfoBitMask) == attributes);

    char * addr = TryLargeAlloc(heap, size, attributes, nothrow);
    if (addr == nullptr)
    {
        // Force a collection and try to allocate again.
        this->CollectNow<CollectNowForceInThread>();
        addr = TryLargeAlloc(heap, size, attributes, nothrow);
        if (addr == nullptr)
        {
            if (nothrow == false)
            {
                // Still fails, we are out of memory
                // Since nothrow is false, it's okay to throw here
                this->OutOfMemory();
            }
            else
            {
                return nullptr;
            }
        }
    }

    autoHeap.uncollectedAllocBytes += size;
    return addr;
}

// Explicitly instantiate both versions of LargeAlloc
template char* Recycler::LargeAlloc<true>(HeapInfo* heap, size_t size, ObjectInfoBits attributes);
template char* Recycler::LargeAlloc<false>(HeapInfo* heap, size_t size, ObjectInfoBits attributes);
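// (Note: nothrow is a compile-time template parameter, so the throwing and
// null-returning policies are baked into the two separate instantiations above
// and the branches on it fold away; callers pick the policy at the call site.)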
void
Recycler::OutOfMemory()
{
    outOfMemoryFunc();
}
void Recycler::GetNormalHeapBlockAllocatorInfoForNativeAllocation(void* recyclerAddr, size_t allocSize, void*& allocatorAddress, uint32& endAddressOffset, uint32& freeListOffset, bool allowBumpAllocation, bool isOOPJIT)
{
    Assert(recyclerAddr);
    return ((Recycler*)recyclerAddr)->GetNormalHeapBlockAllocatorInfoForNativeAllocation(allocSize, allocatorAddress, endAddressOffset, freeListOffset, allowBumpAllocation, isOOPJIT);
}

void Recycler::GetNormalHeapBlockAllocatorInfoForNativeAllocation(size_t allocSize, void*& allocatorAddress, uint32& endAddressOffset, uint32& freeListOffset, bool allowBumpAllocation, bool isOOPJIT)
{
    Assert(HeapInfo::IsAlignedSize(allocSize));
    Assert(HeapInfo::IsSmallObject(allocSize));

    allocatorAddress = (char*)this + offsetof(Recycler, autoHeap) + offsetof(HeapInfo, heapBuckets) +
        sizeof(HeapBucketGroup<SmallAllocationBlockAttributes>) * ((uint)(allocSize >> HeapConstants::ObjectAllocationShift) - 1)
        + HeapBucketGroup<SmallAllocationBlockAttributes>::GetHeapBucketOffset()
        + HeapBucketT<SmallNormalHeapBlockT<SmallAllocationBlockAttributes>>::GetAllocatorHeadOffset();

    endAddressOffset = SmallHeapBlockAllocator<SmallNormalHeapBlockT<SmallAllocationBlockAttributes>>::GetEndAddressOffset();
    freeListOffset = SmallHeapBlockAllocator<SmallNormalHeapBlockT<SmallAllocationBlockAttributes>>::GetFreeObjectListOffset();

    if (!isOOPJIT)
    {
        Assert(allocatorAddress == GetAddressOfAllocator<NoBit>(allocSize));
        Assert(endAddressOffset == GetEndAddressOffset<NoBit>(allocSize));
        Assert(freeListOffset == GetFreeObjectListOffset<NoBit>(allocSize));
        Assert(allowBumpAllocation == AllowNativeCodeBumpAllocation());
    }

    if (!allowBumpAllocation)
    {
        freeListOffset = endAddressOffset;
    }
}
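// Worked example of the pointer arithmetic above (a sketch; assumes a 16-byte
// object granularity, i.e. ObjectAllocationShift == 4, which may differ by target):
// for allocSize == 32, the bucket index is (32 >> 4) - 1 == 1, so the allocator
// address is the allocator head of autoHeap.heapBuckets[1], adjusted by the
// bucket-group and allocator-head offsets. JITed code then bump-allocates
// directly against that SmallHeapBlockAllocator; setting freeListOffset equal
// to endAddressOffset makes the native fast-path check always fail, forcing a
// call to the helper (see AllowNativeCodeBumpAllocation below).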
bool Recycler::AllowNativeCodeBumpAllocation()
{
    // In debug builds, if we need to track allocation info, we pretend there is no pointer-bump-allocation space
    // on this page, so that we always fail the check in native code and go to the helper, which does the tracking.
#ifdef PROFILE_RECYCLER_ALLOC
    if (this->trackerDictionary != nullptr)
    {
        return false;
    }
#endif
#ifdef RECYCLER_MEMORY_VERIFY
    if (this->verifyEnabled)
    {
        return false;
    }
#endif
#ifdef RECYCLER_PAGE_HEAP
    // Don't allow bump allocation in the JIT when page heap is turned on
    if (this->IsPageHeapEnabled())
    {
        return false;
    }
#endif
    return true;
}
void Recycler::TrackNativeAllocatedMemoryBlock(Recycler * recycler, void * memBlock, size_t sizeCat)
{
    Assert(HeapInfo::IsAlignedSize(sizeCat));
    Assert(HeapInfo::IsSmallObject(sizeCat));
#ifdef PROFILE_RECYCLER_ALLOC
    AssertMsg(!Recycler::DoProfileAllocTracker(), "Why did we register allocation tracking callback if all allocations are forced to slow path?");
#endif
    RecyclerMemoryTracking::ReportAllocation(recycler, memBlock, sizeCat);
    RECYCLER_PERF_COUNTER_INC(LiveObject);
    RECYCLER_PERF_COUNTER_ADD(LiveObjectSize, sizeCat);
    RECYCLER_PERF_COUNTER_SUB(FreeObjectSize, sizeCat);

#ifdef RECYCLER_MEMORY_VERIFY
    AssertMsg(!recycler->VerifyEnabled(), "Why did we register allocation tracking callback if all allocations are forced to slow path?");
#endif
}
/*------------------------------------------------------------------------------------------------
 * FindRoots
 *------------------------------------------------------------------------------------------------*/
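// Overview (descriptive note): the recycler scans conservatively, so root
// finding covers anything that may hold GC pointers -- the saved registers and
// the machine stack (ScanStack), the pinned-object map and the transient pin
// (ScanPinnedObjects), host-provided external roots, guest arenas, and implicit
// roots -- treating any plausible pointer value found there as a root.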
// xplat-todo: Unify these two variants of GetStackBase
#ifdef _WIN32
static void* GetStackBase()
{
    return ((NT_TIB *)NtCurrentTeb())->StackBase;
}
#else
static void* GetStackBase()
{
    ULONG_PTR highLimit = 0;
    ULONG_PTR lowLimit = 0;
    ::GetCurrentThreadStackLimits(&lowLimit, &highLimit);
    return (void*) highLimit;
}
#endif
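// (Note: on non-Windows targets the GetCurrentThreadStackLimits call above is
// expected to come from the PAL rather than the OS. The SAVE_THREAD_CONTEXT
// macros below spill the general-purpose registers into savedThreadContext so
// that pointers live only in registers are still visible to the conservative
// scan performed in ScanStack.)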
#if _M_IX86
// REVIEW: For x86, do we care about scanning esp/ebp?
// At GC time, they shouldn't be pointing to GC memory.
#define SAVE_THREAD_CONTEXT() \
    void** targetBuffer = this->savedThreadContext.GetRegisters(); \
    __asm { push eax } \
    __asm { mov eax, targetBuffer } \
    __asm { mov [eax], esp } \
    __asm { mov [eax+0x4], eax } \
    __asm { mov [eax+0x8], ebx } \
    __asm { mov [eax+0xc], ecx } \
    __asm { mov [eax+0x10], edx } \
    __asm { mov [eax+0x14], ebp } \
    __asm { mov [eax+0x18], esi } \
    __asm { mov [eax+0x1c], edi } \
    __asm { pop eax } \
    SAVE_THREAD_ASAN_FAKE_STACK()
#elif _M_ARM
#define SAVE_THREAD_CONTEXT() \
    arm_SAVE_REGISTERS(this->savedThreadContext.GetRegisters()); \
    SAVE_THREAD_ASAN_FAKE_STACK()
#elif _M_ARM64
#define SAVE_THREAD_CONTEXT() \
    arm64_SAVE_REGISTERS(this->savedThreadContext.GetRegisters()); \
    SAVE_THREAD_ASAN_FAKE_STACK()
#elif _M_AMD64
#define SAVE_THREAD_CONTEXT() \
    amd64_SAVE_REGISTERS(this->savedThreadContext.GetRegisters()); \
    SAVE_THREAD_ASAN_FAKE_STACK()
#else
#error Unexpected architecture
#endif
size_t
Recycler::ScanArena(ArenaData * alloc, bool background)
{
#if DBG_DUMP
    if (GetRecyclerFlagsTable().Trace.IsEnabled(Js::MarkPhase)
        || GetRecyclerFlagsTable().Trace.IsEnabled(Js::FindRootPhase))
    {
        this->forceTraceMark = true;
        Output::Print(_u("Scanning Guest Arena %p: "), alloc);
    }
#endif
    size_t scanRootBytes = 0;
    BEGIN_DUMP_OBJECT_ADDRESS(_u("Guest Arena"), alloc);
#if ENABLE_PARTIAL_GC || ENABLE_CONCURRENT_GC
    // The new write watch batching logic broke the write watch handling here.
    // For now, just disable write watch for guest arenas.
    // TODO: Re-enable this in the future.
#if FALSE
    // Note, guest arenas are allocated out of the large block page allocator.
    bool writeWatch = alloc->GetPageAllocator() == &this->recyclerLargeBlockPageAllocator;

    // Only use write watch when we are doing rescan (partial collect or finish concurrent)
    if (writeWatch && this->collectionState == CollectionStateRescanFindRoots)
    {
        scanRootBytes += TryMarkBigBlockListWithWriteWatch(alloc->GetBigBlocks(background));
        scanRootBytes += TryMarkBigBlockListWithWriteWatch(alloc->GetFullBlocks());
    }
    else
#endif
#endif
    {
        scanRootBytes += TryMarkBigBlockList(alloc->GetBigBlocks(background));
        scanRootBytes += TryMarkBigBlockList(alloc->GetFullBlocks());
    }

    scanRootBytes += TryMarkArenaMemoryBlockList(alloc->GetMemoryBlocks());
    END_DUMP_OBJECT(this);
#if DBG_DUMP
    if (GetRecyclerFlagsTable().Trace.IsEnabled(Js::MarkPhase)
        || GetRecyclerFlagsTable().Trace.IsEnabled(Js::FindRootPhase))
    {
        this->forceTraceMark = false;
        Output::Print(_u("\n"));
        Output::Flush();
    }
#endif
    // The arena has been scanned, so the full blocks can be rearranged at this point
#if ENABLE_DEBUG_CONFIG_OPTIONS
    if (background || !GetRecyclerFlagsTable().RecyclerProtectPagesOnRescan)
#endif
    {
        alloc->SetLockBlockList(false);
    }
    return scanRootBytes;
}
#if DBG
bool
Recycler::ExpectStackSkip() const
{
    // Okay to skip the stack scan if we're in leak check mode
    bool expectStackSkip = false;
#ifdef LEAK_REPORT
    expectStackSkip = expectStackSkip || GetRecyclerFlagsTable().IsEnabled(Js::LeakReportFlag);
#endif
#ifdef CHECK_MEMORY_LEAK
    expectStackSkip = expectStackSkip || GetRecyclerFlagsTable().CheckMemoryLeak;
#endif
#ifdef RECYCLER_DUMP_OBJECT_GRAPH
    expectStackSkip = expectStackSkip || (this->objectGraphDumper != nullptr);
#endif
#if defined(INTERNAL_MEM_PROTECT_HEAP_ALLOC)
    expectStackSkip = expectStackSkip || GetRecyclerFlagsTable().MemProtectHeap;
#endif
    return expectStackSkip || isExternalStackSkippingGC;
}
#endif
#pragma warning(push)
#pragma warning(disable:4731) // 'pointer' : frame pointer register 'register' modified by inline assembly code
// Disable address sanitizer, since it doesn't handle custom stack walks well
NO_SANITIZE_ADDRESS
size_t
Recycler::ScanStack()
{
    if (this->skipStack)
    {
#ifdef RECYCLER_TRACE
        CUSTOM_PHASE_PRINT_VERBOSE_TRACE1(GetRecyclerFlagsTable(), Js::ScanStackPhase, _u("[%04X] Skipping the stack scan\n"), ::GetCurrentThreadId());
#endif
#if ENABLE_CONCURRENT_GC
        Assert(this->isFinishGCOnIdle || this->isConcurrentGCOnIdle || this->ExpectStackSkip());
#else
        Assert(this->ExpectStackSkip());
#endif
        return 0;
    }

#ifdef RECYCLER_STATS
    size_t lastMarkCount = this->collectionStats.markData.markCount;
#endif
    GCETW(GC_SCANSTACK_START, (this));
    RECYCLER_PROFILE_EXEC_BEGIN(this, Js::ScanStackPhase);

    SAVE_THREAD_CONTEXT();
    void * stackTop = this->savedThreadContext.GetStackTop();
    void * stackStart = GetStackBase();
    Assert(stackStart > stackTop);
    size_t stackScanned = (size_t)((char *)stackStart - (char *)stackTop);

#if DBG_DUMP
    if (GetRecyclerFlagsTable().Trace.IsEnabled(Js::MarkPhase)
        || GetRecyclerFlagsTable().Trace.IsEnabled(Js::ScanStackPhase))
    {
        this->forceTraceMark = true;
        Output::Print(_u("Scanning Stack %p(%8d): "), stackTop, (char *)stackStart - (char *)stackTop);
    }
#endif

    bool doSpecialMark = collectionWrapper->DoSpecialMarkOnScanStack();

    BEGIN_DUMP_OBJECT(this, _u("Registers"));
    if (doSpecialMark)
    {
        ScanMemoryInline<true>(
            this->savedThreadContext.GetRegisters(), sizeof(void*) * SavedRegisterState::NumRegistersToSave
            ADDRESS_SANITIZER_APPEND(RecyclerScanMemoryType::Stack));
    }
    else
    {
        ScanMemoryInline<false>(
            this->savedThreadContext.GetRegisters(), sizeof(void*) * SavedRegisterState::NumRegistersToSave
            ADDRESS_SANITIZER_APPEND(RecyclerScanMemoryType::Stack));
    }
    END_DUMP_OBJECT(this);

    BEGIN_DUMP_OBJECT(this, _u("Stack"));
    if (doSpecialMark)
    {
        ScanMemoryInline<true>((void**) stackTop, stackScanned
            ADDRESS_SANITIZER_APPEND(RecyclerScanMemoryType::Stack));
    }
    else
    {
        ScanMemoryInline<false>((void**) stackTop, stackScanned
            ADDRESS_SANITIZER_APPEND(RecyclerScanMemoryType::Stack));
    }
    END_DUMP_OBJECT(this);

#if DBG_DUMP
    if (GetRecyclerFlagsTable().Trace.IsEnabled(Js::MarkPhase)
        || GetRecyclerFlagsTable().Trace.IsEnabled(Js::ScanStackPhase))
    {
        this->forceTraceMark = false;
        Output::Print(_u("\n"));
        Output::Flush();
    }
#endif

    RECYCLER_PROFILE_EXEC_END(this, Js::ScanStackPhase);
    RECYCLER_STATS_ADD(this, stackCount, this->collectionStats.markData.markCount - lastMarkCount);
    GCETW(GC_SCANSTACK_STOP, (this));
    return stackScanned;
}
#pragma warning(pop)
template <bool background>
size_t Recycler::ScanPinnedObjects()
{
    size_t scanRootBytes = 0;

    BEGIN_DUMP_OBJECT(this, _u("Pinned"));
    {
        this->TryMarkNonInterior(transientPinnedObject, &transientPinnedObject /* parentReference */);
        if (this->scanPinnedObjectMap)
        {
            // We are scanning the pinned object map now; we don't need to rescan unless
            // we reset marks or we add stuff to the map in Recycler::AddRef
            this->scanPinnedObjectMap = false;
            pinnedObjectMap.MapAndRemoveIf([this, &scanRootBytes](void * obj, PinRecord const& refCount)
            {
                if (refCount == 0)
                {
#if defined(CHECK_MEMORY_LEAK) || defined(LEAK_REPORT)
#ifdef STACK_BACK_TRACE
                    Assert(refCount.stackBackTraces == nullptr);
#endif
#endif
                    // Only remove if we are not doing this in the background.
                    return !background;
                }
                this->TryMarkNonInterior(obj, static_cast<void*>(const_cast<PinRecord*>(&refCount)) /* parentReference */);
                scanRootBytes += sizeof(void *);
                return false;
            });

            if (!background)
            {
                this->hasPendingUnpinnedObject = false;
            }
        }
    }
    END_DUMP_OBJECT(this);

    if (background)
    {
        // Re-enable resize now that we are done
        pinnedObjectMap.EnableResize();
    }
    return scanRootBytes;
}
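// (Descriptive note on the removal policy above: entries whose pin count has
// dropped to zero are physically removed only during an in-thread scan; a
// background scan must not mutate the map's shape, so it leaves them in place,
// and resizing is likewise deferred until the background pass finishes.)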
void
RecyclerScanMemoryCallback::operator()(void** obj, size_t byteCount)
{
    this->recycler->ScanMemoryInline<false>(obj, byteCount);
}
size_t
Recycler::FindRoots()
{
    size_t scanRootBytes = 0;
#ifdef RECYCLER_STATS
    size_t lastMarkCount = this->collectionStats.markData.markCount;
#endif
    GCETW(GC_SCANROOTS_START, (this));
    RECYCLER_PROFILE_EXEC_BEGIN(this, Js::FindRootPhase);

#ifdef ENABLE_PROJECTION
    {
        AUTO_TIMESTAMP(externalWeakReferenceObjectResolve);
        BEGIN_DUMP_OBJECT(this, _u("External Weak Referenced Roots"));
        Assert(!this->IsInRefCountTrackingForProjection());
#if DBG
        AutoIsInRefCountTrackingForProjection autoIsInRefCountTrackingForProjection(this);
#endif
        collectionWrapper->MarkExternalWeakReferencedObjects(this->inPartialCollectMode);
        END_DUMP_OBJECT(this);
    }
#endif

    // Go through the ITracker* stuff. We don't need to do it if we are doing a partial
    // collection, as we keep track of and mark all trackable objects.
    // Do this first because the host might unpin stuff in the process.
    if (externalRootMarker != NULL)
    {
#if ENABLE_PARTIAL_GC
        if (!this->inPartialCollectMode)
#endif
        {
            RECYCLER_PROFILE_EXEC_BEGIN(this, Js::FindRootExtPhase);
#if DBG_DUMP
            if (GetRecyclerFlagsTable().Trace.IsEnabled(Js::MarkPhase)
                || GetRecyclerFlagsTable().Trace.IsEnabled(Js::FindRootPhase))
            {
                this->forceTraceMark = true;
                Output::Print(_u("Scanning External Roots: "));
            }
#endif
            BEGIN_DUMP_OBJECT(this, _u("External Roots"));
            // PARTIALGC-TODO: How do we count external roots?
            externalRootMarker(externalRootMarkerContext);
            END_DUMP_OBJECT(this);
#if DBG_DUMP
            if (GetRecyclerFlagsTable().Trace.IsEnabled(Js::MarkPhase)
                || GetRecyclerFlagsTable().Trace.IsEnabled(Js::FindRootPhase))
            {
                this->forceTraceMark = false;
                Output::Print(_u("\n"));
                Output::Flush();
            }
#endif
            RECYCLER_PROFILE_EXEC_END(this, Js::FindRootExtPhase);
        }
    }

#if DBG_DUMP
    if (GetRecyclerFlagsTable().Trace.IsEnabled(Js::MarkPhase)
        || GetRecyclerFlagsTable().Trace.IsEnabled(Js::FindRootPhase))
    {
        this->forceTraceMark = true;
        Output::Print(_u("Scanning Pinned Objects: "));
    }
#endif
    scanRootBytes += this->ScanPinnedObjects</* background = */ false>();
#if DBG_DUMP
    if (GetRecyclerFlagsTable().Trace.IsEnabled(Js::MarkPhase)
        || GetRecyclerFlagsTable().Trace.IsEnabled(Js::FindRootPhase))
    {
        this->forceTraceMark = false;
        Output::Print(_u("\n"));
        Output::Flush();
    }
#endif

#if ENABLE_CONCURRENT_GC
    Assert(!this->hasPendingConcurrentFindRoot);
#endif
    RECYCLER_PROFILE_EXEC_BEGIN(this, Js::FindRootArenaPhase);
    DListBase<GuestArenaAllocator>::EditingIterator guestArenaIter(&guestArenaList);
    while (guestArenaIter.Next())
    {
        GuestArenaAllocator& allocator = guestArenaIter.Data();
#if ENABLE_CONCURRENT_GC
        if (allocator.pendingDelete)
        {
            Assert(this->hasPendingDeleteGuestArena);
            allocator.SetLockBlockList(false);
            guestArenaIter.RemoveCurrent(&HeapAllocator::Instance);
        }
        else if (this->backgroundFinishMarkCount == 0)
#endif
        {
            // Only scan the arena if we haven't finished mark in the background
            // (which is always the case if concurrent GC is disabled)
            scanRootBytes += ScanArena(&allocator, false);
        }
    }
    this->hasPendingDeleteGuestArena = false;

    DList<ArenaData *, HeapAllocator>::Iterator externalGuestArenaIter(&externalGuestArenaList);
    while (externalGuestArenaIter.Next())
    {
        scanRootBytes += ScanArena(externalGuestArenaIter.Data(), false);
    }
    RECYCLER_PROFILE_EXEC_END(this, Js::FindRootArenaPhase);

    this->ScanImplicitRoots();

    RECYCLER_PROFILE_EXEC_END(this, Js::FindRootPhase);
    GCETW(GC_SCANROOTS_STOP, (this));
    RECYCLER_STATS_ADD(this, rootCount, this->collectionStats.markData.markCount - lastMarkCount);
    return scanRootBytes;
}
void
Recycler::ScanImplicitRoots()
{
    if (this->enableScanImplicitRoots)
    {
        RECYCLER_PROFILE_EXEC_BEGIN(this, Js::FindImplicitRootPhase);
        if (!this->hasScannedInitialImplicitRoots)
        {
            this->ScanInitialImplicitRoots();
            this->hasScannedInitialImplicitRoots = true;
        }
        else
        {
            this->ScanNewImplicitRoots();
        }
        RECYCLER_PROFILE_EXEC_END(this, Js::FindImplicitRootPhase);
    }
}
size_t
Recycler::TryMarkArenaMemoryBlockList(ArenaMemoryBlock * memoryBlocks)
{
    size_t scanRootBytes = 0;
    ArenaMemoryBlock * blockp = memoryBlocks;
    while (blockp != NULL)
    {
        void** base = (void**)blockp->GetBytes();
        size_t byteCount = blockp->nbytes;
        scanRootBytes += byteCount;
        this->ScanMemory<false>(base, byteCount);
        blockp = blockp->next;
    }
    return scanRootBytes;
}
#if ENABLE_CONCURRENT_GC
#if FALSE
size_t
Recycler::TryMarkBigBlockListWithWriteWatch(BigBlock * memoryBlocks)
{
    DWORD pageSize = AutoSystemInfo::PageSize;
    size_t scanRootBytes = 0;
    BigBlock * blockp = memoryBlocks;

    // Reset the write watch bit if we are scanning this in the background thread
    DWORD const writeWatchFlags = this->IsConcurrentFindRootState() ? WRITE_WATCH_FLAG_RESET : 0;
    while (blockp != NULL)
    {
        char * currentAddress = (char *)blockp->GetBytes();
        char * endAddress = currentAddress + blockp->currentByte;
        char * currentPageStart = (char *)blockp->allocation;
        while (currentAddress < endAddress)
        {
            void * written;
            ULONG_PTR count = 1;
            if (::GetWriteWatch(writeWatchFlags, currentPageStart, AutoSystemInfo::PageSize, &written, &count, &pageSize) != 0 || count == 1)
            {
                char * currentEnd = min(currentPageStart + pageSize, endAddress);
                size_t byteCount = (size_t)(currentEnd - currentAddress);
                scanRootBytes += byteCount;
                this->ScanMemory<false>((void **)currentAddress, byteCount);
            }
            currentPageStart += pageSize;
            currentAddress = currentPageStart;
        }
        blockp = blockp->nextBigBlock;
    }
    return scanRootBytes;
}
#endif
#endif
size_t
Recycler::TryMarkBigBlockList(BigBlock * memoryBlocks)
{
    size_t scanRootBytes = 0;
    BigBlock * blockp = memoryBlocks;
    while (blockp != NULL)
    {
        void** base = (void**)blockp->GetBytes();
        size_t byteCount = blockp->currentByte;
        scanRootBytes += byteCount;
        this->ScanMemory<false>(base, byteCount);
        blockp = blockp->nextBigBlock;
    }
    return scanRootBytes;
}
void
Recycler::ScanInitialImplicitRoots()
{
    autoHeap.ScanInitialImplicitRoots();
}

void
Recycler::ScanNewImplicitRoots()
{
    autoHeap.ScanNewImplicitRoots();
}

/*------------------------------------------------------------------------------------------------
 * Mark
 *------------------------------------------------------------------------------------------------*/
void
Recycler::ResetMarks(ResetMarkFlags flags)
{
    Assert(!this->CollectionInProgress());
    collectionState = CollectionStateResetMarks;
    RecyclerVerboseTrace(GetRecyclerFlagsTable(), _u("Reset marks\n"));
    GCETW(GC_RESETMARKS_START, (this));
    RECYCLER_PROFILE_EXEC_BEGIN(this, Js::ResetMarksPhase);
    Assert(IsMarkStackEmpty());

    this->scanPinnedObjectMap = true;
    this->hasScannedInitialImplicitRoots = false;
    heapBlockMap.ResetMarks();
    autoHeap.ResetMarks(flags);

    RECYCLER_PROFILE_EXEC_END(this, Js::ResetMarksPhase);
    GCETW(GC_RESETMARKS_STOP, (this));
#ifdef RECYCLER_MARK_TRACK
    this->ClearMarkMap();
#endif
}
#ifdef RECYCLER_MARK_TRACK
void Recycler::ClearMarkMap()
{
    this->markMap->Clear();
}

void Recycler::PrintMarkMap()
{
    this->markMap->Map([](void* key, void* value)
    {
        Output::Print(_u("0x%P => 0x%P\n"), key, value);
    });
}
#endif
#if DBG
void
Recycler::CheckAllocExternalMark() const
{
    Assert(!disableThreadAccessCheck);
    Assert(GetCurrentThreadContextId() == mainThreadId);
#if ENABLE_CONCURRENT_GC
#ifdef HEAP_ENUMERATION_VALIDATION
    Assert((this->IsMarkState() || this->IsPostEnumHeapValidationInProgress()) && collectionState != CollectionStateConcurrentMark);
#else
    Assert(this->IsMarkState() && collectionState != CollectionStateConcurrentMark);
#endif
#else
    Assert(this->IsMarkState());
#endif
}
#endif
void
Recycler::TryMarkNonInterior(void* candidate, void* parentReference)
{
#ifdef HEAP_ENUMERATION_VALIDATION
    Assert(!isHeapEnumInProgress || this->IsPostEnumHeapValidationInProgress());
#else
    Assert(!isHeapEnumInProgress);
#endif
    Assert(this->collectionState != CollectionStateParallelMark);
    markContext.Mark</* parallel */ false, /* interior */ false, /* doSpecialMark */ false>(candidate, parentReference);
}

void
Recycler::TryMarkInterior(void* candidate, void* parentReference)
{
#ifdef HEAP_ENUMERATION_VALIDATION
    Assert(!isHeapEnumInProgress || this->IsPostEnumHeapValidationInProgress());
#else
    Assert(!isHeapEnumInProgress);
#endif
    Assert(this->collectionState != CollectionStateParallelMark);
    markContext.Mark</* parallel */ false, /* interior */ true, /* doSpecialMark */ false>(candidate, parentReference);
}
template <bool parallel, bool interior>
void
Recycler::ProcessMarkContext(MarkContext * markContext)
{
#if ENABLE_CONCURRENT_GC
    // Copying the markContext onto the stack messes up tracked object handling, because
    // the tracked object will call TryMark[Non]Interior to report its references.
    // These functions implicitly use the main markContext on the Recycler, but this will
    // be overridden if we're processing the main markContext here.
    // So, don't do this if we are going to process tracked objects.
    // (This will be the case if we're not queuing and we're not in partial mode, which ignores tracked objects.)
    // In this case we shouldn't be parallel anyway, so we don't need to worry about cache behavior.
    // We should revisit how we manage markContexts in general in the future, and clean this up
    // by passing the MarkContext through to the tracked object's Mark method.
#if ENABLE_PARTIAL_GC
    if (this->inPartialCollectMode || DoQueueTrackedObject())
#else
    if (DoQueueTrackedObject())
#endif
    {
        // The markContext as passed is one of the markContexts that lives on the Recycler.
        // Copy it locally for processing.
        // This serves two purposes:
        // (1) Allow for better codegen because the markContext is local and we don't need to track the this pointer separately
        //     (because all the key processing is inlined into this function).
        // (2) Ensure we don't have weird cache behavior because we're accidentally writing to the same cache line from
        //     multiple threads during parallel marking.
        MarkContext localMarkContext = *markContext;

        // Do the actual marking.
        localMarkContext.ProcessMark<parallel, interior>();

        // Copy back to the original location.
        *markContext = localMarkContext;

        // Clear the local mark context.
        localMarkContext.Clear();
    }
    else
#endif
    {
        Assert(!parallel);
        markContext->ProcessMark<parallel, interior>();
    }
}
void
Recycler::ProcessMark(bool background)
{
#if ENABLE_CONCURRENT_GC
    if (background)
    {
        GCETW(GC_BACKGROUNDMARK_START, (this, backgroundRescanCount));
    }
    else
#endif
    {
        GCETW(GC_MARK_START, (this));
    }
    RECYCLER_PROFILE_EXEC_THREAD_BEGIN(background, this, Js::MarkPhase);

    if (this->enableScanInteriorPointers)
    {
        this->ProcessMarkContext</* parallel */ false, /* interior */ true>(&markContext);
    }
    else
    {
        this->ProcessMarkContext</* parallel */ false, /* interior */ false>(&markContext);
    }

    RECYCLER_PROFILE_EXEC_THREAD_END(background, this, Js::MarkPhase);
#if ENABLE_CONCURRENT_GC
    if (background)
    {
        GCETW(GC_BACKGROUNDMARK_STOP, (this, backgroundRescanCount));
    }
    else
#endif
    {
        GCETW(GC_MARK_STOP, (this));
    }
    DebugOnly(this->markContext.VerifyPostMarkState());
}

void
Recycler::ProcessParallelMark(bool background, MarkContext * markContext)
{
#if ENABLE_CONCURRENT_GC
    if (background)
    {
        GCETW(GC_BACKGROUNDPARALLELMARK_START, (this, backgroundRescanCount));
    }
    else
#endif
    {
        GCETW(GC_PARALLELMARK_START, (this));
    }
    RECYCLER_PROFILE_EXEC_THREAD_BEGIN(background, this, Js::MarkPhase);

    if (this->enableScanInteriorPointers)
    {
        this->ProcessMarkContext</* parallel */ true, /* interior */ true>(markContext);
    }
    else
    {
        this->ProcessMarkContext</* parallel */ true, /* interior */ false>(markContext);
    }

    RECYCLER_PROFILE_EXEC_THREAD_END(background, this, Js::MarkPhase);
#if ENABLE_CONCURRENT_GC
    if (background)
    {
        GCETW(GC_BACKGROUNDPARALLELMARK_STOP, (this, backgroundRescanCount));
    }
    else
#endif
    {
        GCETW(GC_PARALLELMARK_STOP, (this));
    }
}
void
Recycler::Mark()
{
    // Marking in thread, we can just pre-mark them
    ResetMarks(this->enableScanImplicitRoots ? ResetMarkFlags_InThreadImplicitRoots : ResetMarkFlags_InThread);
    collectionState = CollectionStateFindRoots;
    RootMark(CollectionStateMark);
}

#if ENABLE_CONCURRENT_GC
void
Recycler::StartQueueTrackedObject()
{
    Assert(!this->queueTrackedObject);
    Assert(!this->HasPendingTrackObjects());
#if ENABLE_PARTIAL_GC
    Assert(this->clientTrackedObjectList.Empty());
    Assert(!this->inPartialCollectMode);
#endif
    this->queueTrackedObject = true;
}

bool
Recycler::DoQueueTrackedObject() const
{
    Assert(this->queueTrackedObject || !this->IsConcurrentMarkState());
    Assert(this->queueTrackedObject || this->isProcessingTrackedObjects || !this->HasPendingTrackObjects());
#if ENABLE_PARTIAL_GC
    Assert(this->queueTrackedObject || this->inPartialCollectMode || !(this->collectionState == CollectionStateParallelMark));
    Assert(!this->queueTrackedObject || (this->clientTrackedObjectList.Empty() && !this->inPartialCollectMode));
#else
    Assert(this->queueTrackedObject || !(this->collectionState == CollectionStateParallelMark));
#endif
    return this->queueTrackedObject;
}
#endif
void
Recycler::ResetCollectionState()
{
    Assert(IsMarkStackEmpty());
    this->collectionState = CollectionStateNotCollecting;
#if ENABLE_CONCURRENT_GC
    this->backgroundFinishMarkCount = 0;
#endif
    this->inExhaustiveCollection = false;
    this->inDecommitNowCollection = false;
#if ENABLE_CONCURRENT_GC
    CleanupPendingUnroot();
#endif
#if ENABLE_PARTIAL_GC
    if (inPartialCollectMode)
    {
        FinishPartialCollect();
    }
#endif
#if ENABLE_CONCURRENT_GC
    Assert(!this->DoQueueTrackedObject());
#endif

#ifdef RECYCLER_FINALIZE_CHECK
    // Reset the collection stats.
    this->collectionStats.finalizeCount = this->autoHeap.liveFinalizableObjectCount - this->autoHeap.newFinalizableObjectCount - this->autoHeap.pendingDisposableObjectCount;
#endif
}
  2075. void
  2076. Recycler::ResetMarkCollectionState()
  2077. {
  2078. // If we aborted after doing a background Rescan, there will be entries in the markContext.
  2079. // Abort these entries and reset the markContext state.
  2080. markContext.Abort();
  2081. // If we aborted after doing a background parallel Mark, we wouldn't have cleaned up the
  2082. // parallel markContexts yet. Clean these up now.
  2083. // Note parallelMarkContext1 is not used in background parallel (see DoBackgroundParallelMark)
  2084. parallelMarkContext2.Cleanup();
  2085. parallelMarkContext3.Cleanup();
  2086. this->ClearNeedOOMRescan();
  2087. DebugOnly(this->isProcessingRescan = false);
  2088. #if ENABLE_CONCURRENT_GC
  2089. // If we're reseting the mark collection state, we need to unlock the block list
  2090. DListBase<GuestArenaAllocator>::EditingIterator guestArenaIter(&guestArenaList);
  2091. while (guestArenaIter.Next())
  2092. {
  2093. GuestArenaAllocator& allocator = guestArenaIter.Data();
  2094. allocator.SetLockBlockList(false);
  2095. }
  2096. this->queueTrackedObject = false;
  2097. #endif
  2098. ResetCollectionState();
  2099. }
  2100. void
  2101. Recycler::ResetHeuristicCounters()
  2102. {
  2103. autoHeap.lastUncollectedAllocBytes = autoHeap.uncollectedAllocBytes;
  2104. autoHeap.uncollectedAllocBytes = 0;
  2105. autoHeap.uncollectedExternalBytes = 0;
  2106. ResetPartialHeuristicCounters();
  2107. }
  2108. void Recycler::ResetPartialHeuristicCounters()
  2109. {
  2110. #if ENABLE_PARTIAL_GC
  2111. autoHeap.uncollectedNewPageCount = 0;
  2112. #endif
  2113. }
  2114. void
  2115. Recycler::ScheduleNextCollection()
  2116. {
  2117. this->tickCountNextCollection = ::GetTickCount() + RecyclerHeuristic::TickCountCollection;
  2118. this->tickCountNextFinishCollection = ::GetTickCount() + RecyclerHeuristic::TickCountFinishCollection;
  2119. }
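
// Note (illustrative): ::GetTickCount() wraps around roughly every 49.7 days, so the
// deadlines computed above are checked later via signed subtraction, e.g.
//     if ((int)(tickCountNextCollection - ::GetTickCount()) >= 0) { /* not due yet */ }
// (see CollectWithHeuristic), rather than by comparing raw unsigned tick values.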
#if ENABLE_CONCURRENT_GC
void
Recycler::PrepareSweep()
{
    autoHeap.PrepareSweep();
}
#endif

size_t
Recycler::RescanMark(DWORD waitTime)
{
    bool const onLowMemory = this->NeedOOMRescan();

    // REVIEW: Why are we asserting for DoQueueTrackedObject here?
    // Should we split this into different asserts depending on whether
    // concurrent or partial is enabled?
#if ENABLE_CONCURRENT_GC
#if ENABLE_PARTIAL_GC
    Assert(this->inPartialCollectMode || DoQueueTrackedObject());
#else
    Assert(DoQueueTrackedObject());
#endif
#endif

    {
        // We are about to do a rescan mark, which for consistency requires the runtime to stop any additional mutator threads
        AUTO_NO_EXCEPTION_REGION;
        collectionWrapper->PreRescanMarkCallback();
    }

    // Always called in-thread
    Assert(collectionState == CollectionStateRescanFindRoots);

#if ENABLE_CONCURRENT_GC
    if (!onLowMemory && // Don't do background finish mark if we are low on memory
        // Only do background finish mark if we have a time limit or it is forced
        (CUSTOM_PHASE_FORCE1(GetRecyclerFlagsTable(), Js::BackgroundFinishMarkPhase) || waitTime != INFINITE) &&
        // Don't do background finish mark if we failed to finish mark too many times
        (this->backgroundFinishMarkCount < RecyclerHeuristic::MaxBackgroundFinishMarkCount(this->GetRecyclerFlagsTable())))
    {
        this->PrepareBackgroundFindRoots();
        if (StartConcurrent(CollectionStateConcurrentFinishMark))
        {
            this->backgroundFinishMarkCount++;
            this->PrepareSweep();

            GCETW(GC_RESCANMARKWAIT_START, (this, waitTime));
            const BOOL waited = WaitForConcurrentThread(waitTime);
            GCETW(GC_RESCANMARKWAIT_STOP, (this, !waited));

            if (!waited)
            {
                CUSTOM_PHASE_PRINT_TRACE1(GetRecyclerFlagsTable(), Js::BackgroundFinishMarkPhase, _u("Finish mark timed out\n"));
                {
                    // We timed out doing the finish mark, notify the runtime
                    AUTO_NO_EXCEPTION_REGION;
                    collectionWrapper->RescanMarkTimeoutCallback();
                }
                return Recycler::InvalidScanRootBytes;
            }

            Assert(collectionState == CollectionStateRescanWait);
            collectionState = CollectionStateRescanFindRoots;
#ifdef RECYCLER_WRITE_WATCH
            if (!CONFIG_FLAG(ForceSoftwareWriteBarrier))
            {
                Assert(recyclerPageAllocator.GetWriteWatchPageCount() == 0);
                Assert(recyclerLargeBlockPageAllocator.GetWriteWatchPageCount() == 0);
            }
#endif
            return this->backgroundRescanRootBytes;
        }
        this->RevertPrepareBackgroundFindRoots();
    }
#endif

#if ENABLE_CONCURRENT_GC
    this->backgroundFinishMarkCount = 0;
#endif
    return FinishMarkRescan(false) * AutoSystemInfo::PageSize;
}
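
// Note: RescanMark returns Recycler::InvalidScanRootBytes only when the wait for a
// background finish mark times out (possible only with a finite waitTime); callers
// such as FinishMark treat that as "abandon this finish attempt for now".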
size_t
Recycler::FinishMark(DWORD waitTime)
{
    size_t scannedRootBytes = RescanMark(waitTime);
    Assert(waitTime != INFINITE || scannedRootBytes != Recycler::InvalidScanRootBytes);
    if (scannedRootBytes != Recycler::InvalidScanRootBytes)
    {
#if DBG && ENABLE_PARTIAL_GC
        RecyclerVerboseTrace(GetRecyclerFlagsTable(), _u("CTO: %d\n"), this->clientTrackedObjectList.Count());
#endif
#if ENABLE_PARTIAL_GC
        if (this->inPartialCollectMode)
        {
            RecyclerVerboseTrace(GetRecyclerFlagsTable(), _u("Processing client tracked objects\n"));
            ProcessClientTrackedObjects();
        }
        else
#endif
#if ENABLE_CONCURRENT_GC
        if (DoQueueTrackedObject())
        {
            RecyclerVerboseTrace(GetRecyclerFlagsTable(), _u("Processing regular tracked objects\n"));
            ProcessTrackedObjects();
#ifdef RECYCLER_WRITE_WATCH
            if (!CONFIG_FLAG(ForceSoftwareWriteBarrier))
            {
                Assert(this->backgroundFinishMarkCount == 0 ||
                    (this->recyclerPageAllocator.GetWriteWatchPageCount() == 0 &&
                        this->recyclerLargeBlockPageAllocator.GetWriteWatchPageCount() == 0));
            }
#endif
        }
#endif

        // Continue to mark from the roots one more time
        scannedRootBytes += RootMark(CollectionStateRescanMark);
    }
    return scannedRootBytes;
}

#if ENABLE_CONCURRENT_GC
void
Recycler::DoParallelMark()
{
    Assert(this->enableParallelMark);
    Assert(this->maxParallelism > 1 && this->maxParallelism <= 4);

    // Split the mark stack into [this->maxParallelism] equal pieces.
    // The actual # of splits is returned, in case the stack was too small to split that many ways.
    MarkContext * splitContexts[3] = { &parallelMarkContext1, &parallelMarkContext2, &parallelMarkContext3 };
    uint actualSplitCount = markContext.Split(this->maxParallelism - 1, splitContexts);
    Assert(actualSplitCount <= 3);
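
    // Worked example (illustrative): with maxParallelism == 4, up to three pieces are
    // carved off into parallelMarkContext1/2/3 and the remainder stays in markContext;
    // this thread then drains parallelMarkContext1, the concurrent thread drains
    // markContext, and parallelThread1/2 drain parallelMarkContext2/3. A smaller
    // actualSplitCount just means fewer workers get a share.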
    // If we failed to split at all, just mark in thread with no parallelism.
    if (actualSplitCount == 0)
    {
        this->ProcessMark(false);
        return;
    }

    // We need to queue tracked objects while we mark in parallel.
    // (Unless it's a partial collect, in which case we don't process tracked objects at all)
#if ENABLE_PARTIAL_GC
    if (!this->inPartialCollectMode)
#endif
    {
        StartQueueTrackedObject();
    }

    // Kick off marking on the background thread
    bool concurrentSuccess = StartConcurrent(CollectionStateParallelMark);

    // If there's enough work to split, then kick off marking on parallel threads too.
    // If the threads haven't been created yet, this will create them (or fail).
    bool parallelSuccess1 = false;
    bool parallelSuccess2 = false;
    if (concurrentSuccess && actualSplitCount >= 2)
    {
        parallelSuccess1 = parallelThread1.StartConcurrent();
        if (parallelSuccess1 && actualSplitCount == 3)
        {
            parallelSuccess2 = parallelThread2.StartConcurrent();
        }
    }

    // Process our portion of the split.
    this->ProcessParallelMark(false, &parallelMarkContext1);

    // If we successfully launched parallel work, wait for it to complete.
    // If we failed, then process the work in-thread now.
    if (concurrentSuccess)
    {
        WaitForConcurrentThread(INFINITE);
    }
    else
    {
        this->ProcessParallelMark(false, &markContext);
    }

    if (actualSplitCount >= 2)
    {
        if (parallelSuccess1)
        {
            parallelThread1.WaitForConcurrent();
        }
        else
        {
            this->ProcessParallelMark(false, &parallelMarkContext2);
        }

        if (actualSplitCount == 3)
        {
            if (parallelSuccess2)
            {
                parallelThread2.WaitForConcurrent();
            }
            else
            {
                this->ProcessParallelMark(false, &parallelMarkContext3);
            }
        }
    }

    this->collectionState = CollectionStateMark;

    // Process tracked objects, if any, then do one final mark phase in case they marked any new objects.
    // (Unless it's a partial collect, in which case we don't process tracked objects at all)
#if ENABLE_PARTIAL_GC
    if (!this->inPartialCollectMode)
#endif
    {
        this->ProcessTrackedObjects();
        this->ProcessMark(false);
    }
#if ENABLE_PARTIAL_GC
    else
    {
        Assert(!this->HasPendingTrackObjects());
    }
#endif
}
void
Recycler::DoBackgroundParallelMark()
{
    // Split the mark stack into [this->maxParallelism - 1] equal pieces (thus, "- 2" below).
    // The actual # of splits is returned, in case the stack was too small to split that many ways.
    // The parallel threads are hardwired to use parallelMarkContext2/3, so we split using those.
    uint actualSplitCount = 0;
    MarkContext * splitContexts[2] = { &parallelMarkContext2, &parallelMarkContext3 };
    if (this->enableParallelMark)
    {
        Assert(this->maxParallelism > 1 && this->maxParallelism <= 4);
        if (this->maxParallelism > 2)
        {
            actualSplitCount = markContext.Split(this->maxParallelism - 2, splitContexts);
        }
    }
    Assert(actualSplitCount <= 2);
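
    // Illustrative: this path runs on the concurrent thread, which keeps markContext
    // for its own share, so with maxParallelism == 4 at most two extra pieces go to
    // parallelMarkContext2/3 (hence the "- 2" above); parallelMarkContext1 is never
    // used on this path.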
    // If we failed to split at all, just mark in thread with no parallelism.
    if (actualSplitCount == 0)
    {
        this->ProcessMark(true);
        return;
    }

#if ENABLE_PARTIAL_GC
    // We should already be set up to queue tracked objects, unless this is a partial collect
    Assert(this->DoQueueTrackedObject() || this->inPartialCollectMode);
#else
    Assert(this->DoQueueTrackedObject());
#endif

    this->collectionState = CollectionStateBackgroundParallelMark;

    // Kick off marking on parallel threads too, if there is work for them.
    // If the threads haven't been created yet, this will create them (or fail).
    bool parallelSuccess1 = false;
    bool parallelSuccess2 = false;
    parallelSuccess1 = parallelThread1.StartConcurrent();
    if (parallelSuccess1 && actualSplitCount == 2)
    {
        parallelSuccess2 = parallelThread2.StartConcurrent();
    }

    // Process our portion of the split.
    this->ProcessParallelMark(true, &markContext);

    // If we successfully launched parallel work, wait for it to complete.
    // If we failed, then process the work in-thread now.
    if (parallelSuccess1)
    {
        parallelThread1.WaitForConcurrent();
    }
    else
    {
        this->ProcessParallelMark(true, &parallelMarkContext2);
    }

    if (actualSplitCount == 2)
    {
        if (parallelSuccess2)
        {
            parallelThread2.WaitForConcurrent();
        }
        else
        {
            this->ProcessParallelMark(true, &parallelMarkContext3);
        }
    }

    this->collectionState = CollectionStateConcurrentMark;
}
#endif

size_t
Recycler::RootMark(CollectionState markState)
{
    size_t scannedRootBytes = 0;
    Assert(!this->NeedOOMRescan() || markState == CollectionStateRescanMark);

#if ENABLE_PARTIAL_GC
    RecyclerVerboseTrace(GetRecyclerFlagsTable(), _u("PreMark done, partial collect: %d\n"), this->inPartialCollectMode);
#else
    RecyclerVerboseTrace(GetRecyclerFlagsTable(), _u("PreMark done, partial collect not available\n"));
#endif

    Assert(collectionState == (markState == CollectionStateMark ? CollectionStateFindRoots : CollectionStateRescanFindRoots));

    BOOL stacksScannedByRuntime = FALSE;
    {
        // We are about to scan roots in thread; notify the runtime first so it can stop threads if necessary and also provide additional roots
        AUTO_NO_EXCEPTION_REGION;
        RecyclerScanMemoryCallback scanMemory(this);
        scannedRootBytes += collectionWrapper->RootMarkCallback(scanMemory, &stacksScannedByRuntime);
    }

    scannedRootBytes += FindRoots();
    if (!stacksScannedByRuntime)
    {
        // The runtime did not scan the stack(s) for us, so we use the normal Recycler code.
        scannedRootBytes += ScanStack();
    }

    this->collectionState = markState;

#if ENABLE_CONCURRENT_GC
    if (this->enableParallelMark)
    {
        this->DoParallelMark();
    }
    else
#endif
    {
        this->ProcessMark(false);
    }

    if (this->EndMark())
    {
        // REVIEW: This heuristic doesn't apply when partial is off, so there's no need
        // to modify scannedRootBytes here, correct?
#if ENABLE_PARTIAL_GC
        // Return a large root-scanned byte count so we don't get into partial mode when we are low on memory
        scannedRootBytes = RecyclerSweep::MaxPartialCollectRescanRootBytes + 1;
#endif
    }
    return scannedRootBytes;
}
bool
Recycler::EndMarkCheckOOMRescan()
{
    bool oomRescan = false;
    if (this->NeedOOMRescan())
    {
#ifdef RECYCLER_DUMP_OBJECT_GRAPH
        if (this->objectGraphDumper)
        {
            // Do not complete the mark if we are just dumping the object graph.
            // Just report out of memory.
            this->objectGraphDumper->isOutOfMemory = true;
            this->ClearNeedOOMRescan();
        }
        else
#endif
        {
            EndMarkOnLowMemory();
            oomRescan = true;
        }
    }

    // Done with the mark stack; it should be empty.
    // Release the pages it is holding.
    Assert(!HasPendingMarkObjects());
    Assert(!HasPendingTrackObjects());
    return oomRescan;
}

bool
Recycler::EndMark()
{
#if ENABLE_CONCURRENT_GC
    Assert(!this->DoQueueTrackedObject());
#endif
#if ENABLE_PARTIAL_GC
    Assert(this->clientTrackedObjectList.Empty());
#endif

    {
        // We have finished marking
        AUTO_NO_EXCEPTION_REGION;
        collectionWrapper->EndMarkCallback();
    }

    bool oomRescan = EndMarkCheckOOMRescan();
    if (ProcessObjectBeforeCollectCallbacks())
    {
        // Callbacks may trigger additional marking; need to check OOMRescan again
        oomRescan |= EndMarkCheckOOMRescan();
    }

    // GC-CONSIDER: Consider keeping some pages around
    GCETW(GC_DECOMMIT_CONCURRENT_COLLECT_PAGE_ALLOCATOR_START, (this));

    // Clean up mark contexts, which will release held free pages.
    // Do this for all contexts before we decommit, to make sure all pages are freed.
    markContext.Cleanup();
    parallelMarkContext1.Cleanup();
    parallelMarkContext2.Cleanup();
    parallelMarkContext3.Cleanup();

    // Decommit all pages
    markContext.DecommitPages();
    parallelMarkContext1.DecommitPages();
    parallelMarkContext2.DecommitPages();
    parallelMarkContext3.DecommitPages();

    GCETW(GC_DECOMMIT_CONCURRENT_COLLECT_PAGE_ALLOCATOR_STOP, (this));
    return oomRescan;
}
void
Recycler::EndMarkOnLowMemory()
{
    GCETW(GC_ENDMARKONLOWMEMORY_START, (this));
    Assert(this->NeedOOMRescan());
    this->inEndMarkOnLowMemory = true;

    // Treat this as a concurrent mark reset so that we don't invalidate the allocators
    RecyclerVerboseTrace(GetRecyclerFlagsTable(), _u("OOM during mark - rerunning mark\n"));

    // Try to release as much memory as possible
    ForEachPageAllocator([](IdleDecommitPageAllocator* pageAlloc)
    {
        pageAlloc->DecommitNow();
    });

#ifdef ENABLE_DEBUG_CONFIG_OPTIONS
    uint iterations = 0;
#endif
    do
    {
#if ENABLE_PARTIAL_GC
        Assert(this->clientTrackedObjectList.Empty());
#endif
#if ENABLE_CONCURRENT_GC
        // Always queue tracked objects during rescan, to avoid changes to mark state.
        // (Unless we're in a partial collect, in which case we ignore tracked objects)
        Assert(!this->DoQueueTrackedObject());
#if ENABLE_PARTIAL_GC
        if (!this->inPartialCollectMode)
#endif
        {
            this->StartQueueTrackedObject();
        }
#endif

        this->collectionState = CollectionStateRescanFindRoots;
        this->ClearNeedOOMRescan();
#if DBG
        Assert(!this->isProcessingRescan);
        this->isProcessingRescan = true;
#endif
        if (!heapBlockMap.OOMRescan(this))
        {
            // Kill the process; we couldn't even rescan a single block,
            // so we are in a pretty low memory state at this point.
            // The fail-fast is present for two reasons:
            // 1) Defense-in-depth for cases we hadn't thought about
            // 2) Deal with cases like -MaxMarkStackPageCount:1 which can still hang without the fail-fast
            MarkStack_OOM_fatal_error();
        }
        autoHeap.Rescan(RescanFlags_None);
        DebugOnly(this->isProcessingRescan = false);

        this->ProcessMark(false);

#if ENABLE_CONCURRENT_GC
        // Process any tracked objects we found
#if ENABLE_PARTIAL_GC
        if (!this->inPartialCollectMode)
#endif
        {
            ProcessTrackedObjects();
        }
#endif
        // Drain the mark stack
        ProcessMark(false);
#ifdef ENABLE_DEBUG_CONFIG_OPTIONS
        iterations++;
#endif
    }
    while (this->NeedOOMRescan());

    Assert(!markContext.GetPageAllocator()->DisableAllocationOutOfMemory());
    Assert(!parallelMarkContext1.GetPageAllocator()->DisableAllocationOutOfMemory());
    Assert(!parallelMarkContext2.GetPageAllocator()->DisableAllocationOutOfMemory());
    Assert(!parallelMarkContext3.GetPageAllocator()->DisableAllocationOutOfMemory());

    CUSTOM_PHASE_PRINT_TRACE1(GetRecyclerFlagsTable(), Js::RecyclerPhase, _u("EndMarkOnLowMemory iterations: %d\n"), iterations);

#if ENABLE_PARTIAL_GC
    Assert(this->clientTrackedObjectList.Empty());
#endif
#if ENABLE_CONCURRENT_GC
    Assert(!this->DoQueueTrackedObject());
#endif
    this->inEndMarkOnLowMemory = false;

#if ENABLE_PARTIAL_GC
    if (this->inPartialCollectMode)
    {
        this->FinishPartialCollect();
    }
#endif
    GCETW(GC_ENDMARKONLOWMEMORY_STOP, (this));
}

#if DBG
bool
Recycler::IsMarkStackEmpty()
{
    return (markContext.IsEmpty() && parallelMarkContext1.IsEmpty() && parallelMarkContext2.IsEmpty() && parallelMarkContext3.IsEmpty());
}
#endif
#ifdef HEAP_ENUMERATION_VALIDATION
void
Recycler::PostHeapEnumScan(PostHeapEnumScanCallback callback, void *data)
{
    this->pfPostHeapEnumScanCallback = callback;
    this->postHeapEnunScanData = data;
    FindRoots();
    ProcessMark(false);
    this->pfPostHeapEnumScanCallback = NULL;
    this->postHeapEnunScanData = NULL;
}
#endif

#if ENABLE_CONCURRENT_GC
bool
Recycler::QueueTrackedObject(FinalizableObject * trackableObject)
{
    return markContext.AddTrackedObject(trackableObject);
}
#endif

bool
Recycler::FindImplicitRootObject(void* candidate, RecyclerHeapObjectInfo& heapObject)
{
    HeapBlock* heapBlock = FindHeapBlock(candidate);
    if (heapBlock == nullptr)
    {
        return false;
    }

    if (heapBlock->GetHeapBlockType() < HeapBlock::HeapBlockType::SmallAllocBlockTypeCount)
    {
        return ((SmallHeapBlock*)heapBlock)->FindImplicitRootObject(candidate, this, heapObject);
    }
    else if (!heapBlock->IsLargeHeapBlock())
    {
        return ((MediumHeapBlock*)heapBlock)->FindImplicitRootObject(candidate, this, heapObject);
    }
    else
    {
        return ((LargeHeapBlock*)heapBlock)->FindImplicitRootObject(candidate, this, heapObject);
    }
}
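
// Dispatch note: heap block type values below SmallAllocBlockTypeCount identify small
// blocks; of the remaining types, anything that is not a large block is treated as a
// medium block, which is why no explicit medium check is needed above.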
bool
Recycler::FindHeapObject(void* candidate, FindHeapObjectFlags flags, RecyclerHeapObjectInfo& heapObject)
{
    HeapBlock* heapBlock = FindHeapBlock(candidate);
    return heapBlock && heapBlock->FindHeapObject(candidate, this, flags, heapObject);
}

bool
Recycler::FindHeapObjectWithClearedAllocators(void* candidate, RecyclerHeapObjectInfo& heapObject)
{
    // Heap enum has some cases where it allocates, so we can't assert unconditionally
    Assert(autoHeap.AllocatorsAreEmpty() || this->isHeapEnumInProgress);
    return FindHeapObject(candidate, FindHeapObjectFlags_ClearedAllocators, heapObject);
}

void*
Recycler::GetRealAddressFromInterior(void* candidate)
{
    HeapBlock * heapBlock = heapBlockMap.GetHeapBlock(candidate);
    if (heapBlock == NULL)
    {
        return NULL;
    }
    return heapBlock->GetRealAddressFromInterior(candidate);
}
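
// Usage sketch (hypothetical caller): map a pointer into the middle of a GC object
// back to the object's start address, or NULL when it is not a recycler address:
//     char* interior = (char*)obj + 12;                               // interior pointer
//     void* start = recycler->GetRealAddressFromInterior(interior);   // == obj, or NULL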
/*------------------------------------------------------------------------------------------------
 * Sweep
 *------------------------------------------------------------------------------------------------*/
#if ENABLE_PARTIAL_GC
bool
Recycler::Sweep(size_t rescanRootBytes, bool concurrent, bool adjustPartialHeuristics)
#else
bool
Recycler::Sweep(bool concurrent)
#endif
{
#if ENABLE_PARTIAL_GC && ENABLE_CONCURRENT_GC
    Assert(!this->hasBackgroundFinishPartial);
#endif
#if ENABLE_CONCURRENT_GC
    if (!this->enableConcurrentSweep)
#endif
    {
        concurrent = false;
    }

    RECYCLER_PROFILE_EXEC_BEGIN(this, concurrent ? Js::ConcurrentSweepPhase : Js::SweepPhase);
#if ENABLE_PARTIAL_GC
    recyclerSweepInstance.BeginSweep(this, rescanRootBytes, adjustPartialHeuristics);
#else
    recyclerSweepInstance.BeginSweep(this);
#endif
    this->SweepHeap(concurrent, *recyclerSweep);
#if ENABLE_CONCURRENT_GC
    if (concurrent)
    {
        // If we finished mark in the background, all the relevant write watches should already be reset.
        // Only reset write watch if we didn't finish mark in the background.
        if (this->backgroundFinishMarkCount == 0)
        {
#if ENABLE_PARTIAL_GC
            if (this->inPartialCollectMode)
            {
#ifdef RECYCLER_WRITE_WATCH
                if (!CONFIG_FLAG(ForceSoftwareWriteBarrier))
                {
                    RECYCLER_PROFILE_EXEC_BEGIN(this, Js::ResetWriteWatchPhase);
                    if (!recyclerPageAllocator.ResetWriteWatch() || !recyclerLargeBlockPageAllocator.ResetWriteWatch())
                    {
                        // Shouldn't happen
                        Assert(false);

                        // Disable partial collect
                        this->enablePartialCollect = false;

                        // We haven't done any partial collection yet, just get out of partial collect mode
                        this->inPartialCollectMode = false;
                    }
                    RECYCLER_PROFILE_EXEC_END(this, Js::ResetWriteWatchPhase);
                }
#endif
            }
#endif
        }
    }
    else
#endif
    {
        recyclerSweep->FinishSweep();
        recyclerSweep->EndSweep();
    }
    RECYCLER_PROFILE_EXEC_END(this, concurrent ? Js::ConcurrentSweepPhase : Js::SweepPhase);

    this->collectionState = CollectionStatePostSweepRedeferralCallback;
    // Note that PostSweepRedeferralCallback can't have exceptions escape.
    collectionWrapper->PostSweepRedeferralCallBack();

#if ENABLE_CONCURRENT_GC
    if (concurrent)
    {
        if (!StartConcurrent(CollectionStateConcurrentSweep))
        {
            // Failed to spawn the concurrent sweep.
            // Instead, force the concurrent sweep to happen right here in thread.
            this->collectionState = CollectionStateConcurrentSweep;
            DoBackgroundWork(true);

            // Continue as if the concurrent sweep were executing.
            // Next time we check for completion, we will finish the sweep just as if it had happened out of thread.
        }
        return true;
    }
#endif
    return false;
}
#ifdef ENABLE_DEBUG_CONFIG_OPTIONS
void Recycler::DisplayMemStats()
{
#ifdef PERF_COUNTERS
#if DBG_DUMP
    Output::Print(_u("Recycler Live Object Count %u\n"), PerfCounter::RecyclerCounterSet::GetLiveObjectCounter().GetValue());
    Output::Print(_u("Recycler Live Object Size %u\n"), PerfCounter::RecyclerCounterSet::GetLiveObjectSizeCounter().GetValue());
#endif
    Output::Print(_u("Recycler Used Page Size %u\n"), PerfCounter::PageAllocatorCounterSet::GetUsedSizeCounter(PageAllocatorType::PageAllocatorType_Recycler).GetValue());
#endif
}
#endif

CollectedRecyclerWeakRefHeapBlock CollectedRecyclerWeakRefHeapBlock::Instance;

void
Recycler::SweepWeakReference()
{
    RECYCLER_PROFILE_EXEC_BEGIN(this, Js::SweepWeakPhase);
    GCETW(GC_SWEEP_WEAKREF_START, (this));

    // REVIEW: Clean up the weak reference map concurrently?
    bool hasCleanup = false;
    weakReferenceMap.Map([&hasCleanup](RecyclerWeakReferenceBase * weakRef) -> bool
    {
        // If the weak reference handle itself wasn't marked, drop the entry.
        if (!weakRef->weakRefHeapBlock->TestObjectMarkedBit(weakRef))
        {
            hasCleanup = true;

            // Remove
            return false;
        }

        // If the referenced object wasn't marked, clear the strong reference and drop the entry.
        if (!weakRef->strongRefHeapBlock->TestObjectMarkedBit(weakRef->strongRef))
        {
            hasCleanup = true;
            weakRef->strongRef = nullptr;
            // Put in a dummy heap block so that we can still do the isPendingConcurrentSweep check first.
            weakRef->strongRefHeapBlock = &CollectedRecyclerWeakRefHeapBlock::Instance;

            // Remove
            return false;
        }

        // Keep
        return true;
    });
    this->weakReferenceCleanupId += hasCleanup;

    GCETW(GC_SWEEP_WEAKREF_STOP, (this));
    RECYCLER_PROFILE_EXEC_END(this, Js::SweepWeakPhase);
}
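
// Note: weakReferenceCleanupId advances by at most one per sweep, and only when some
// entry was removed; presumably callers that cache weak-reference lookups can compare
// ids to detect that the map changed (an assumption based on the usage seen here).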
void
Recycler::SweepHeap(bool concurrent, RecyclerSweep& recyclerSweep)
{
    Assert(!this->hasPendingDeleteGuestArena);
    Assert(!this->isHeapEnumInProgress);
#if ENABLE_CONCURRENT_GC
    Assert(!this->DoQueueTrackedObject());
    if (concurrent)
    {
        collectionState = CollectionStateSetupConcurrentSweep;
#if ENABLE_BACKGROUND_PAGE_ZEROING
        if (CONFIG_FLAG(EnableBGFreeZero))
        {
            // Only queue up non-leaf pages; leaf pages don't need to be zeroed out
            recyclerPageAllocator.StartQueueZeroPage();
            recyclerLargeBlockPageAllocator.StartQueueZeroPage();
#ifdef RECYCLER_WRITE_BARRIER_ALLOC_SEPARATE_PAGE
            recyclerWithBarrierPageAllocator.StartQueueZeroPage();
#endif
        }
#endif
    }
    else
#endif
    {
        Assert(!concurrent);
        collectionState = CollectionStateSweep;
    }

    this->SweepWeakReference();

#if ENABLE_CONCURRENT_GC
    if (concurrent)
    {
        GCETW(GC_SETUPBACKGROUNDSWEEP_START, (this));
    }
    else
#endif
    {
        GCETW(GC_SWEEP_START, (this));
    }

    recyclerPageAllocator.SuspendIdleDecommit();
#ifdef RECYCLER_WRITE_BARRIER_ALLOC_SEPARATE_PAGE
    recyclerWithBarrierPageAllocator.SuspendIdleDecommit();
#endif
    recyclerLargeBlockPageAllocator.SuspendIdleDecommit();

    autoHeap.Sweep(recyclerSweep, concurrent);

#ifdef RECYCLER_WRITE_BARRIER_ALLOC_SEPARATE_PAGE
    recyclerWithBarrierPageAllocator.ResumeIdleDecommit();
#endif
    recyclerPageAllocator.ResumeIdleDecommit();
    recyclerLargeBlockPageAllocator.ResumeIdleDecommit();

#if ENABLE_CONCURRENT_GC
    if (concurrent)
    {
#if ENABLE_BACKGROUND_PAGE_ZEROING
        if (CONFIG_FLAG(EnableBGFreeZero))
        {
            recyclerPageAllocator.StopQueueZeroPage();
            recyclerLargeBlockPageAllocator.StopQueueZeroPage();
#ifdef RECYCLER_WRITE_BARRIER_ALLOC_SEPARATE_PAGE
            recyclerWithBarrierPageAllocator.StopQueueZeroPage();
#endif
        }
#endif
        GCETW(GC_SETUPBACKGROUNDSWEEP_STOP, (this));
    }
    else
    {
#if ENABLE_BACKGROUND_PAGE_ZEROING
        if (CONFIG_FLAG(EnableBGFreeZero))
        {
            Assert(!recyclerPageAllocator.HasZeroQueuedPages());
            Assert(!recyclerLargeBlockPageAllocator.HasZeroQueuedPages());
#ifdef RECYCLER_WRITE_BARRIER_ALLOC_SEPARATE_PAGE
            Assert(!recyclerWithBarrierPageAllocator.HasZeroQueuedPages());
#endif
        }
#endif
        uint sweptBytes = 0;
#ifdef RECYCLER_STATS
        sweptBytes = (uint)collectionStats.objectSweptBytes;
#endif
        GCETW(GC_SWEEP_STOP, (this, sweptBytes));
    }
#endif
}
#if ENABLE_PARTIAL_GC && ENABLE_CONCURRENT_GC
void
Recycler::BackgroundFinishPartialCollect(RecyclerSweep * recyclerSweep)
{
    Assert(this->inPartialCollectMode);
    Assert(recyclerSweep != nullptr && recyclerSweep->IsBackground());

    this->hasBackgroundFinishPartial = true;
    this->autoHeap.FinishPartialCollect(recyclerSweep);
    this->inPartialCollectMode = false;
}
#endif

void
Recycler::DisposeObjects()
{
    Assert(this->allowDispose && this->hasDisposableObject && !this->inDispose);
    Assert(!isHeapEnumInProgress);
    GCETW(GC_DISPOSE_START, (this));
    ASYNC_HOST_OPERATION_START(collectionWrapper);

    this->inDispose = true;

#ifdef PROFILE_RECYCLER_ALLOC
    // A finalizer may allocate memory, and object disposal can happen in the middle of an allocation,
    // so save and restore the tracked object info
    TrackAllocData oldAllocData = { 0 };
    if (trackerDictionary != nullptr)
    {
        oldAllocData = nextAllocData;
        nextAllocData.Clear();
    }
#endif

#ifdef ENABLE_DEBUG_CONFIG_OPTIONS
    if (GetRecyclerFlagsTable().Trace.IsEnabled(Js::RecyclerPhase))
    {
        Output::Print(_u("Disposing objects\n"));
    }
#endif

    // Disable dispose within this method, restore it when we're done
    AutoRestoreValue<bool> disableDispose(&this->allowDispose, false);

#ifdef FAULT_INJECTION
    this->collectionWrapper->DisposeScriptContextByFaultInjectionCallBack();
#endif

    this->collectionWrapper->PreDisposeObjectsCallBack();

    // Scope the timestamp to just the dispose
    {
        AUTO_TIMESTAMP(dispose);
        autoHeap.DisposeObjects();
    }

#ifdef PROFILE_RECYCLER_ALLOC
    if (trackerDictionary != nullptr)
    {
        Assert(nextAllocData.IsEmpty());
        nextAllocData = oldAllocData;
    }
#endif

#ifdef ENABLE_PROJECTION
    {
        Assert(!this->inResolveExternalWeakReferences);
        Assert(!this->allowDispose);
#if DBG || defined RECYCLER_TRACE
        AutoRestoreValue<bool> inResolveExternalWeakReferencedObjects(&this->inResolveExternalWeakReferences, true);
#endif
        AUTO_TIMESTAMP(externalWeakReferenceObjectResolve);

        // This is where it is safe to resolve external weak references, as they can lead to new script entry
        collectionWrapper->ResolveExternalWeakReferencedObjects();
    }
#endif

    Assert(!this->inResolveExternalWeakReferences);
    Assert(this->inDispose);
    this->inDispose = false;
    ASYNC_HOST_OPERATION_END(collectionWrapper);

    uint sweptBytes = 0;
#ifdef RECYCLER_STATS
    sweptBytes = (uint)collectionStats.objectSweptBytes;
#endif
    GCETW(GC_DISPOSE_STOP, (this, sweptBytes));
}
bool
Recycler::FinishDisposeObjects()
{
    CUSTOM_PHASE_PRINT_TRACE1(GetRecyclerFlagsTable(), Js::DisposePhase, _u("[Dispose] AllowDispose in FinishDisposeObject: %d\n"), this->allowDispose);
    if (this->hasDisposableObject && this->allowDispose)
    {
        CUSTOM_PHASE_PRINT_TRACE1(GetRecyclerFlagsTable(), Js::DisposePhase, _u("[Dispose] FinishDisposeObject, calling Dispose: %d\n"), this->allowDispose);
#ifdef RECYCLER_TRACE
        CollectionParam savedCollectionParam = collectionParam;
#endif
        DisposeObjects();
#ifdef RECYCLER_TRACE
        collectionParam = savedCollectionParam;
#endif
        // FinishDisposeObjects is always called either during a collection,
        // or somewhere that will check NeedExhaustiveRepeatCollect(), so no need to check it here
        return true;
    }

#ifdef RECYCLER_TRACE
    if (!this->inDispose && this->hasDisposableObject
        && GetRecyclerFlagsTable().Trace.IsEnabled(Js::RecyclerPhase))
    {
        Output::Print(_u("%04X> RC(%p): %s\n"), this->mainThreadId, this, _u("Dispose object delayed"));
    }
#endif
    return false;
}

// Explicitly instantiate the supported modes
template bool Recycler::FinishDisposeObjectsNow<FinishDispose>();
template bool Recycler::FinishDisposeObjectsNow<FinishDisposeTimed>();

template <CollectionFlags flags>
bool
Recycler::FinishDisposeObjectsNow()
{
    if (inDisposeWrapper)
    {
        return false;
    }
    return FinishDisposeObjectsWrapped<flags>();
}

template <CollectionFlags flags>
inline
bool
Recycler::FinishDisposeObjectsWrapped()
{
    const BOOL allowDisposeFlag = flags & CollectOverride_AllowDispose;
    if (allowDisposeFlag && this->NeedDispose())
    {
        if ((flags & CollectHeuristic_TimeIfScriptActive) == CollectHeuristic_TimeIfScriptActive)
        {
            if (!this->NeedDisposeTimed())
            {
                return false;
            }
        }

        this->allowDispose = true;
        this->inDisposeWrapper = true;
#ifdef RECYCLER_TRACE
        if (GetRecyclerFlagsTable().Trace.IsEnabled(Js::RecyclerPhase))
        {
            Output::Print(_u("%04X> RC(%p): %s\n"), this->mainThreadId, this, _u("Process delayed dispose object"));
        }
#endif
        collectionWrapper->DisposeObjects(this);

        // Dispose may get into a message loop and cause a reentrant GC. If such a GC doesn't allow reentrancy,
        // it will get added as a pending collect request.
        // FinishDisposeObjectsWrapped/DisposeObjectsWrapped is called from places that might not be during a collection
        // and won't check NeedExhaustiveRepeatCollect(), so we need to check it here to honor those requests.
        if (!this->CollectionInProgress() && NeedExhaustiveRepeatCollect() && ((flags & CollectOverride_NoExhaustiveCollect) != CollectOverride_NoExhaustiveCollect))
        {
#ifdef RECYCLER_TRACE
            CaptureCollectionParam((CollectionFlags)(flags & ~CollectMode_Partial), true);
#endif
            DoCollectWrapped((CollectionFlags)(flags & ~CollectMode_Partial));
        }

        this->inDisposeWrapper = false;
        return true;
    }
    return false;
}
/*------------------------------------------------------------------------------------------------
 * Collect
 *------------------------------------------------------------------------------------------------*/
BOOL
Recycler::CollectOnAllocatorThread()
{
#if ENABLE_PARTIAL_GC
    Assert(!inPartialCollectMode);
#endif
#ifdef RECYCLER_TRACE
    PrintCollectTrace(Js::GarbageCollectPhase);
#endif
    this->CollectionBegin<Js::GarbageCollectPhase>();
    this->Mark();

    // Partial collect mode is not re-enabled after a non-partial in-thread GC because partial GC heuristics are not adjusted
    // after a full in-thread GC. Enabling partial collect mode causes partial GC heuristics to be reset before the next full
    // in-thread GC, thereby allowing partial GC to kick in more easily without being able to adjust heuristics after the full
    // GCs. Until we have a way of adjusting partial GC heuristics after a full in-thread GC, once partial collect mode is
    // turned off, it will remain off until a concurrent GC happens.
    this->Sweep();

    this->CollectionEnd<Js::GarbageCollectPhase>();
    FinishCollection();
    return true;
}

// Explicitly instantiate all possible modes
template BOOL Recycler::CollectNow<CollectOnScriptIdle>();
template BOOL Recycler::CollectNow<CollectOnScriptExit>();
template BOOL Recycler::CollectNow<CollectOnAllocation>();
template BOOL Recycler::CollectNow<CollectOnTypedArrayAllocation>();
template BOOL Recycler::CollectNow<CollectOnScriptCloseNonPrimary>();
template BOOL Recycler::CollectNow<CollectExhaustiveCandidate>();
template BOOL Recycler::CollectNow<CollectNowConcurrent>();
template BOOL Recycler::CollectNow<CollectNowExhaustive>();
template BOOL Recycler::CollectNow<CollectNowDecommitNowExplicit>();
template BOOL Recycler::CollectNow<CollectNowPartial>();
template BOOL Recycler::CollectNow<CollectNowConcurrentPartial>();
template BOOL Recycler::CollectNow<CollectNowForceInThread>();
template BOOL Recycler::CollectNow<CollectNowForceInThreadExternal>();
template BOOL Recycler::CollectNow<CollectNowForceInThreadExternalNoStack>();
template BOOL Recycler::CollectNow<CollectOnRecoverFromOutOfMemory>();
template BOOL Recycler::CollectNow<CollectNowDefault>();
template BOOL Recycler::CollectNow<CollectOnSuspendCleanup>();
template BOOL Recycler::CollectNow<CollectNowDefaultLSCleanup>();
#if defined(CHECK_MEMORY_LEAK) || defined(LEAK_REPORT)
template BOOL Recycler::CollectNow<CollectNowFinalGC>();
#endif
#ifdef ENABLE_DEBUG_CONFIG_OPTIONS
template BOOL Recycler::CollectNow<CollectNowExhaustiveSkipStack>();
#endif

template <CollectionFlags flags>
BOOL
Recycler::CollectNow()
{
    // Force-in-thread cannot be concurrent or partial
    CompileAssert((flags & CollectOverride_ForceInThread) == 0 || (flags & (CollectMode_Concurrent | CollectMode_Partial)) == 0);

    // Collections are not allowed when the recycler is currently executing the PostCollectionCallback
    if (this->IsAllocatableCallbackState())
    {
        return false;
    }

#if ENABLE_DEBUG_CONFIG_OPTIONS
    if ((disableCollection && (flags & CollectOverride_Explicit) == 0) || isShuttingDown)
#else
    if (isShuttingDown)
#endif
    {
        Assert(collectionState == CollectionStateNotCollecting
            || collectionState == CollectionStateExit
            || this->isShuttingDown);
        return false;
    }

    if (flags & CollectOverride_ExhaustiveCandidate)
    {
        return CollectWithExhaustiveCandidate<flags>();
    }
    return CollectInternal<flags>();
}
template <CollectionFlags flags>
BOOL
Recycler::GetPartialFlag()
{
#if ENABLE_PARTIAL_GC
    #pragma prefast(suppress:6313, "flags is a template parameter and can be 0")
    return (flags & CollectMode_Partial) && inPartialCollectMode;
#else
    return false;
#endif
}

template <CollectionFlags flags>
BOOL
Recycler::CollectWithExhaustiveCandidate()
{
    Assert(flags & CollectOverride_ExhaustiveCandidate);

    // Currently we don't have any exhaustive candidate that has a heuristic.
    Assert((flags & CollectHeuristic_Mask & ~CollectHeuristic_Never) == 0);

    this->hasExhaustiveCandidate = true;
    if (flags & CollectHeuristic_Never)
    {
        // This is just an exhaustive candidate notification. Don't trigger a GC.
        return false;
    }

    // Continue with the GC heuristic
    return CollectInternal<flags>();
}

template <CollectionFlags flags>
BOOL
Recycler::CollectInternal()
{
    // The CollectHeuristic_Never flag should only be used with an exhaustive candidate
    Assert((flags & CollectHeuristic_Never) == 0);

    // If we're in a re-entrant state, we want to allow GC to be triggered only
    // from allocation (or trigger points with AllowReentrant). This is to minimize
    // the number of reentrant GCs.
    if ((flags & CollectOverride_AllowReentrant) == 0 && this->inDispose)
    {
        return false;
    }

#ifdef RECYCLER_TRACE
    CaptureCollectionParam(flags);
#endif

#if ENABLE_CONCURRENT_GC
    const BOOL concurrent = flags & CollectMode_Concurrent;
    const BOOL finishConcurrent = flags & CollectOverride_FinishConcurrent;

    // If we priority boosted, we should try to finish the concurrent GC every chance we get.
    // Otherwise, we should finish it if we are not doing a concurrent GC,
    // or if the flags tell us to always try to finish a concurrent GC (CollectOverride_FinishConcurrent).
    if ((!concurrent || finishConcurrent || priorityBoost) && this->CollectionInProgress())
    {
        return TryFinishConcurrentCollect<flags>();
    }
#endif

    if (flags & CollectHeuristic_Mask)
    {
        // Check some heuristics first before starting a collection
        return CollectWithHeuristic<flags>();
    }

    // Start a collection now.
    return Collect<flags>();
}
template <CollectionFlags flags>
BOOL
Recycler::CollectWithHeuristic()
{
    // The CollectHeuristic_Never flag should only be used with an exhaustive candidate
    Assert((flags & CollectHeuristic_Never) == 0);

    BOOL isScriptContextCloseGCPending = FALSE;
    const BOOL allocSize = flags & CollectHeuristic_AllocSize;
    const BOOL timedIfScriptActive = flags & CollectHeuristic_TimeIfScriptActive;
    const BOOL timedIfInScript = flags & CollectHeuristic_TimeIfInScript;
    const BOOL timed = (timedIfScriptActive && isScriptActive) || (timedIfInScript && isInScript) || (flags & CollectHeuristic_Time);

    if ((flags & CollectOverride_CheckScriptContextClose) != 0)
    {
        isScriptContextCloseGCPending = this->collectionWrapper->GetIsScriptContextCloseGCPending();
    }

    // If there is a script context close GC pending, we need to do a GC regardless.
    // Otherwise, we should check the heuristics to see if a GC is necessary.
    if (!isScriptContextCloseGCPending)
    {
#if ENABLE_PARTIAL_GC
        if (GetPartialFlag<flags>())
        {
            Assert(enablePartialCollect);
            Assert(allocSize);
            Assert(this->uncollectedNewPageCountPartialCollect >= RecyclerSweep::MinPartialUncollectedNewPageCount
                && this->uncollectedNewPageCountPartialCollect <= RecyclerHeuristic::Instance.MaxPartialUncollectedNewPageCount);

            // PARTIAL-GC-REVIEW: For now, we have only the alloc size heuristic.
            // Maybe improve this heuristic by looking at how many free pages are in the page allocator.
            if (autoHeap.uncollectedNewPageCount > this->uncollectedNewPageCountPartialCollect)
            {
                return Collect<flags>();
            }
        }
#endif
        // Allocation byte count heuristic: collect every 1 MB allocated
        if (allocSize && (autoHeap.uncollectedAllocBytes < RecyclerHeuristic::UncollectedAllocBytesCollection()))
        {
            return FinishDisposeObjectsWrapped<flags>();
        }

        // Time heuristic: collect every 1000 clock ticks, or once 64 MB has been allocated in a short time
        if (timed && (autoHeap.uncollectedAllocBytes < RecyclerHeuristic::Instance.MaxUncollectedAllocBytes))
        {
            uint currentTickCount = GetTickCount();
#ifdef RECYCLER_TRACE
            collectionParam.timeDiff = currentTickCount - tickCountNextCollection;
#endif
            // Signed comparison handles GetTickCount() wraparound
            if ((int)(tickCountNextCollection - currentTickCount) >= 0)
            {
                return FinishDisposeObjectsWrapped<flags>();
            }
        }
#ifdef RECYCLER_TRACE
        else
        {
            uint currentTickCount = GetTickCount();
            collectionParam.timeDiff = currentTickCount - tickCountNextCollection;
        }
#endif
    }

    // Passed all the heuristics; do some GC work, maybe
    return Collect<(CollectionFlags)(flags & ~CollectMode_Partial)>();
}
template <CollectionFlags flags>
BOOL
Recycler::Collect()
{
#if ENABLE_CONCURRENT_GC
    if (this->CollectionInProgress())
    {
        // If we are forced in thread, we can't be concurrent.
        // If we are not concurrent, we should have been handled earlier in CollectInternal and we shouldn't be here.
        Assert((flags & CollectOverride_ForceInThread) == 0);
        Assert((flags & CollectMode_Concurrent) != 0);
        return TryFinishConcurrentCollect<flags>();
    }
#endif

    // We clear the flag indicating that there is a GC pending because
    // of script context close, since we're about to do a GC anyway,
    // and the current GC will suffice.
    this->collectionWrapper->ClearIsScriptContextCloseGCPending();

    SetupPostCollectionFlags<flags>();

    const BOOL partial = GetPartialFlag<flags>();
    CollectionFlags finalFlags = flags;
    if (!partial)
    {
        finalFlags = (CollectionFlags)(flags & ~CollectMode_Partial);
    }

    // ExecuteRecyclerCollectionFunction may throw an exception, in which case we may trigger the assert
    // in SetupPostCollectionFlags because we didn't reset the inExhaustiveCollection variable.
    // Use this flag to disable that assertion if an exception occurs.
    DebugOnly(this->hasIncompleteDoCollect = true);

    {
        RECORD_TIMESTAMP(initialCollectionStartTime);
#ifdef NTBUILD
        this->telemetryBlock->initialCollectionStartProcessUsedBytes = PageAllocator::GetProcessUsedBytes();
        this->telemetryBlock->exhaustiveRepeatedCount = 0;
#endif
        return DoCollectWrapped(finalFlags);
    }
}

template <CollectionFlags flags>
void Recycler::SetupPostCollectionFlags()
{
    // If we are not in a collection (collection in progress or in dispose), inExhaustiveCollection should not be set.
    // Otherwise, we have missed an exhaustive collection.
    Assert(this->hasIncompleteDoCollect ||
        this->CollectionInProgress() || this->inDispose || (!this->inExhaustiveCollection && !this->inDecommitNowCollection));

    // Record whether we want to start exhaustive detection or do decommit now after GC
    const BOOL exhaustive = flags & CollectMode_Exhaustive;
    const BOOL decommitNow = flags & CollectMode_DecommitNow;
    const BOOL cacheCleanup = flags & CollectMode_CacheCleanup;
    if (decommitNow)
    {
        this->inDecommitNowCollection = true;
    }
    if (exhaustive)
    {
        this->inExhaustiveCollection = true;
    }
    if (cacheCleanup)
    {
        this->inCacheCleanupCollection = true;
    }
}

BOOL
Recycler::DoCollectWrapped(CollectionFlags flags)
{
#if ENABLE_CONCURRENT_GC
    this->skipStack = ((flags & CollectOverride_SkipStack) != 0);
    DebugOnly(this->isConcurrentGCOnIdle = (flags == CollectOnScriptIdle));
#endif
    this->allowDispose = (flags & CollectOverride_AllowDispose) == CollectOverride_AllowDispose;

    BOOL collected = collectionWrapper->ExecuteRecyclerCollectionFunction(this, &Recycler::DoCollect, flags);

#if ENABLE_CONCURRENT_GC
    Assert(IsConcurrentExecutingState() || IsConcurrentFinishedState() || !CollectionInProgress());
#else
    Assert(!CollectionInProgress());
#endif
    return collected;
}

bool
Recycler::NeedExhaustiveRepeatCollect() const
{
    return this->inExhaustiveCollection && this->hasExhaustiveCandidate;
}
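
// Sketch of the exhaustive-repeat protocol used by DoCollect below: during a
// collection, callbacks may flag new exhaustive candidates (hasExhaustiveCandidate);
// an exhaustive collection then repeats the whole collect loop until a pass completes
// with no new candidate.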
BOOL
Recycler::DoCollect(CollectionFlags flags)
{
    // ExecuteRecyclerCollectionFunction may throw an exception, in which case we may trigger the assert
    // in SetupPostCollectionFlags because we didn't reset the inExhaustiveCollection variable.
    // We are now in DoCollect, so there shouldn't be any more exceptions; reset the flag.
    DebugOnly(this->hasIncompleteDoCollect = false);

#ifdef RECYCLER_MEMORY_VERIFY
    this->Verify(Js::RecyclerPhase);
#endif
#ifdef RECYCLER_FINALIZE_CHECK
    autoHeap.VerifyFinalize();
#endif

#if ENABLE_PARTIAL_GC
    BOOL partial = flags & CollectMode_Partial;
#if DBG && defined(RECYCLER_DUMP_OBJECT_GRAPH)
    // Can't pass in RecyclerPartialStress and DumpObjectGraphOnCollect, or call CollectGarbage with DumpObjectGraph
    if (GetRecyclerFlagsTable().RecyclerPartialStress) {
        Assert(!GetRecyclerFlagsTable().DumpObjectGraphOnCollect && !this->dumpObjectOnceOnCollect);
    } else if (GetRecyclerFlagsTable().DumpObjectGraphOnCollect || this->dumpObjectOnceOnCollect) {
        Assert(!GetRecyclerFlagsTable().RecyclerPartialStress);
    }
#endif
#ifdef RECYCLER_STRESS
    if (partial && GetRecyclerFlagsTable().RecyclerPartialStress)
    {
        this->inPartialCollectMode = true;
        this->forcePartialScanStack = true;
    }
#endif
#endif

#ifdef RECYCLER_DUMP_OBJECT_GRAPH
    if (dumpObjectOnceOnCollect || GetRecyclerFlagsTable().DumpObjectGraphOnCollect)
    {
        DumpObjectGraph();
        dumpObjectOnceOnCollect = false;
#if ENABLE_PARTIAL_GC
        // Can't do a partial collect if DumpObjectGraph is set, since it'll call FinishPartial,
        // which will set inPartialCollectMode to false.
        partial = false;
#endif
    }
#endif

#if ENABLE_CONCURRENT_GC
    const bool concurrent = (flags & CollectMode_Concurrent) != 0;
    const BOOL forceInThread = flags & CollectOverride_ForceInThread;
#else
    const bool concurrent = false;
#endif

    // Flush the pending dispose objects first if dispose is allowed
    Assert(!this->CollectionInProgress());
#if ENABLE_CONCURRENT_GC
    Assert(this->backgroundFinishMarkCount == 0);
#endif
    bool collected = FinishDisposeObjects();

    do
    {
        INC_TIMESTAMP_FIELD(exhaustiveRepeatedCount);
        RECORD_TIMESTAMP(currentCollectionStartTime);
#ifdef NTBUILD
        this->telemetryBlock->currentCollectionStartProcessUsedBytes = PageAllocator::GetProcessUsedBytes();
#endif

#if ENABLE_CONCURRENT_GC
        // DisposeObjects may call script again and start another GC, so we may still be in a concurrent GC state
        if (this->CollectionInProgress())
        {
            Assert(this->IsConcurrentState());
            Assert(collected);
            if (forceInThread)
            {
                return this->FinishConcurrentCollect(flags);
            }
            return true;
        }
        Assert(this->backgroundFinishMarkCount == 0);
#endif

#if DBG
        collectionCount++;
#endif

        collectionState = Collection_PreCollection;
        collectionWrapper->PreCollectionCallBack(flags);
        collectionState = CollectionStateNotCollecting;

        hasExhaustiveCandidate = false; // reset the candidate detection

#ifdef RECYCLER_STATS
#if ENABLE_PARTIAL_GC
        RecyclerCollectionStats oldCollectionStats = collectionStats;
#endif
        memset(&collectionStats, 0, sizeof(RecyclerCollectionStats));
        this->collectionStats.startCollectAllocBytes = autoHeap.uncollectedAllocBytes;
#if ENABLE_PARTIAL_GC
        this->collectionStats.startCollectNewPageCount = autoHeap.uncollectedNewPageCount;
        this->collectionStats.uncollectedNewPageCountPartialCollect = this->uncollectedNewPageCountPartialCollect;
#endif
#endif

#if ENABLE_PARTIAL_GC
        if (partial)
        {
#if ENABLE_CONCURRENT_GC
            Assert(!forceInThread);
#endif
#ifdef RECYCLER_STATS
            // We are only doing a partial GC, so copy some old stats
            collectionStats.finalizeCount = oldCollectionStats.finalizeCount;
            memcpy(collectionStats.heapBlockCount, oldCollectionStats.smallNonLeafHeapBlockPartialUnusedCount,
                sizeof(oldCollectionStats.smallNonLeafHeapBlockPartialUnusedCount));
            memcpy(collectionStats.heapBlockFreeByteCount, oldCollectionStats.smallNonLeafHeapBlockPartialUnusedBytes,
                sizeof(oldCollectionStats.smallNonLeafHeapBlockPartialUnusedBytes));
            memcpy(collectionStats.smallNonLeafHeapBlockPartialUnusedCount, oldCollectionStats.smallNonLeafHeapBlockPartialUnusedCount,
                sizeof(oldCollectionStats.smallNonLeafHeapBlockPartialUnusedCount));
            memcpy(collectionStats.smallNonLeafHeapBlockPartialUnusedBytes, oldCollectionStats.smallNonLeafHeapBlockPartialUnusedBytes,
                sizeof(oldCollectionStats.smallNonLeafHeapBlockPartialUnusedBytes));
#endif
            Assert(enablePartialCollect && inPartialCollectMode);
            if (!this->PartialCollect(concurrent))
            {
                return collected;
            }

            // This disables partial if we do a repeated exhaustive GC
            partial = false;
            collected = true;
            continue;
        }

        // Not doing a partial collect, so we should decommit on finish collect
        decommitOnFinish = true;
        if (inPartialCollectMode)
        {
            // Finish the partial collect first
            FinishPartialCollect();

            // Old heap blocks with free objects are made available; count that as being collected
            collected = true;

            // PARTIAL-GC-CONSIDER: should we just pretend we did a GC, since we have made the free-listed objects
            // available to be used, instead of starting off another GC?
        }
#endif
  3435. #if ENABLE_CONCURRENT_GC
  3436. bool skipConcurrent = false;
  3437. #ifdef ENABLE_DEBUG_CONFIG_OPTIONS
  3438. // If the below flag is passed in, skip doing a non-blocking concurrent collect. Instead,
  3439. // we will do a blocking concurrent collect, which is basically an in-thread GC
  3440. skipConcurrent = GetRecyclerFlagsTable().ForceBlockingConcurrentCollect;
  3441. #endif
  3442. // We are about to start a collection. Reset our heuristic counters now, so that
  3443. // any allocations that occur during concurrent collection count toward the next collection's threshold.
  3444. ResetHeuristicCounters();
  3445. if (concurrent && !skipConcurrent)
  3446. {
  3447. Assert(!forceInThread);
  3448. if (enableConcurrentMark)
  3449. {
  3450. if (StartBackgroundMarkCollect())
  3451. {
  3452. // Tell the caller whether we have finish a collection and there maybe free object to reuse
  3453. return collected;
  3454. }
  3455. // Either ResetWriteWatch failed or the thread service failed
  3456. // So concurrent mark is disabled, at least for now
  3457. }
  3458. if (enableConcurrentSweep)
  3459. {
  3460. if (StartConcurrentSweepCollect())
  3461. {
  3462. collected = true;
  3463. continue;
  3464. }
  3465. // out of memory during collection
  3466. return collected;
  3467. }
  3468. // concurrent collection failed, default back to non-concurrent collection
  3469. }
  3470. if (!forceInThread && enableConcurrentMark)
  3471. {
  3472. if (!CollectOnConcurrentThread())
  3473. {
  3474. // time out or out of memory during collection
  3475. return collected;
  3476. }
  3477. }
  3478. else
  3479. #endif
  3480. {
  3481. if (!CollectOnAllocatorThread())
  3482. {
  3483. // out of memory during collection
  3484. return collected;
  3485. }
  3486. }
  3487. collected = true;
  3488. #ifdef RECYCLER_TRACE
  3489. collectionParam.repeat = true;
  3490. #endif
  3491. }
  3492. while (this->NeedExhaustiveRepeatCollect());
  3493. #if ENABLE_CONCURRENT_GC
  3494. // DisposeObject may call script again and start another GC, so we may still be in concurrent GC state
  3495. if (this->CollectionInProgress())
  3496. {
  3497. Assert(this->IsConcurrentState());
  3498. Assert(collected);
  3499. return true;
  3500. }
  3501. #endif
  3502. EndCollection();
  3503. // Tell the caller whether we have finish a collection and there maybe free object to reuse
  3504. return collected;
  3505. }
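// Reviewer note (not in the original source): the driver above repeats while
// NeedExhaustiveRepeatCollect() holds, so an exhaustive collection request is
// effectively this loop in sketch form:
//
//     do
//     {
//         // one partial / concurrent / in-thread collection attempt
//     }
//     while (recycler->NeedExhaustiveRepeatCollect());
//
// Each iteration may downgrade itself (e.g. "partial = false" after one partial
// pass), so a repeated exhaustive GC always ends with a full collection.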
void
Recycler::EndCollection()
{
#if ENABLE_CONCURRENT_GC
    Assert(this->backgroundFinishMarkCount == 0);
#endif
    Assert(!this->CollectionInProgress());

    // No more collection is requested, we can turn exhaustive back off
    this->inExhaustiveCollection = false;

    if (this->inDecommitNowCollection || CUSTOM_CONFIG_FLAG(GetRecyclerFlagsTable(), ForceDecommitOnCollect))
    {
#ifdef RECYCLER_TRACE
        if (GetRecyclerFlagsTable().Trace.IsEnabled(Js::RecyclerPhase))
        {
            Output::Print(_u("%04X> RC(%p): %s\n"), this->mainThreadId, this, _u("Decommit now"));
        }
#endif
        ForEachPageAllocator([](IdleDecommitPageAllocator* pageAlloc)
        {
            pageAlloc->DecommitNow();
        });

        this->inDecommitNowCollection = false;
    }

    RECORD_TIMESTAMP(lastCollectionEndTime);
}
#if ENABLE_PARTIAL_GC
bool
Recycler::PartialCollect(bool concurrent)
{
    Assert(IsMarkStackEmpty());
    Assert(this->inPartialCollectMode);
    Assert(collectionState == CollectionStateNotCollecting);

    // Rescan again
    collectionState = CollectionStateRescanFindRoots;

#if ENABLE_CONCURRENT_GC
    if (concurrent && enableConcurrentMark && this->partialConcurrentNextCollection)
    {
        this->PrepareBackgroundFindRoots();
        if (StartConcurrent(CollectionStateConcurrentFinishMark))
        {
#ifdef RECYCLER_TRACE
            PrintCollectTrace(Js::ConcurrentPartialCollectPhase);
#endif
            return false;
        }
        this->RevertPrepareBackgroundFindRoots();
    }
#endif

#ifdef RECYCLER_STRESS
    if (forcePartialScanStack)
    {
        // Mark the roots, since they may not have been marked
        // in RecyclerPartialStress mode
        this->RootMark(collectionState);
    }
#endif

#ifdef RECYCLER_TRACE
    PrintCollectTrace(Js::PartialCollectPhase);
#endif

    bool needConcurrentSweep = false;
    this->CollectionBegin<Js::PartialCollectPhase>();

    size_t rescanRootBytes = FinishMark(INFINITE);
    Assert(rescanRootBytes != Recycler::InvalidScanRootBytes);
    needConcurrentSweep = this->Sweep(rescanRootBytes, concurrent, true);

    this->CollectionEnd<Js::PartialCollectPhase>();

    // Only reset the new page counter
    autoHeap.uncollectedNewPageCount = 0;

    // Finish collection
    FinishCollection(needConcurrentSweep);
    return true;
}
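// Reviewer note (not in the original source): PartialCollect's return value is
// easy to misread. Returning false means the collection was handed off to the
// concurrent thread (StartConcurrent succeeded) and is still in flight;
// returning true means the rescan/mark/sweep completed in-thread and
// FinishCollection has already run. A caller sketch, using the collect loop's
// locals from above:
//
//     if (!this->PartialCollect(concurrent))
//     {
//         return collected;   // background thread now owns the collection
//     }
//     // in-thread partial collect finished; safe to continue the loop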
void
Recycler::ProcessClientTrackedObjects()
{
    GCETW(GC_PROCESS_CLIENT_TRACKED_OBJECT_START, (this));
    Assert(this->inPartialCollectMode);
#if ENABLE_CONCURRENT_GC
    Assert(!this->DoQueueTrackedObject());
#endif
    if (!this->clientTrackedObjectList.Empty())
    {
        SListBase<void *>::Iterator iter(&this->clientTrackedObjectList);
        while (iter.Next())
        {
            auto& reference = iter.Data();
            this->TryMarkNonInterior(reference, &reference /* parentReference */); // Reference to inside the node
            RECYCLER_STATS_INC(this, clientTrackedObjectCount);
        }
        this->clientTrackedObjectList.Clear(&this->clientTrackedObjectAllocator);
    }
    GCETW(GC_PROCESS_CLIENT_TRACKED_OBJECT_STOP, (this));
}
void
Recycler::ClearPartialCollect()
{
#if ENABLE_CONCURRENT_GC
    Assert(!this->DoQueueTrackedObject());
#endif
    this->autoHeap.unusedPartialCollectFreeBytes = 0;
    this->partialUncollectedAllocBytes = 0;
    this->clientTrackedObjectList.Clear(&this->clientTrackedObjectAllocator);
    this->uncollectedNewPageCountPartialCollect = (size_t)-1;
}
void
Recycler::FinishPartialCollect(RecyclerSweep * recyclerSweep)
{
    Assert(recyclerSweep == nullptr || !recyclerSweep->IsBackground());
    RECYCLER_PROFILE_EXEC_BEGIN(this, Js::FinishPartialPhase);
    Assert(inPartialCollectMode);
#if ENABLE_CONCURRENT_GC
    Assert(!this->DoQueueTrackedObject());
#endif
    autoHeap.FinishPartialCollect(recyclerSweep);
    this->inPartialCollectMode = false;
    ClearPartialCollect();
    RECYCLER_PROFILE_EXEC_END(this, Js::FinishPartialPhase);
}
#endif
void
Recycler::EnsureNotCollecting()
{
#if ENABLE_CONCURRENT_GC
    FinishConcurrent<ForceFinishCollection>();
#endif
    Assert(!this->CollectionInProgress());
}
void Recycler::EnumerateObjects(ObjectInfoBits infoBits, void (*CallBackFunction)(void * address, size_t size))
{
    // Make sure we are not collecting
    EnsureNotCollecting();

#if ENABLE_PARTIAL_GC
    // We are updating the free bit vector, messing up the partial collection state.
    // Just get out of partial collect mode.
    // GC-CONSIDER: consider adding an option in FinishConcurrent to not get into partial collect mode during sweep.
    if (inPartialCollectMode)
    {
        FinishPartialCollect();
    }
#endif
    autoHeap.EnumerateObjects(infoBits, CallBackFunction);

    // GC-TODO: Explicit heap?
}
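// Reviewer note (not in the original source): a minimal usage sketch for
// EnumerateObjects. The callback is a plain function pointer, so a capture-less
// lambda converts implicitly; LeafBit is just an illustrative filter here --
// any ObjectInfoBits value the heap understands can be passed.
//
//     recycler->EnumerateObjects(LeafBit, [](void * address, size_t size)
//     {
//         Output::Print(_u("object %p, %u bytes\n"), address, (uint)size);
//     });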
BOOL
Recycler::IsMarkState() const
{
    return (collectionState & Collection_Mark);
}

BOOL
Recycler::IsFindRootsState() const
{
    return (collectionState & Collection_FindRoots);
}

#if DBG
BOOL
Recycler::IsReentrantState() const
{
#if ENABLE_CONCURRENT_GC
    return !this->CollectionInProgress() || this->IsConcurrentState();
#else
    return !this->CollectionInProgress();
#endif
}
#endif
#if defined(ENABLE_JS_ETW) && defined(NTBUILD)
template <Js::Phase phase> static ETWEventGCActivationKind GetETWEventGCActivationKind();
template <> ETWEventGCActivationKind GetETWEventGCActivationKind<Js::GarbageCollectPhase>() { return ETWEvent_GarbageCollect; }
template <> ETWEventGCActivationKind GetETWEventGCActivationKind<Js::ThreadCollectPhase>() { return ETWEvent_ThreadCollect; }
template <> ETWEventGCActivationKind GetETWEventGCActivationKind<Js::ConcurrentCollectPhase>() { return ETWEvent_ConcurrentCollect; }
template <> ETWEventGCActivationKind GetETWEventGCActivationKind<Js::PartialCollectPhase>() { return ETWEvent_PartialCollect; }
#endif

template <Js::Phase phase>
void
Recycler::CollectionBegin()
{
    RECYCLER_PROFILE_EXEC_BEGIN2(this, Js::RecyclerPhase, phase);
    GCETW_INTERNAL(GC_START, (this, GetETWEventGCActivationKind<phase>()));
}

template <Js::Phase phase>
void
Recycler::CollectionEnd()
{
    GCETW_INTERNAL(GC_STOP, (this, GetETWEventGCActivationKind<phase>()));
    RECYCLER_PROFILE_EXEC_END2(this, phase, Js::RecyclerPhase);
}
#if ENABLE_CONCURRENT_GC
size_t
Recycler::BackgroundRescan(RescanFlags rescanFlags)
{
    Assert(!this->isProcessingRescan);
    DebugOnly(this->isProcessingRescan = true);

    GCETW(GC_BACKGROUNDRESCAN_START, (this, backgroundRescanCount));
    RECYCLER_PROFILE_EXEC_BACKGROUND_BEGIN(this, Js::BackgroundRescanPhase);

#if GLOBAL_ENABLE_WRITE_BARRIER
    if (CONFIG_FLAG(ForceSoftwareWriteBarrier))
    {
        pendingWriteBarrierBlockMap.LockResize();
        pendingWriteBarrierBlockMap.Map([](void* address, size_t size)
        {
            RecyclerWriteBarrierManager::WriteBarrier(address, size);
        });
        pendingWriteBarrierBlockMap.UnlockResize();
    }
#endif

    size_t rescannedPageCount = heapBlockMap.Rescan(this, ((rescanFlags & RescanFlags_ResetWriteWatch) != 0));
    rescannedPageCount += autoHeap.Rescan(rescanFlags);

    RECYCLER_PROFILE_EXEC_BACKGROUND_END(this, Js::BackgroundRescanPhase);
    GCETW(GC_BACKGROUNDRESCAN_STOP, (this, backgroundRescanCount));

    this->backgroundRescanCount++;

    if (!this->NeedOOMRescan())
    {
        if ((rescanFlags & RescanFlags_ResetWriteWatch) != 0)
        {
            DebugOnly(this->isProcessingRescan = false);
        }
        return rescannedPageCount;
    }

    DebugOnly(this->isProcessingRescan = false);
    return Recycler::InvalidScanRootBytes;
}
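// Reviewer note (not in the original source): BackgroundRescan signals failure
// through the sentinel Recycler::InvalidScanRootBytes rather than a separate
// flag, so callers must compare against it before trusting the count, e.g.:
//
//     size_t pages = this->BackgroundRescan(RescanFlags_ResetWriteWatch);
//     if (pages == Recycler::InvalidScanRootBytes)
//     {
//         // a mark-stack OOM set NeedOOMRescan(); fall back to in-thread work
//     }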
void
Recycler::BackgroundResetWriteWatchAll()
{
    GCETW(GC_BACKGROUNDRESETWRITEWATCH_START, (this, -1));
    heapBlockMap.ResetDirtyPages(this);
    GCETW(GC_BACKGROUNDRESETWRITEWATCH_STOP, (this, -1));
}
#endif
size_t
Recycler::FinishMarkRescan(bool background)
{
#if !ENABLE_CONCURRENT_GC
    Assert(!background);
#endif

    if (background)
    {
        GCETW(GC_BACKGROUNDRESCAN_START, (this, 0));
    }
    else
    {
        GCETW(GC_RESCAN_START, (this));
    }
    RECYCLER_PROFILE_EXEC_THREAD_BEGIN(background, this, Js::RescanPhase);

#if ENABLE_CONCURRENT_GC
    RescanFlags const flags = (background ? RescanFlags_ResetWriteWatch : RescanFlags_None);
#else
    Assert(!background);
    RescanFlags const flags = RescanFlags_None;
#endif

#if DBG
    Assert(!this->isProcessingRescan);
    this->isProcessingRescan = true;
#endif

#if ENABLE_CONCURRENT_GC
    size_t scannedPageCount = heapBlockMap.Rescan(this, ((flags & RescanFlags_ResetWriteWatch) != 0));
    scannedPageCount += autoHeap.Rescan(flags);
#else
    size_t scannedPageCount = 0;
#endif

    DebugOnly(this->isProcessingRescan = false);

    RECYCLER_PROFILE_EXEC_THREAD_END(background, this, Js::RescanPhase);
    if (background)
    {
        GCETW(GC_BACKGROUNDRESCAN_STOP, (this, 0));
    }
    else
    {
        GCETW(GC_RESCAN_STOP, (this));
    }
    return scannedPageCount;
}
#if ENABLE_CONCURRENT_GC
void
Recycler::ProcessTrackedObjects()
{
    GCETW(GC_PROCESS_TRACKED_OBJECT_START, (this));
#if ENABLE_PARTIAL_GC
    Assert(this->clientTrackedObjectList.Empty());
    Assert(!this->inPartialCollectMode);
#endif
    Assert(this->DoQueueTrackedObject());
    this->queueTrackedObject = false;
    DebugOnly(this->isProcessingTrackedObjects = true);

    markContext.ProcessTracked();

    // If we did a parallel mark, we need to process any queued tracked objects from the parallel mark stack as well.
    // If we didn't, this will do nothing.
    parallelMarkContext1.ProcessTracked();
    parallelMarkContext2.ProcessTracked();
    parallelMarkContext3.ProcessTracked();

    DebugOnly(this->isProcessingTrackedObjects = false);
    GCETW(GC_PROCESS_TRACKED_OBJECT_STOP, (this));
}
#endif
BOOL
Recycler::RequestConcurrentWrapperCallback()
{
#if ENABLE_CONCURRENT_GC
    Assert(!IsConcurrentExecutingState());

    // Save the original collection state
    CollectionState oldState = this->collectionState;

    // Get the background thread to start the callback
    if (StartConcurrent(CollectionStateConcurrentWrapperCallback))
    {
        // Wait for the callback to complete
        WaitForConcurrentThread(INFINITE);

        // The state must not change back until we restore the original state
        Assert(collectionState == CollectionStateConcurrentWrapperCallback);
        this->collectionState = oldState;
        return true;
    }
#endif
    return false;
}
#if ENABLE_CONCURRENT_GC
/*------------------------------------------------------------------------------------------------
 * Concurrent
 *------------------------------------------------------------------------------------------------*/
BOOL
Recycler::CollectOnConcurrentThread()
{
#if ENABLE_PARTIAL_GC
    Assert(!inPartialCollectMode);
#endif
#ifdef RECYCLER_TRACE
    PrintCollectTrace(Js::ThreadCollectPhase);
#endif
    this->CollectionBegin<Js::ThreadCollectPhase>();

    // Synchronous concurrent mark
    if (!StartSynchronousBackgroundMark())
    {
        this->CollectionEnd<Js::ThreadCollectPhase>();
        return false;
    }

    const DWORD waitTime = RecyclerHeuristic::FinishConcurrentCollectWaitTime(this->GetRecyclerFlagsTable());
    GCETW(GC_SYNCHRONOUSMARKWAIT_START, (this, waitTime));
    const BOOL waited = WaitForConcurrentThread(waitTime);
    GCETW(GC_SYNCHRONOUSMARKWAIT_STOP, (this, !waited));
    if (!waited)
    {
#ifdef RECYCLER_TRACE
        if (GetRecyclerFlagsTable().Trace.IsEnabled(Js::RecyclerPhase)
            || GetRecyclerFlagsTable().Trace.IsEnabled(Js::ThreadCollectPhase))
        {
            Output::Print(_u("%04X> RC(%p): %s: %s\n"), this->mainThreadId, this, Js::PhaseNames[Js::ThreadCollectPhase], _u("Timeout"));
        }
#endif
        this->CollectionEnd<Js::ThreadCollectPhase>();
        return false;
    }

    // If the concurrent thread was done within the time limit, there shouldn't be
    // any objects that need to be rescanned.
    // CONCURRENT-TODO: Optimize it so we don't rescan in the background if we are still waiting.
    // GC-TODO: Unfortunately we can't assert this, as the background code gen thread may still
    // touch GC memory (e.g. FunctionBody), causing write watch and rescan
    // in the background.
    // Assert(markContext.Empty());

    DebugOnly(this->isProcessingRescan = false);

    this->collectionState = CollectionStateMark;
    this->ProcessTrackedObjects();
    this->ProcessMark(false);
    this->EndMark();

    // Partial collect mode is not re-enabled after a non-partial in-thread GC because partial GC heuristics are not adjusted
    // after a full in-thread GC. Enabling partial collect mode causes partial GC heuristics to be reset before the next full
    // in-thread GC, thereby allowing partial GC to kick in more easily without being able to adjust heuristics after the full
    // GCs. Until we have a way of adjusting partial GC heuristics after a full in-thread GC, once partial collect mode is
    // turned off, it will remain off until a concurrent GC happens.
    this->Sweep();

    this->CollectionEnd<Js::ThreadCollectPhase>();
    FinishCollection();
    return true;
}
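// Reviewer note (not in the original source): "synchronous concurrent mark"
// here means the background thread does the marking while the calling thread
// blocks on concurrentWorkDoneEvent with a bounded wait. In sketch form:
//
//     main thread                           background thread
//     StartSynchronousBackgroundMark() -->  mark (roots were found in-thread)
//     WaitForConcurrentThread(waitTime)     ...
//     (on success) ProcessMark + Sweep  <-- SetEvent(concurrentWorkDoneEvent)
//
// On timeout the attempt is abandoned and the caller reports failure.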
// explicit instantiation
template BOOL Recycler::FinishConcurrent<FinishConcurrentOnIdle>();
template BOOL Recycler::FinishConcurrent<FinishConcurrentOnIdleAtRoot>();
template BOOL Recycler::FinishConcurrent<FinishConcurrentDefault>();
template BOOL Recycler::FinishConcurrent<ForceFinishCollection>();

template <CollectionFlags flags>
BOOL
Recycler::FinishConcurrent()
{
    CompileAssert((flags & ~(CollectOverride_AllowDispose | CollectOverride_ForceFinish | CollectOverride_ForceInThread
        | CollectMode_Concurrent | CollectOverride_DisableIdleFinish | CollectOverride_BackgroundFinishMark
        | CollectOverride_SkipStack | CollectOverride_FinishConcurrentTimeout)) == 0);

    if (this->CollectionInProgress())
    {
        Assert(this->IsConcurrentEnabled());
        Assert(IsConcurrentState());

        const BOOL forceFinish = flags & CollectOverride_ForceFinish;
        if (forceFinish || !IsConcurrentExecutingState())
        {
#if ENABLE_BACKGROUND_PAGE_FREEING
            if (CONFIG_FLAG(EnableBGFreeZero))
            {
                if (this->collectionState == CollectionStateConcurrentSweep)
                {
                    // Help the background thread to zero and flush zero pages
                    // if we are going to wait anyway.
                    recyclerPageAllocator.ZeroQueuedPages();
                    recyclerLargeBlockPageAllocator.ZeroQueuedPages();
#ifdef RECYCLER_WRITE_BARRIER_ALLOC_SEPARATE_PAGE
                    recyclerWithBarrierPageAllocator.ZeroQueuedPages();
#endif
                    this->FlushBackgroundPages();
                }
            }
#endif
#ifdef RECYCLER_TRACE
            collectionParam.finishOnly = true;
            collectionParam.flags = flags;
#endif
#if ENABLE_CONCURRENT_GC
            // If SkipStack is provided, and we're not forcing the finish (i.e. we're not in concurrent executing state),
            // then it's fine to set the skipStack flag to true, so that during the in-thread find-roots, we'll skip
            // the stack scan.
            this->skipStack = ((flags & CollectOverride_SkipStack) != 0) && !forceFinish;
#if DBG
            this->isFinishGCOnIdle = (flags == FinishConcurrentOnIdleAtRoot);
#endif
#endif
            return FinishConcurrentCollectWrapped(flags);
        }
    }
    return false;
}
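// Reviewer note (not in the original source): the explicit instantiations above
// are the flag sets call sites actually use, e.g.:
//
//     recycler->FinishConcurrent<FinishConcurrentDefault>();   // opportunistic finish
//     recycler->FinishConcurrent<ForceFinishCollection>();     // block until finished
//
// Any flag outside the allow-list at the top of the template trips the
// CompileAssert at build time rather than failing at run time.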
template <CollectionFlags flags>
BOOL
Recycler::TryFinishConcurrentCollect()
{
    Assert(this->CollectionInProgress());
    RECYCLER_STATS_INC(this, finishCollectTryCount);

    SetupPostCollectionFlags<flags>();

    const BOOL concurrent = flags & CollectMode_Concurrent;
    const BOOL forceInThread = flags & CollectOverride_ForceInThread;

    Assert(this->IsConcurrentEnabled());
    Assert(IsConcurrentState() || IsCollectionDisabled());
    Assert(!concurrent || !forceInThread);

    if (concurrent && concurrentThread != NULL)
    {
        if (IsConcurrentExecutingState())
        {
            if (!this->priorityBoost)
            {
                uint tickCount = GetTickCount();
                if ((autoHeap.uncollectedAllocBytes > RecyclerHeuristic::Instance.UncollectedAllocBytesConcurrentPriorityBoost)
                    || (tickCount - this->tickCountStartConcurrent > RecyclerHeuristic::PriorityBoostTimeout(this->GetRecyclerFlagsTable())))
                {
#ifdef RECYCLER_TRACE
                    if (GetRecyclerFlagsTable().Trace.IsEnabled(Js::RecyclerPhase))
                    {
                        Output::Print(_u("%04X> RC(%p): %s: "), this->mainThreadId, this, _u("Set priority normal"));
                        if (autoHeap.uncollectedAllocBytes > RecyclerHeuristic::Instance.UncollectedAllocBytesConcurrentPriorityBoost)
                        {
                            Output::Print(_u("AllocBytes=%d (Time=%d)\n"), autoHeap.uncollectedAllocBytes, tickCount - this->tickCountStartConcurrent);
                        }
                        else
                        {
                            Output::Print(_u("Time=%d (AllocBytes=%d)\n"), tickCount - this->tickCountStartConcurrent, autoHeap.uncollectedAllocBytes);
                        }
                    }
#endif
                    // Remember that we have boosted so we don't set the thread priority again
                    this->priorityBoost = true;

                    // The recycler thread hasn't come back in 5 seconds.
                    // It either has a large object graph, or it is starving.
                    // Set the priority back to normal.
                    SetThreadPriority(this->concurrentThread, THREAD_PRIORITY_NORMAL);
                }
            }
            return FinishDisposeObjectsWrapped<flags>();
        }
        else if ((flags & CollectOverride_FinishConcurrentTimeout) != 0)
        {
            uint tickCount = GetTickCount();

            // If we haven't gone past the time to call finish collection,
            // simply call FinishDisposeObjects and return.
            // Otherwise, actually go ahead and call FinishConcurrentCollectWrapped.
            // We do this only if this is a collection that allows finish concurrent to time out;
            // if not, by default, we finish the collection.
            if (tickCount <= this->tickCountNextFinishCollection)
            {
                return FinishDisposeObjectsWrapped<flags>();
            }
        }
    }
    return FinishConcurrentCollectWrapped(flags);
}
BOOL
Recycler::IsConcurrentMarkState() const
{
    return (collectionState & Collection_ConcurrentMark) == Collection_ConcurrentMark;
}

BOOL
Recycler::IsConcurrentMarkExecutingState() const
{
    return (collectionState & (Collection_ConcurrentMark | Collection_ExecutingConcurrent)) == (Collection_ConcurrentMark | Collection_ExecutingConcurrent);
}

BOOL
Recycler::IsConcurrentResetMarksState() const
{
    return collectionState == CollectionStateConcurrentResetMarks;
}

BOOL
Recycler::IsInThreadFindRootsState() const
{
    CollectionState currentCollectionState = collectionState;
    return (currentCollectionState & Collection_FindRoots) && (currentCollectionState != CollectionStateConcurrentFindRoots);
}

BOOL
Recycler::IsConcurrentFindRootState() const
{
    return collectionState == CollectionStateConcurrentFindRoots;
}

BOOL
Recycler::IsConcurrentExecutingState() const
{
    return (collectionState & Collection_ExecutingConcurrent);
}

BOOL
Recycler::IsConcurrentSweepExecutingState() const
{
    return (collectionState & (Collection_ConcurrentSweep | Collection_ExecutingConcurrent)) == (Collection_ConcurrentSweep | Collection_ExecutingConcurrent);
}

BOOL
Recycler::IsConcurrentSweepSetupState() const
{
    return (collectionState & CollectionStateSetupConcurrentSweep) == CollectionStateSetupConcurrentSweep;
}

BOOL
Recycler::IsConcurrentSweepState() const
{
    return this->collectionState == CollectionStateConcurrentSweep;
}

BOOL
Recycler::IsConcurrentState() const
{
    return (collectionState & Collection_Concurrent);
}

#if DBG
BOOL
Recycler::IsConcurrentFinishedState() const
{
    return (collectionState & Collection_FinishConcurrent);
}
#endif
bool
Recycler::InitializeConcurrent(JsUtil::ThreadService *threadService)
{
    try
    {
        AUTO_NESTED_HANDLED_EXCEPTION_TYPE(ExceptionType_OutOfMemory);

        concurrentWorkDoneEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
        if (concurrentWorkDoneEvent == nullptr)
        {
            throw Js::OutOfMemoryException();
        }

#if DBG_DUMP
        markContext.GetPageAllocator()->debugName = _u("ConcurrentCollect");
#endif

        if (!threadService->HasCallback())
        {
#ifdef IDLE_DECOMMIT_ENABLED
            concurrentIdleDecommitEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
            if (concurrentIdleDecommitEvent == nullptr)
            {
                throw Js::OutOfMemoryException();
            }
#endif
            concurrentWorkReadyEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
            if (concurrentWorkReadyEvent == nullptr)
            {
                throw Js::OutOfMemoryException();
            }
        }
    }
    catch (Js::OutOfMemoryException)
    {
        Assert(concurrentWorkReadyEvent == nullptr);
        if (concurrentWorkDoneEvent)
        {
            CloseHandle(concurrentWorkDoneEvent);
            concurrentWorkDoneEvent = nullptr;
        }
#ifdef IDLE_DECOMMIT_ENABLED
        if (concurrentIdleDecommitEvent)
        {
            CloseHandle(concurrentIdleDecommitEvent);
            concurrentIdleDecommitEvent = nullptr;
        }
#endif
        return false;
    }
    return true;
}
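// Reviewer note (not in the original source): every event here is created with
// CreateEvent(NULL, FALSE, FALSE, NULL), i.e. auto-reset and initially
// non-signaled. Auto-reset matters for the work-ready/work-done handshake:
// each SetEvent releases exactly one waiter and the event rearms itself, so
// no explicit ResetEvent is needed between collections.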
#pragma prefast(suppress:6262, "Callers of this function should have ample stack space")
bool Recycler::AbortConcurrent(bool restoreState)
{
    Assert(!this->CollectionInProgress() || this->IsConcurrentState());

    // In case the thread already died, wait for that too
    HANDLE handle[2] = { concurrentWorkDoneEvent, concurrentThread };

    // Note, concurrentThread will be null if we have a threadService.
    Assert(concurrentThread != NULL || threadService->HasCallback());
    DWORD handleCount = (concurrentThread == NULL ? 1 : 2);

    DWORD ret = WAIT_OBJECT_0;
    if (this->IsConcurrentState())
    {
        this->isAborting = true;
        if (this->concurrentThread != NULL)
        {
            SetThreadPriority(this->concurrentThread, THREAD_PRIORITY_NORMAL);
        }
        ret = WaitForMultipleObjectsEx(handleCount, handle, FALSE, INFINITE, FALSE);
        this->isAborting = false;

        Assert(this->IsConcurrentFinishedState() || ret == WAIT_OBJECT_0 + 1);

        if (ret == WAIT_OBJECT_0 && restoreState)
        {
            if (collectionState == CollectionStateRescanWait)
            {
                this->ResetMarkCollectionState();
            }
            else if (collectionState == CollectionStateTransferSweptWait)
            {
                // Make sure we don't do another GC after finishing this one.
                this->inExhaustiveCollection = false;

                // Let's just finish the sweep so that the GC is in a consistent state, but don't run dispose.
                // AbortConcurrent already consumed the event from the concurrent thread; just signal it so
                // FinishConcurrentCollect can wait for it again.
                SetEvent(this->concurrentWorkDoneEvent);
                EnsureNotCollecting();
            }
            else
            {
                Assert(UNREACHED);
            }
            Assert(collectionState == CollectionStateNotCollecting);
            Assert(this->isProcessingRescan == false);
        }
        else
        {
            // Even if we weren't asked to restore state, we need to clean up the pending guest arenas
            CleanupPendingUnroot();

            // Also need to release any pages held by the mark stack, if we abandoned it
            markContext.Abort();
        }
    }
    Assert(!this->hasPendingDeleteGuestArena);
    return ret == WAIT_OBJECT_0;
}
void
Recycler::CleanupPendingUnroot()
{
    Assert(!this->hasPendingConcurrentFindRoot);
    if (hasPendingUnpinnedObject)
    {
        pinnedObjectMap.MapAndRemoveIf([](void * obj, PinRecord const &refCount)
        {
#if defined(CHECK_MEMORY_LEAK) || defined(LEAK_REPORT)
#ifdef STACK_BACK_TRACE
            Assert(refCount != 0 || refCount.stackBackTraces == nullptr);
#endif
#endif
            return refCount == 0;
        });
        hasPendingUnpinnedObject = false;
    }

    if (hasPendingDeleteGuestArena)
    {
        DebugOnly(bool foundPendingDelete = false);
        DListBase<GuestArenaAllocator>::EditingIterator guestArenaIter(&guestArenaList);
        while (guestArenaIter.Next())
        {
            GuestArenaAllocator& allocator = guestArenaIter.Data();
            if (allocator.pendingDelete)
            {
                allocator.SetLockBlockList(false);
                guestArenaIter.RemoveCurrent(&HeapAllocator::Instance);
                DebugOnly(foundPendingDelete = true);
            }
        }
        hasPendingDeleteGuestArena = false;
        Assert(foundPendingDelete);
    }
#if DBG
    else
    {
        DListBase<GuestArenaAllocator>::Iterator guestArenaIter(&guestArenaList);
        while (guestArenaIter.Next())
        {
            GuestArenaAllocator& allocator = guestArenaIter.Data();
            Assert(!allocator.pendingDelete);
        }
    }
#endif
}
void
Recycler::FinalizeConcurrent(bool restoreState)
{
    bool needCleanExitState = restoreState;
#if defined(RECYCLER_DUMP_OBJECT_GRAPH)
    needCleanExitState = needCleanExitState || GetRecyclerFlagsTable().DumpObjectGraphOnExit;
#endif
#ifdef LEAK_REPORT
    needCleanExitState = needCleanExitState || GetRecyclerFlagsTable().IsEnabled(Js::LeakReportFlag);
#endif
#ifdef CHECK_MEMORY_LEAK
    needCleanExitState = needCleanExitState || GetRecyclerFlagsTable().CheckMemoryLeak;
#endif

    bool aborted = AbortConcurrent(needCleanExitState);

    collectionState = CollectionStateExit;

    if (aborted && this->concurrentThread != NULL)
    {
        // In case the thread already died, wait for that too
        HANDLE handle[2] = { concurrentWorkDoneEvent, concurrentThread };
        SetEvent(concurrentWorkReadyEvent);
        SetThreadPriority(this->concurrentThread, THREAD_PRIORITY_NORMAL);
        DWORD fRet = WaitForMultipleObjectsEx(2, handle, FALSE, INFINITE, FALSE);
        AssertMsg(fRet != WAIT_FAILED, "Check handles passed to WaitForMultipleObjectsEx.");
    }

    // Shutdown parallel threads and return the handle for them so the caller can
    // close it.
    parallelThread1.Shutdown();
    parallelThread2.Shutdown();

#ifdef IDLE_DECOMMIT_ENABLED
    if (concurrentIdleDecommitEvent != nullptr)
    {
        CloseHandle(concurrentIdleDecommitEvent);
        concurrentIdleDecommitEvent = nullptr;
    }
#endif
    CloseHandle(concurrentWorkDoneEvent);
    concurrentWorkDoneEvent = nullptr;

    if (concurrentWorkReadyEvent != NULL)
    {
        CloseHandle(concurrentWorkReadyEvent);
        concurrentWorkReadyEvent = nullptr;
    }

    if (needCleanExitState)
    {
        // We may do another marking pass to look for memory leaks;
        // since we have shut down the concurrent thread, don't do a parallel mark.
        this->enableConcurrentMark = false;
        this->enableParallelMark = false;
        this->enableConcurrentSweep = false;
    }

    this->threadService = nullptr;

    if (concurrentThread != NULL)
    {
        CloseHandle(concurrentThread);
        this->concurrentThread = nullptr;
    }
}
bool
Recycler::EnableConcurrent(JsUtil::ThreadService *threadService, bool startAllThreads)
{
    if (this->disableConcurrent)
    {
        return false;
    }

    if (!this->InitializeConcurrent(threadService))
    {
        return false;
    }

#if ENABLE_DEBUG_CONFIG_OPTIONS
    this->enableConcurrentMark = !CUSTOM_PHASE_OFF1(GetRecyclerFlagsTable(), Js::ConcurrentMarkPhase);
    this->enableParallelMark = !CUSTOM_PHASE_OFF1(GetRecyclerFlagsTable(), Js::ParallelMarkPhase);
    this->enableConcurrentSweep = !CUSTOM_PHASE_OFF1(GetRecyclerFlagsTable(), Js::ConcurrentSweepPhase);
#else
    this->enableConcurrentMark = true;
    this->enableParallelMark = true;
    this->enableConcurrentSweep = true;
#endif

    if (this->enableParallelMark && this->maxParallelism == 1)
    {
        // Disable parallel mark if only 1 CPU
        this->enableParallelMark = false;
    }

    if (threadService->HasCallback())
    {
        this->threadService = threadService;
        return true;
    }
    else
    {
        bool startConcurrentThread = true;
        bool startedParallelThread1 = false;
        bool startedParallelThread2 = false;

        if (startAllThreads)
        {
            if (this->enableParallelMark && this->maxParallelism > 2)
            {
                if (!parallelThread1.EnableConcurrent(true))
                {
                    startConcurrentThread = false;
                }
                else
                {
                    startedParallelThread1 = true;
                    if (this->maxParallelism > 3)
                    {
                        if (!parallelThread2.EnableConcurrent(true))
                        {
                            startConcurrentThread = false;
                        }
                        else
                        {
                            startedParallelThread2 = true;
                        }
                    }
                }
            }
        }

        if (startConcurrentThread)
        {
            HANDLE concurrentThread = (HANDLE)PlatformAgnostic::Thread::Create(Recycler::ConcurrentThreadStackSize, &Recycler::StaticThreadProc, this, PlatformAgnostic::Thread::ThreadInitStackSizeParamIsAReservation);
            if (concurrentThread != nullptr)
            {
                // Wait for recycler thread to initialize
                HANDLE handle[2] = { this->concurrentWorkDoneEvent, concurrentThread };
                DWORD ret = WaitForMultipleObjectsEx(2, handle, FALSE, INFINITE, FALSE);
                if (ret == WAIT_OBJECT_0)
                {
                    this->threadService = threadService;
                    this->concurrentThread = concurrentThread;
                    return true;
                }
                CloseHandle(concurrentThread);
            }
        }

        if (startedParallelThread1)
        {
            parallelThread1.Shutdown();
            if (startedParallelThread2)
            {
                parallelThread2.Shutdown();
            }
        }
    }

    // We failed to start a concurrent thread so we set these back to false and clean up
    this->enableConcurrentMark = false;
    this->enableParallelMark = false;
    this->enableConcurrentSweep = false;

    if (concurrentWorkReadyEvent)
    {
        CloseHandle(concurrentWorkReadyEvent);
        concurrentWorkReadyEvent = nullptr;
    }
    if (concurrentWorkDoneEvent)
    {
        CloseHandle(concurrentWorkDoneEvent);
        concurrentWorkDoneEvent = nullptr;
    }
#ifdef IDLE_DECOMMIT_ENABLED
    if (concurrentIdleDecommitEvent)
    {
        CloseHandle(concurrentIdleDecommitEvent);
        concurrentIdleDecommitEvent = nullptr;
    }
#endif
    return false;
}
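// Reviewer note (not in the original source): the thread startup order above is
// deliberate -- parallel mark helpers first, then the main concurrent thread,
// with rollback in reverse on failure. A host enables the whole pipeline with
// something like (threadService being the host-provided JsUtil::ThreadService):
//
//     if (!recycler->EnableConcurrent(threadService, /* startAllThreads */ true))
//     {
//         // concurrent GC unavailable; the recycler falls back to in-thread collection
//     }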
void
Recycler::ShutdownThread()
{
    if (this->IsConcurrentEnabled())
    {
        Assert(concurrentThread != NULL || threadService->HasCallback());
        FinalizeConcurrent(false);
    }
}

void
Recycler::DisableConcurrent()
{
    if (this->IsConcurrentEnabled())
    {
        Assert(concurrentThread != NULL || threadService->HasCallback());
        FinalizeConcurrent(true);
        this->collectionState = CollectionStateNotCollecting;
    }
}
bool
Recycler::StartConcurrent(CollectionState const state)
{
    // Reset the tick count to detect if the concurrent thread is taking too long
    tickCountStartConcurrent = GetTickCount();

    CollectionState oldState = this->collectionState;
    this->collectionState = state;

    if (threadService->HasCallback())
    {
        Assert(concurrentThread == NULL);
        Assert(concurrentWorkReadyEvent == NULL);

        if (!threadService->Invoke(Recycler::StaticBackgroundWorkCallback, this))
        {
            this->collectionState = oldState;
            return false;
        }
        return true;
    }
    else
    {
        Assert(concurrentThread != NULL);
        Assert(concurrentWorkReadyEvent != NULL);
        SetEvent(concurrentWorkReadyEvent);
        return true;
    }
}
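// Reviewer note (not in the original source): StartConcurrent is the single
// dispatch point for background work, and the target state doubles as the work
// request. The two transports are:
//
//     threadService->Invoke(StaticBackgroundWorkCallback, this);  // host thread pool
//     SetEvent(concurrentWorkReadyEvent);                         // dedicated GC thread
//
// Either way the background side reads collectionState to decide what to do,
// which is why the state is written before the wakeup and rolled back on failure.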
BOOL
Recycler::StartBackgroundMarkCollect()
{
#ifdef RECYCLER_TRACE
    PrintCollectTrace(Js::ConcurrentMarkPhase);
#endif
    this->CollectionBegin<Js::ConcurrentCollectPhase>();

    // Asynchronous concurrent mark
    BOOL success = StartAsynchronousBackgroundMark();

    this->CollectionEnd<Js::ConcurrentCollectPhase>();
    return success;
}
BOOL
Recycler::StartBackgroundMark(bool foregroundResetMark, bool foregroundFindRoots)
{
    Assert(!this->CollectionInProgress());

    CollectionState backgroundState = CollectionStateConcurrentResetMarks;
    bool doBackgroundFindRoots = true;

    if (foregroundResetMark || foregroundFindRoots)
    {
        // REVIEW: SWB, if there's only write barrier page change, we don't scan and mark?
#ifdef RECYCLER_WRITE_WATCH
        if (!CONFIG_FLAG(ForceSoftwareWriteBarrier))
        {
            RECYCLER_PROFILE_EXEC_BEGIN(this, Js::ResetWriteWatchPhase);
            bool hasWriteWatch = (recyclerPageAllocator.ResetWriteWatch() && recyclerLargeBlockPageAllocator.ResetWriteWatch());
            RECYCLER_PROFILE_EXEC_END(this, Js::ResetWriteWatchPhase);
            if (!hasWriteWatch)
            {
                // Disable concurrent mark
                this->enableConcurrentMark = false;
                return false;
            }
        }
#endif
        // In-thread synchronized GC on the concurrent thread
        ResetMarks(this->enableScanImplicitRoots ? ResetMarkFlags_SynchronizedImplicitRoots : ResetMarkFlags_Synchronized);

        if (foregroundFindRoots)
        {
            this->collectionState = CollectionStateFindRoots;
            FindRoots();
            ScanStack();
            Assert(collectionState == CollectionStateFindRoots);

            backgroundState = CollectionStateConcurrentMark;
            doBackgroundFindRoots = false;
        }
        else
        {
            // Do find roots in the background
            backgroundState = CollectionStateConcurrentFindRoots;
        }
    }

    if (doBackgroundFindRoots)
    {
        this->PrepareBackgroundFindRoots();
    }

    if (!StartConcurrent(backgroundState))
    {
        if (doBackgroundFindRoots)
        {
            this->RevertPrepareBackgroundFindRoots();
        }
        this->collectionState = CollectionStateNotCollecting;
        return false;
    }
    return true;
}
BOOL
Recycler::StartAsynchronousBackgroundMark()
{
    // Debug flags to turn off background reset mark or background find roots; default to doing everything concurrently
    return StartBackgroundMark(CUSTOM_PHASE_OFF1(GetRecyclerFlagsTable(), Js::BackgroundResetMarksPhase), CUSTOM_PHASE_OFF1(GetRecyclerFlagsTable(), Js::BackgroundFindRootsPhase));
}

BOOL
Recycler::StartSynchronousBackgroundMark()
{
    return StartBackgroundMark(true, true);
}
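// Reviewer note (not in the original source): the two entry points differ only
// in where reset-marks and find-roots run. The asynchronous form pushes both to
// the background (unless debug flags force them in-thread), while the
// synchronous form pins them to the calling thread:
//
//     StartAsynchronousBackgroundMark();   // StartBackgroundMark(false, false) by default
//     StartSynchronousBackgroundMark();    // StartBackgroundMark(true, true)
//
// CollectOnConcurrentThread uses the synchronous form so that only the mark
// processing itself runs on the background thread.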
BOOL
Recycler::StartConcurrentSweepCollect()
{
    Assert(collectionState == CollectionStateNotCollecting);
#ifdef RECYCLER_TRACE
    PrintCollectTrace(Js::ConcurrentSweepPhase);
#endif
    this->CollectionBegin<Js::ConcurrentCollectPhase>();
    this->Mark();

    // We don't have rescan data if we disabled concurrent mark, assume the worst
    // (which means it is harder to get into partial collect mode)
#if ENABLE_PARTIAL_GC
    bool needConcurrentSweep = this->Sweep(RecyclerSweep::MaxPartialCollectRescanRootBytes, true, true);
#else
    bool needConcurrentSweep = this->Sweep(true);
#endif
    this->CollectionEnd<Js::ConcurrentCollectPhase>();
    FinishCollection(needConcurrentSweep);
    return true;
}
size_t
Recycler::BackgroundRepeatMark()
{
    RECYCLER_PROFILE_EXEC_BACKGROUND_BEGIN(this, Js::BackgroundRepeatMarkPhase);
    Assert(this->backgroundRescanCount <= RecyclerHeuristic::MaxBackgroundRepeatMarkCount - 1);

    size_t rescannedPageCount = this->BackgroundRescan(RescanFlags_ResetWriteWatch);

    if (this->NeedOOMRescan() || this->isAborting)
    {
        // OOM'ed. Let's not continue
        RECYCLER_PROFILE_EXEC_BACKGROUND_END(this, Js::BackgroundRepeatMarkPhase);
        return Recycler::InvalidScanRootBytes;
    }

    // Rescan the stack
    this->BackgroundScanStack();

    // Process mark stack
    this->DoBackgroundParallelMark();

    if (this->NeedOOMRescan())
    {
        RECYCLER_PROFILE_EXEC_BACKGROUND_END(this, Js::BackgroundRepeatMarkPhase);
        return Recycler::InvalidScanRootBytes;
    }

#ifdef RECYCLER_STATS
    Assert(this->backgroundRescanCount >= 1 && this->backgroundRescanCount <= RecyclerHeuristic::MaxBackgroundRepeatMarkCount);
    this->collectionStats.backgroundMarkData[this->backgroundRescanCount - 1] = this->collectionStats.markData;
#endif
    RECYCLER_PROFILE_EXEC_BACKGROUND_END(this, Js::BackgroundRepeatMarkPhase);
    return rescannedPageCount;
}
char* Recycler::GetScriptThreadStackTop()
{
    // We should have already checked if the recycler is thread bound or not
    Assert(mainThreadHandle != NULL);
    return (char*) savedThreadContext.GetStackTop();
}
size_t
Recycler::BackgroundScanStack()
{
    if (this->skipStack)
    {
#ifdef RECYCLER_TRACE
        CUSTOM_PHASE_PRINT_VERBOSE_TRACE1(GetRecyclerFlagsTable(), Js::ScanStackPhase, _u("[%04X] Skipping the stack scan\n"), ::GetCurrentThreadId());
#endif
        return 0;
    }

    if (!this->isInScript || mainThreadHandle == nullptr)
    {
        // No point in scanning the main thread's stack if we are not in script.
        // We also can't scan the main thread's stack if we are not thread-bound and didn't create the main thread's handle.
        return 0;
    }

    char* stackTop = this->GetScriptThreadStackTop();
    if (stackTop != nullptr)
    {
        size_t size = (char *)stackBase - stackTop;
        ScanMemoryInline<false>((void **)stackTop, size
            ADDRESS_SANITIZER_APPEND(RecyclerScanMemoryType::Stack));
        return size;
    }

    return 0;
}
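// Reviewer note (not in the original source): on x86/x64 the stack grows down,
// so the live region is [stackTop, stackBase) and its byte count is the simple
// pointer difference computed above. For example, with stackBase = 0x7fff0000
// and a saved stackTop = 0x7ffe8000, the scan covers 0x8000 bytes (32 KB) of
// potential root words starting at stackTop.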
void
Recycler::BackgroundMark()
{
    Assert(this->DoQueueTrackedObject());

    this->backgroundRescanCount = 0;
    this->DoBackgroundParallelMark();

    if (this->NeedOOMRescan() || this->isAborting)
    {
        return;
    }

#ifdef RECYCLER_STATS
    this->collectionStats.backgroundMarkData[0] = this->collectionStats.markData;
#endif

    if (PHASE_OFF1(Js::BackgroundRepeatMarkPhase))
    {
        return;
    }

    // We always do one repeat mark pass.
    size_t rescannedPageCount = this->BackgroundRepeatMark();
    if (this->NeedOOMRescan() || this->isAborting)
    {
        // OOM'ed. Let's not continue
        return;
    }

    Assert(rescannedPageCount != Recycler::InvalidScanRootBytes);

    // If we rescanned enough pages in the previous repeat mark pass, then do one more
    // to try to reduce the amount of work we need to do in-thread
    if (rescannedPageCount >= RecyclerHeuristic::BackgroundSecondRepeatMarkThreshold)
    {
        this->BackgroundRepeatMark();
        if (this->NeedOOMRescan() || this->isAborting)
        {
            // OOM'ed. Let's not continue
            return;
        }
    }
}
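// Reviewer note (not in the original source): the repeat-mark heuristic in
// sketch form -- one repeat pass always runs; a second runs only when the first
// rescanned at least RecyclerHeuristic::BackgroundSecondRepeatMarkThreshold
// pages, on the theory that a dirty-page count that high predicts an expensive
// in-thread rescan that a second background pass can shrink:
//
//     // (after the initial mark)
//     size_t pages = BackgroundRepeatMark();   // pass 1, unconditional
//     if (pages >= threshold)
//     {
//         BackgroundRepeatMark();              // pass 2, only when pass 1 was "hot"
//     }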
void
Recycler::BackgroundResetMarks()
{
    RECYCLER_PROFILE_EXEC_BACKGROUND_BEGIN(this, Js::BackgroundResetMarksPhase);
    GCETW(GC_BACKGROUNDRESETMARKS_START, (this));

    Assert(IsMarkStackEmpty());
    this->scanPinnedObjectMap = true;
    this->hasScannedInitialImplicitRoots = false;

    heapBlockMap.ResetMarks();
    autoHeap.ResetMarks(this->enableScanImplicitRoots ? ResetMarkFlags_InBackgroundThreadImplicitRoots : ResetMarkFlags_InBackgroundThread);

    GCETW(GC_BACKGROUNDRESETMARKS_STOP, (this));
    RECYCLER_PROFILE_EXEC_BACKGROUND_END(this, Js::BackgroundResetMarksPhase);
}
void
Recycler::PrepareBackgroundFindRoots()
{
    Assert(!this->hasPendingConcurrentFindRoot);
    this->hasPendingConcurrentFindRoot = true;

    // Save the thread context here. The background thread
    // will use this saved context for the marking instead of
    // trying to get the live thread context of the thread.
    SAVE_THREAD_CONTEXT();

    // Temporarily disable resize so the background thread can scan without
    // the memory being freed from under it
    pinnedObjectMap.DisableResize();

    // Update the cached info for big blocks in the guest arena
    DListBase<GuestArenaAllocator>::EditingIterator guestArenaIter(&guestArenaList);
    while (guestArenaIter.Next())
    {
        GuestArenaAllocator& allocator = guestArenaIter.Data();
        allocator.SetLockBlockList(true);
        if (allocator.pendingDelete)
        {
            Assert(this->hasPendingDeleteGuestArena);
            allocator.SetLockBlockList(false);
            guestArenaIter.RemoveCurrent(&HeapAllocator::Instance);
        }
        else if (this->backgroundFinishMarkCount == 0)
        {
            // Update the cached info for big blocks
            allocator.GetBigBlocks(false);
        }
    }
    this->hasPendingDeleteGuestArena = false;
}
void
Recycler::RevertPrepareBackgroundFindRoots()
{
    Assert(this->hasPendingConcurrentFindRoot);
    this->hasPendingConcurrentFindRoot = false;
    pinnedObjectMap.EnableResize();
}
size_t
Recycler::BackgroundFindRoots()
{
#ifdef RECYCLER_STATS
    size_t lastMarkCount = this->collectionStats.markData.markCount;
#endif
    size_t scanRootBytes = 0;

    Assert(this->IsConcurrentFindRootState());
    Assert(this->hasPendingConcurrentFindRoot);
#if ENABLE_PARTIAL_GC
    Assert(this->inPartialCollectMode || this->DoQueueTrackedObject());
#else
    Assert(this->DoQueueTrackedObject());
#endif

    // Only mark pinned objects and guest arenas, which is where most of the roots are.
    // When we go back to the main thread to rescan, we will scan the rest of the roots.
    // NOTE: purposefully not marking the transientPinnedObject here, as it is transient :)

    // Background mark the pinned objects. Since we are in concurrent find root state,
    // the main thread won't delete any entries from the map, so concurrent reads
    // from the map are safe.
    GCETW(GC_BACKGROUNDSCANROOTS_START, (this));
    RECYCLER_PROFILE_EXEC_BACKGROUND_BEGIN(this, Js::BackgroundFindRootsPhase);
    scanRootBytes += this->ScanPinnedObjects</*background = */true>();

    RECYCLER_PROFILE_EXEC_BACKGROUND_BEGIN(this, Js::FindRootArenaPhase);

    // Background mark the guest arenas. Since we are in concurrent find root state,
    // the main thread won't delete any arena, so concurrent reads from them are ok.
    DListBase<GuestArenaAllocator>::EditingIterator guestArenaIter(&guestArenaList);
    while (guestArenaIter.Next())
    {
        GuestArenaAllocator& allocator = guestArenaIter.Data();
        if (allocator.pendingDelete)
        {
            // Skip guest arenas that are already marked for delete
            Assert(this->hasPendingDeleteGuestArena);
            continue;
        }
        scanRootBytes += ScanArena(&allocator, true);
    }
    RECYCLER_PROFILE_EXEC_BACKGROUND_END(this, Js::FindRootArenaPhase);

    this->ScanImplicitRoots();

    RECYCLER_PROFILE_EXEC_BACKGROUND_END(this, Js::BackgroundFindRootsPhase);

    this->hasPendingConcurrentFindRoot = false;
    this->collectionState = CollectionStateConcurrentMark;
    GCETW(GC_BACKGROUNDSCANROOTS_STOP, (this));

    RECYCLER_STATS_ADD(this, rootCount, this->collectionStats.markData.markCount - lastMarkCount);
    return scanRootBytes;
}
size_t
Recycler::BackgroundFinishMark()
{
#if ENABLE_PARTIAL_GC
    Assert(this->inPartialCollectMode || this->DoQueueTrackedObject());
#else
    Assert(this->DoQueueTrackedObject());
#endif
    Assert(collectionState == CollectionStateConcurrentFinishMark);

    size_t rescannedRootBytes = FinishMarkRescan(true) * AutoSystemInfo::PageSize;

    this->collectionState = CollectionStateConcurrentFindRoots;
    rescannedRootBytes += this->BackgroundFindRoots();
    this->collectionState = CollectionStateConcurrentFinishMark;

    RECYCLER_PROFILE_EXEC_BACKGROUND_BEGIN(this, Js::MarkPhase);
    ProcessMark(true);
    RECYCLER_PROFILE_EXEC_BACKGROUND_END(this, Js::MarkPhase);
    return rescannedRootBytes;
}
void
Recycler::SweepPendingObjects(RecyclerSweep& recyclerSweep)
{
    autoHeap.SweepPendingObjects(recyclerSweep);
}

void
Recycler::ConcurrentTransferSweptObjects(RecyclerSweep& recyclerSweep)
{
    Assert(!recyclerSweep.IsBackground());
    Assert((this->collectionState & Collection_TransferSwept) == Collection_TransferSwept);

#if ENABLE_PARTIAL_GC
    if (this->hasBackgroundFinishPartial)
    {
        this->hasBackgroundFinishPartial = false;
        this->ClearPartialCollect();
    }
#endif
    autoHeap.ConcurrentTransferSweptObjects(recyclerSweep);
}

#if ENABLE_PARTIAL_GC
void
Recycler::ConcurrentPartialTransferSweptObjects(RecyclerSweep& recyclerSweep)
{
    Assert(!recyclerSweep.IsBackground());
    Assert(!this->hasBackgroundFinishPartial);
    autoHeap.ConcurrentPartialTransferSweptObjects(recyclerSweep);
}
#endif
BOOL
Recycler::FinishConcurrentCollectWrapped(CollectionFlags flags)
{
    this->allowDispose = (flags & CollectOverride_AllowDispose) == CollectOverride_AllowDispose;
#if ENABLE_CONCURRENT_GC
    this->skipStack = ((flags & CollectOverride_SkipStack) != 0);
    DebugOnly(this->isConcurrentGCOnIdle = (flags == CollectOnScriptIdle));
#endif
    BOOL collected = collectionWrapper->ExecuteRecyclerCollectionFunction(this, &Recycler::FinishConcurrentCollect, flags);
    return collected;
}
BOOL
Recycler::WaitForConcurrentThread(DWORD waitTime)
{
    Assert(this->IsConcurrentState() || this->collectionState == CollectionStateParallelMark);
    RECYCLER_PROFILE_EXEC_BEGIN(this, Js::ConcurrentWaitPhase);

    if (concurrentThread != NULL)
    {
        // Set the priority back to normal before we wait to ensure it doesn't starve
        SetThreadPriority(this->concurrentThread, THREAD_PRIORITY_NORMAL);
    }

    DWORD ret = WaitForSingleObject(concurrentWorkDoneEvent, waitTime);

    if (concurrentThread != NULL)
    {
        if (ret == WAIT_TIMEOUT)
        {
            // Keep the priority boost.
            priorityBoost = true;
        }
        else
        {
            Assert(ret == WAIT_OBJECT_0);

            // Back to below normal
            SetThreadPriority(this->concurrentThread, THREAD_PRIORITY_BELOW_NORMAL);
            priorityBoost = false;
        }
    }
    RECYCLER_PROFILE_EXEC_END(this, Js::ConcurrentWaitPhase);
    return (ret == WAIT_OBJECT_0);
}
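// Reviewer note (not in the original source): this pairs with the boost logic
// in TryFinishConcurrentCollect above. The GC thread normally runs at
// THREAD_PRIORITY_BELOW_NORMAL; any waiter bumps it to NORMAL so it cannot be
// starved by the waiting script thread, and a successful wait demotes it again.
// priorityBoost records a bump that has not yet been paired with a demotion.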
#if ENABLE_BACKGROUND_PAGE_FREEING
void
Recycler::FlushBackgroundPages()
{
    recyclerPageAllocator.SuspendIdleDecommit();
    recyclerPageAllocator.FlushBackgroundPages();
    recyclerPageAllocator.ResumeIdleDecommit();

    recyclerLargeBlockPageAllocator.SuspendIdleDecommit();
    recyclerLargeBlockPageAllocator.FlushBackgroundPages();
    recyclerLargeBlockPageAllocator.ResumeIdleDecommit();

    this->threadPageAllocator->SuspendIdleDecommit();
    this->threadPageAllocator->FlushBackgroundPages();
    this->threadPageAllocator->ResumeIdleDecommit();

#ifdef RECYCLER_WRITE_BARRIER_ALLOC_SEPARATE_PAGE
    recyclerWithBarrierPageAllocator.SuspendIdleDecommit();
    recyclerWithBarrierPageAllocator.FlushBackgroundPages();
    recyclerWithBarrierPageAllocator.ResumeIdleDecommit();
#endif
}
#endif
#ifdef ENABLE_DEBUG_CONFIG_OPTIONS
AutoProtectPages::AutoProtectPages(Recycler* recycler, bool protectEnabled) :
    isReadOnly(false),
    recycler(recycler)
{
    if (protectEnabled)
    {
        recycler->heapBlockMap.MakeAllPagesReadOnly(recycler);
        isReadOnly = true;
    }
}

AutoProtectPages::~AutoProtectPages()
{
    Unprotect();
}

void AutoProtectPages::Unprotect()
{
    if (isReadOnly)
    {
        recycler->heapBlockMap.MakeAllPagesReadWrite(recycler);
        isReadOnly = false;
    }
}
#endif
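// Reviewer note (not in the original source): AutoProtectPages is a small RAII
// guard -- pages go read-only in the constructor and read-write again either at
// an explicit Unprotect() or at scope exit, whichever comes first. The rescan
// path below uses it like this:
//
//     AutoProtectPages protectPages(this, GetRecyclerFlagsTable().RecyclerProtectPagesOnRescan);
//     // ... rescan; stray writes to GC pages now fault ...
//     protectPages.Unprotect();   // early release before sweeping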
BOOL
Recycler::FinishConcurrentCollect(CollectionFlags flags)
{
    if (!this->IsConcurrentState())
    {
        Assert(false);
        return false;
    }

#ifdef PROFILE_EXEC
    Js::Phase concurrentPhase = Js::ConcurrentCollectPhase;
    // TODO: Remove this workaround for the unreferenced local after -profile is enabled for GC
    static_cast<Js::Phase>(concurrentPhase);
#endif
#if ENABLE_PARTIAL_GC
    RECYCLER_PROFILE_EXEC_BEGIN2(this, Js::RecyclerPhase,
        (concurrentPhase = ((this->inPartialCollectMode && this->IsConcurrentMarkState()) ?
            Js::ConcurrentPartialCollectPhase : Js::ConcurrentCollectPhase)));
#else
    RECYCLER_PROFILE_EXEC_BEGIN2(this, Js::RecyclerPhase,
        (concurrentPhase = Js::ConcurrentCollectPhase));
#endif

    // Don't do concurrent sweep if we have priority boosted.
    const BOOL forceInThread = flags & CollectOverride_ForceInThread;
    bool concurrent = (flags & CollectMode_Concurrent) != 0;
    concurrent = concurrent && (!priorityBoost || this->backgroundRescanCount != 1);
#ifdef RECYCLER_TRACE
    collectionParam.priorityBoostConcurrentSweepOverride = priorityBoost;
#endif

    const DWORD waitTime = forceInThread ? INFINITE : RecyclerHeuristic::FinishConcurrentCollectWaitTime(this->GetRecyclerFlagsTable());
    GCETW(GC_FINISHCONCURRENTWAIT_START, (this, waitTime));
    const BOOL waited = WaitForConcurrentThread(waitTime);
    GCETW(GC_FINISHCONCURRENTWAIT_STOP, (this, !waited));
    if (!waited)
    {
        RECYCLER_PROFILE_EXEC_END2(this, concurrentPhase, Js::RecyclerPhase);
        return false;
    }

    bool needConcurrentSweep = false;
    if (collectionState == CollectionStateRescanWait)
    {
        GCETW_INTERNAL(GC_START, (this, ETWEvent_ConcurrentRescan));
#ifdef RECYCLER_TRACE
#if ENABLE_PARTIAL_GC
        PrintCollectTrace(this->inPartialCollectMode ? Js::ConcurrentPartialCollectPhase : Js::ConcurrentMarkPhase, true);
#else
        PrintCollectTrace(Js::ConcurrentMarkPhase, true);
#endif
#endif
        collectionState = CollectionStateRescanFindRoots;

#ifdef ENABLE_DEBUG_CONFIG_OPTIONS
        // TODO: Change this behavior.
        // ProtectPagesOnRescan is not supported in PageHeap mode because the page protection is changed
        // outside the PageAllocator in PageHeap mode, and so pages are not in the state that the
        // PageAllocator expects when it goes to change the page protection.
        // One viable fix is to move the guard page protection logic outside of the heap blocks
        // and into the page allocator.
        AssertMsg(!(IsPageHeapEnabled() && GetRecyclerFlagsTable().RecyclerProtectPagesOnRescan), "ProtectPagesOnRescan not supported in page heap mode");
        AutoProtectPages protectPages(this, GetRecyclerFlagsTable().RecyclerProtectPagesOnRescan);
#endif

        const bool backgroundFinishMark = !forceInThread && concurrent && ((flags & CollectOverride_BackgroundFinishMark) != 0);
        const DWORD finishMarkWaitTime = RecyclerHeuristic::BackgroundFinishMarkWaitTime(backgroundFinishMark, GetRecyclerFlagsTable());
        size_t rescanRootBytes = FinishMark(finishMarkWaitTime);

        if (rescanRootBytes == Recycler::InvalidScanRootBytes)
        {
            Assert(this->IsMarkState());
            RECYCLER_PROFILE_EXEC_END2(this, concurrentPhase, Js::RecyclerPhase);
            GCETW_INTERNAL(GC_STOP, (this, ETWEvent_ConcurrentRescan));

            // We timed out trying to mark.
            return false;
        }

#ifdef RECYCLER_STATS
        collectionStats.continueCollectAllocBytes = autoHeap.uncollectedAllocBytes;
#endif
#ifdef RECYCLER_VERIFY_MARK
        if (GetRecyclerFlagsTable().RecyclerVerifyMark)
        {
            this->VerifyMark();
        }
#endif

#ifdef ENABLE_DEBUG_CONFIG_OPTIONS
        protectPages.Unprotect();
#endif

#if ENABLE_PARTIAL_GC
        needConcurrentSweep = this->Sweep(rescanRootBytes, concurrent, true);
#else
        needConcurrentSweep = this->Sweep(concurrent);
#endif
        GCETW_INTERNAL(GC_STOP, (this, ETWEvent_ConcurrentRescan));
    }
    else
    {
        FinishTransferSwept(flags);
    }

    RECYCLER_PROFILE_EXEC_END2(this, concurrentPhase, Js::RecyclerPhase);
    FinishCollection(needConcurrentSweep);

    if (!this->CollectionInProgress())
    {
        if (NeedExhaustiveRepeatCollect())
        {
            DoCollect((CollectionFlags)(flags & ~CollectMode_Partial));
        }
        else
        {
            EndCollection();
        }
    }
    return true;
}
void
Recycler::FinishTransferSwept(CollectionFlags flags)
{
    GCETW_INTERNAL(GC_START, (this, ETWEvent_ConcurrentTransferSwept));
    GCETW(GC_FLUSHZEROPAGE_START, (this));
    Assert(collectionState == CollectionStateTransferSweptWait);
#ifdef RECYCLER_TRACE
    PrintCollectTrace(Js::ConcurrentSweepPhase, true);
#endif
    collectionState = CollectionStateTransferSwept;
#if ENABLE_BACKGROUND_PAGE_FREEING
    if (CONFIG_FLAG(EnableBGFreeZero))
    {
        // We should have zeroed all the pages in the background thread
        Assert(!recyclerPageAllocator.HasZeroQueuedPages());
        Assert(!recyclerLargeBlockPageAllocator.HasZeroQueuedPages());
        this->FlushBackgroundPages();
    }
#endif
    GCETW(GC_FLUSHZEROPAGE_STOP, (this));
    GCETW(GC_TRANSFERSWEPTOBJECTS_START, (this));
    Assert(this->recyclerSweep != nullptr);
    Assert(!this->recyclerSweep->IsBackground());
#if ENABLE_PARTIAL_GC
    if (this->inPartialCollectMode)
    {
        ConcurrentPartialTransferSweptObjects(*this->recyclerSweep);
    }
    else
#endif
    {
        ConcurrentTransferSweptObjects(*this->recyclerSweep);
    }
    recyclerSweep->EndSweep();
    GCETW(GC_TRANSFERSWEPTOBJECTS_STOP, (this));
    GCETW_INTERNAL(GC_STOP, (this, ETWEvent_ConcurrentTransferSwept));
}
#if !DISABLE_SEH
int
Recycler::ExceptFilter(LPEXCEPTION_POINTERS pEP)
{
#if DBG
    // Don't filter assertion failures; let them propagate to the debugger
    if (pEP->ExceptionRecord->ExceptionCode == STATUS_ASSERTION_FAILURE)
    {
        return EXCEPTION_CONTINUE_SEARCH;
    }
#endif

#ifdef GENERATE_DUMP
    if (Js::Configuration::Global.flags.IsEnabled(Js::DumpOnCrashFlag))
    {
        Js::Throw::GenerateDump(pEP, Js::Configuration::Global.flags.DumpOnCrash);
    }
#endif

#if DBG && _M_IX86
    int callerEBP = *((int*)pEP->ContextRecord->Ebp);
    Output::Print(_u("Recycler Concurrent Thread: Uncaught exception: ExceptionAddress: 0x%X ExceptionCode: 0x%X EIP: 0x%X EBP: 0x%X ReturnAddress: 0x%X ReturnAddress2: 0x%X\n"),
        pEP->ExceptionRecord->ExceptionAddress, pEP->ExceptionRecord->ExceptionCode, pEP->ContextRecord->Eip,
        pEP->ContextRecord->Ebp, *((int*)pEP->ContextRecord->Ebp + 1), *((int*)callerEBP + 1));
#endif
    Output::Flush();
    return EXCEPTION_CONTINUE_SEARCH;
}
#endif
unsigned int
Recycler::StaticThreadProc(LPVOID lpParameter)
{
    DWORD ret = (DWORD)-1;
#if !DISABLE_SEH
    __try
    {
#endif
        Recycler * recycler = (Recycler *)lpParameter;
#if DBG
        recycler->concurrentThreadExited = false;
#endif
        ret = recycler->ThreadProc();
#if !DISABLE_SEH
    }
    __except(Recycler::ExceptFilter(GetExceptionInformation()))
    {
        Assert(false);
    }
#endif
    return ret;
}

void
Recycler::StaticBackgroundWorkCallback(void * callbackData)
{
    Recycler * recycler = (Recycler *)callbackData;
    recycler->DoBackgroundWork(true);
}
#if defined(ENABLE_JS_ETW) && defined(NTBUILD)
static ETWEventGCActivationKind
BackgroundMarkETWEventGCActivationKind(CollectionState collectionState)
{
    return collectionState == CollectionStateConcurrentFinishMark ?
        ETWEvent_ConcurrentFinishMark : ETWEvent_ConcurrentMark;
}
#endif
void
Recycler::DoBackgroundWork(bool forceForeground)
{
    if (this->collectionState == CollectionStateConcurrentWrapperCallback)
    {
        this->collectionWrapper->ConcurrentCallback();
    }
    else if (this->collectionState == CollectionStateParallelMark)
    {
        this->ProcessParallelMark(false, &this->markContext);
    }
    else if (this->IsConcurrentMarkState())
    {
        RECYCLER_PROFILE_EXEC_BACKGROUND_BEGIN(this, this->collectionState == CollectionStateConcurrentFinishMark ?
            Js::BackgroundFinishMarkPhase : Js::ConcurrentMarkPhase);
        GCETW_INTERNAL(GC_START, (this, BackgroundMarkETWEventGCActivationKind(this->collectionState)));
        DebugOnly(this->markContext.GetPageAllocator()->SetConcurrentThreadId(::GetCurrentThreadId()));
        Assert(this->enableConcurrentMark);
        if (this->collectionState != CollectionStateConcurrentFinishMark)
        {
            this->StartQueueTrackedObject();
        }
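        // Concurrent mark advances ResetMarks -> FindRoots -> Mark; the fall-throughs
        // below let a single background pass resume from whichever state was pending.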
        switch (this->collectionState)
        {
        case CollectionStateConcurrentResetMarks:
            this->BackgroundResetMarks();
            this->BackgroundResetWriteWatchAll();
            this->collectionState = CollectionStateConcurrentFindRoots;
            // fall-through
        case CollectionStateConcurrentFindRoots:
            this->BackgroundFindRoots();
            this->BackgroundScanStack();
            this->collectionState = CollectionStateConcurrentMark;
            // fall-through
        case CollectionStateConcurrentMark:
            this->BackgroundMark();
            Assert(this->collectionState == CollectionStateConcurrentMark);
            RECORD_TIMESTAMP(concurrentMarkFinishTime);
            break;
        case CollectionStateConcurrentFinishMark:
            this->backgroundRescanRootBytes = this->BackgroundFinishMark();
            Assert(!HasPendingMarkObjects());
            break;
        default:
            Assert(false);
            break;
        };
        GCETW_INTERNAL(GC_STOP, (this, BackgroundMarkETWEventGCActivationKind(this->collectionState)));
        RECYCLER_PROFILE_EXEC_BACKGROUND_END(this, this->collectionState == CollectionStateConcurrentFinishMark ?
            Js::BackgroundFinishMarkPhase : Js::ConcurrentMarkPhase);
        this->collectionState = CollectionStateRescanWait;
        DebugOnly(this->markContext.GetPageAllocator()->ClearConcurrentThreadId());
    }
    else
    {
        Assert(this->enableConcurrentSweep);
        Assert(this->collectionState == CollectionStateConcurrentSweep);
        RECYCLER_PROFILE_EXEC_BACKGROUND_BEGIN(this, Js::ConcurrentSweepPhase);
        GCETW_INTERNAL(GC_START, (this, ETWEvent_ConcurrentSweep));
        GCETW(GC_BACKGROUNDZEROPAGE_START, (this));
#if ENABLE_BACKGROUND_PAGE_ZEROING
        if (CONFIG_FLAG(EnableBGFreeZero))
        {
            // Zero the queued pages first so they are available to be allocated
            recyclerPageAllocator.BackgroundZeroQueuedPages();
            recyclerLargeBlockPageAllocator.BackgroundZeroQueuedPages();
#ifdef RECYCLER_WRITE_BARRIER_ALLOC_SEPARATE_PAGE
            recyclerWithBarrierPageAllocator.BackgroundZeroQueuedPages();
#endif
        }
#endif
        GCETW(GC_BACKGROUNDZEROPAGE_STOP, (this));
        GCETW(GC_BACKGROUNDSWEEP_START, (this));
        Assert(this->recyclerSweep != nullptr);
        this->recyclerSweep->BackgroundSweep();

        uint sweptBytes = 0;
#ifdef RECYCLER_STATS
        sweptBytes = (uint)collectionStats.objectSweptBytes;
#endif
        GCETW(GC_BACKGROUNDSWEEP_STOP, (this, sweptBytes));
#if ENABLE_BACKGROUND_PAGE_ZEROING
        if (CONFIG_FLAG(EnableBGFreeZero))
        {
            // Drain the zero queue again, as we might have freed more pages
            // during the background sweep
            GCETW(GC_BACKGROUNDZEROPAGE_START, (this));
            recyclerPageAllocator.BackgroundZeroQueuedPages();
#ifdef RECYCLER_WRITE_BARRIER_ALLOC_SEPARATE_PAGE
            recyclerWithBarrierPageAllocator.BackgroundZeroQueuedPages();
#endif
            recyclerLargeBlockPageAllocator.BackgroundZeroQueuedPages();
            GCETW(GC_BACKGROUNDZEROPAGE_STOP, (this));
        }
#endif
        GCETW_INTERNAL(GC_STOP, (this, ETWEvent_ConcurrentSweep));
        Assert(this->collectionState == CollectionStateConcurrentSweep);
        this->collectionState = CollectionStateTransferSweptWait;
        RECYCLER_PROFILE_EXEC_BACKGROUND_END(this, Js::ConcurrentSweepPhase);
    }
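    // Background pass complete: wake whoever is blocked on concurrentWorkDoneEvent
    // (e.g. WaitForConcurrentThread), then let the wrapper run its wait callback.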
    SetEvent(this->concurrentWorkDoneEvent);
    collectionWrapper->WaitCollectionCallBack();
}
DWORD
Recycler::ThreadProc()
{
    Assert(this->IsConcurrentEnabled());

#if !defined(_UCRT)
    // We do this before we set the concurrentWorkDoneEvent because GetModuleHandleEx requires
    // taking the loader lock. Otherwise we could have the following case:
    //   Thread A => initializes the concurrent thread (C)
    //   C signals done
    //   C yields, since it has lower priority
    //   Thread A starts running and is told to shut down
    //   Thread A grabs the loader lock as part of the shutdown sequence
    //   Thread A waits for C to be done
    //   C wakes up now and tries to grab the loader lock => deadlock
    // To prevent this deadlock, we call GetModuleHandleEx first and then set the concurrentWorkDoneEvent.
    HMODULE dllHandle = NULL;
    if (!GetModuleHandleEx(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS, (LPCTSTR)&Recycler::StaticThreadProc, &dllHandle))
    {
        dllHandle = NULL;
    }
#endif

#if defined(ENABLE_JS_ETW) && !defined(ENABLE_JS_LTTNG)
    // LTTng has no concept of EventActivityIdControl.
    // Create an ETW ActivityId for this thread, to help tools correlate ETW events we generate.
    GUID activityId = { 0 };
    auto eventActivityIdControlResult = EventActivityIdControl(EVENT_ACTIVITY_CTRL_CREATE_SET_ID, &activityId);
    Assert(eventActivityIdControlResult == ERROR_SUCCESS);
#endif

    // Signal that the thread has started
    SetEvent(this->concurrentWorkDoneEvent);
    SetThreadPriority(::GetCurrentThread(), THREAD_PRIORITY_BELOW_NORMAL);
#if defined(DBG) && defined(PROFILE_EXEC)
    this->backgroundProfilerPageAllocator.SetConcurrentThreadId(::GetCurrentThreadId());
#endif

#ifdef IDLE_DECOMMIT_ENABLED
    DWORD handleCount = this->concurrentIdleDecommitEvent ? 2 : 1;
    HANDLE handles[2] = { this->concurrentWorkReadyEvent, this->concurrentIdleDecommitEvent };
#endif
    do
    {
#ifdef IDLE_DECOMMIT_ENABLED
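        // Run IdleDecommit on each page allocator and keep the nearest deadline;
        // INFINITE means no decommit work is currently pending anywhere.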
        needIdleDecommitSignal = IdleDecommitSignal_None;
        DWORD threadPageAllocatorWaitTime = threadPageAllocator->IdleDecommit();
        DWORD recyclerPageAllocatorWaitTime = recyclerPageAllocator.IdleDecommit();
        DWORD waitTime = min(threadPageAllocatorWaitTime, recyclerPageAllocatorWaitTime);
        DWORD recyclerLargeBlockPageAllocatorWaitTime = recyclerLargeBlockPageAllocator.IdleDecommit();
        waitTime = min(waitTime, recyclerLargeBlockPageAllocatorWaitTime);
#ifdef RECYCLER_WRITE_BARRIER_ALLOC_SEPARATE_PAGE
        DWORD recyclerWithBarrierPageAllocatorWaitTime = recyclerWithBarrierPageAllocator.IdleDecommit();
        waitTime = min(waitTime, recyclerWithBarrierPageAllocatorWaitTime);
#endif
        if (waitTime == INFINITE)
        {
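            // Advertise that we need an explicit signal before sleeping indefinitely.
            // If the main thread already requested a timer (NeedTimer), skip the wait
            // and loop around to recompute the decommit deadline instead.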
            DWORD ret = ::InterlockedCompareExchange(&needIdleDecommitSignal, IdleDecommitSignal_NeedSignal, IdleDecommitSignal_None);
            if (ret == IdleDecommitSignal_NeedTimer)
            {
#if DBG
                if (GetRecyclerFlagsTable().Trace.IsEnabled(Js::IdleDecommitPhase))
                {
                    Output::Print(_u("Recycler Thread IdleDecommit Need Timer\n"));
                    Output::Flush();
                }
#endif
                continue;
            }
        }
#if DBG
        else
        {
            if (GetRecyclerFlagsTable().Trace.IsEnabled(Js::IdleDecommitPhase))
            {
                Output::Print(_u("Recycler Thread IdleDecommit Wait %d\n"), waitTime);
                Output::Flush();
            }
        }
#endif
        DWORD result = WaitForMultipleObjectsEx(handleCount, handles, FALSE, waitTime, FALSE);
        if (result != WAIT_OBJECT_0)
        {
            Assert((handleCount == 2 && result == WAIT_OBJECT_0 + 1) || (waitTime != INFINITE && result == WAIT_TIMEOUT));
#if DBG
            if (GetRecyclerFlagsTable().Trace.IsEnabled(Js::IdleDecommitPhase))
            {
                if (result == WAIT_TIMEOUT)
                {
                    Output::Print(_u("Recycler Thread IdleDecommit Timeout: %d\n"), waitTime);
                }
                else
                {
                    Output::Print(_u("Recycler Thread IdleDecommit Signaled\n"));
                }
                Output::Flush();
            }
#endif
            continue;
        }
#else
        DWORD result = WaitForSingleObject(this->concurrentWorkReadyEvent, INFINITE);
        Assert(result == WAIT_OBJECT_0);
#endif
        if (this->collectionState == CollectionStateExit)
        {
#if DBG
            this->concurrentThreadExited = true;
#endif
            break;
        }
        DoBackgroundWork();
    }
    while (true);

    SetEvent(this->concurrentWorkDoneEvent);
#if !defined(_UCRT)
    if (dllHandle)
    {
        FreeLibraryAndExitThread(dllHandle, 0);
    }
    else
#endif
    {
        return 0;
    }
}
#endif // ENABLE_CONCURRENT_GC

#if ENABLE_CONCURRENT_GC && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
void
Recycler::FinishConcurrentSweep()
{
#if SUPPORT_WIN32_SLIST && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP_USE_SLIST
    this->autoHeap.FinishConcurrentSweep();
#endif
}
#endif

void
Recycler::FinishCollection(bool needConcurrentSweep)
{
#if ENABLE_CONCURRENT_GC
    Assert(!!this->InConcurrentSweep() == needConcurrentSweep);
#else
    Assert(!needConcurrentSweep);
#endif
    if (!needConcurrentSweep)
    {
        FinishCollection();
    }
    else
    {
        FinishDisposeObjects();
    }
}
void
Recycler::FinishCollection()
{
#if ENABLE_PARTIAL_GC && ENABLE_CONCURRENT_GC
    Assert(!this->hasBackgroundFinishPartial);
#endif
    Assert(!this->hasPendingDeleteGuestArena);

    // Reset the time heuristics
    ScheduleNextCollection();

    {
        AutoSwitchCollectionStates collectionState(this,
            /* entry state */ CollectionStatePostCollectionCallback,
            /* exit state */ CollectionStateNotCollecting);
        collectionWrapper->PostCollectionCallBack();
    }
#if ENABLE_CONCURRENT_GC
    this->backgroundFinishMarkCount = 0;
#endif

    // Do a partial page decommit now
    if (decommitOnFinish)
    {
        ForEachPageAllocator([](IdleDecommitPageAllocator* pageAlloc)
        {
            pageAlloc->DecommitNow(false);
        });
        this->decommitOnFinish = false;
    }

    RECYCLER_SLOW_CHECK(autoHeap.Check());
#ifdef RECYCLER_MEMORY_VERIFY
    this->Verify(Js::RecyclerPhase);
#endif
#ifdef RECYCLER_FINALIZE_CHECK
    autoHeap.VerifyFinalize();
#endif
#ifdef ENABLE_JS_ETW
    FlushFreeRecord();
#endif
    FinishDisposeObjects();
#ifdef RECYCLER_FINALIZE_CHECK
    if (!this->IsMarkState())
    {
        autoHeap.VerifyFinalize();
    }
#endif
#ifdef RECYCLER_STATS
    if (CUSTOM_PHASE_STATS1(this->GetRecyclerFlagsTable(), Js::RecyclerPhase))
    {
        PrintCollectStats();
    }
#endif
#ifdef PROFILE_RECYCLER_ALLOC
    if (MemoryProfiler::IsTraceEnabled(true))
    {
        PrintAllocStats();
    }
#endif
#if ENABLE_MEM_STATS
    autoHeap.ReportMemStats();
#endif
    RECORD_TIMESTAMP(currentCollectionEndTime);
}
void
Recycler::SetExternalRootMarker(ExternalRootMarker fn, void * context)
{
    externalRootMarker = fn;
    externalRootMarkerContext = context;
}

void
Recycler::SetCollectionWrapper(RecyclerCollectionWrapper * wrapper)
{
    this->collectionWrapper = wrapper;
#if LARGEHEAPBLOCK_ENCODING
    this->Cookie = wrapper->GetRandomNumber();
#else
    this->Cookie = 0;
#endif
}
// TODO: (leish) Remove the following function? It seems to make no sense to re-allocate in the recycler.
char *
Recycler::Realloc(void* buffer, DECLSPEC_GUARD_OVERFLOW size_t existingBytes, DECLSPEC_GUARD_OVERFLOW size_t requestedBytes, bool truncate)
{
    Assert(requestedBytes > 0);
    if (existingBytes == 0)
    {
        Assert(buffer == nullptr);
        return Alloc(requestedBytes);
    }
    Assert(buffer != nullptr);

    size_t nbytes = AllocSizeMath::Align(requestedBytes, HeapConstants::ObjectGranularity);
    // Since the original allocation succeeded, aligning the existing size shouldn't overflow
    size_t nbytesExisting = AllocSizeMath::Align(existingBytes, HeapConstants::ObjectGranularity);
    Assert(nbytesExisting >= existingBytes);
    if (nbytes == nbytesExisting)
    {
        // Same aligned size; reuse the existing buffer
        return (char *)buffer;
    }

    char* replacementBuf = this->Alloc(requestedBytes);
    if (replacementBuf != nullptr)
    {
        if (existingBytes > requestedBytes && truncate)
        {
            // Shrinking: copy only the requested prefix
            js_memcpy_s(replacementBuf, requestedBytes, buffer, requestedBytes);
        }
        else
        {
            js_memcpy_s(replacementBuf, requestedBytes, buffer, existingBytes);
        }
    }
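    // Note that the original buffer is freed even when the new allocation fails,
    // so a null return means the old contents are gone as well.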
    if (nbytesExisting > 0)
    {
        this->Free(buffer, nbytesExisting);
    }
    return replacementBuf;
}
bool
Recycler::ForceSweepObject()
{
#ifdef RECYCLER_TEST_SUPPORT
    if (BinaryFeatureControl::RecyclerTest())
    {
        if (checkFn != nullptr)
        {
            return true;
        }
    }
#endif
#ifdef PROFILE_RECYCLER_ALLOC
    if (trackerDictionary != nullptr)
    {
        // Need to sweep objects if we are tracing recycler allocations
        return true;
    }
#endif
#ifdef RECYCLER_STATS
    if (CUSTOM_PHASE_STATS1(this->GetRecyclerFlagsTable(), Js::RecyclerPhase))
    {
        return true;
    }
#endif
#if DBG
    // Force sweeping the object so we can assert that we are not sweeping objects that are still implicit roots
    if (this->enableScanImplicitRoots)
    {
        return true;
    }
#endif
    return false;
}
bool
Recycler::ShouldIdleCollectOnExit()
{
    // Always reset the partial heuristics even if we are not doing an idle collect,
    // so we don't carry the heuristics over to the next script activation
    this->ResetPartialHeuristicCounters();

    if (this->CollectionInProgress())
    {
#ifdef RECYCLER_TRACE
        CUSTOM_PHASE_PRINT_VERBOSE_TRACE1(GetRecyclerFlagsTable(), Js::IdleCollectPhase, _u("%04X> Skipping scheduling Idle Collect. Reason: Collection in progress\n"), ::GetCurrentThreadId());
#endif
        // Don't schedule an idle collect if there is a collection going on already.
        // IDLE-GC-TODO: Fix ResetHeuristics in the GC so we can detect memory allocation during
        // the concurrent collect and still schedule an idle collect
        return false;
    }

    if (CUSTOM_PHASE_FORCE1(GetRecyclerFlagsTable(), Js::IdleCollectPhase))
    {
        return true;
    }

    uint32 nextTime = tickCountNextCollection - tickDiffToNextCollect;
    // We will try to start a concurrent collect if we are within .9 ms of the next scheduled
    // collection AND the size of allocation is larger than 32M. This is similar to the
    // CollectionAllocation logic, just earlier in both the time heuristic and the size
    // heuristic, so we can do some concurrent GC while we are not in script.
    if (autoHeap.uncollectedAllocBytes >= RecyclerHeuristic::Instance.MaxUncollectedAllocBytesOnExit
        && GetTickCount() > nextTime)
    {
#ifdef RECYCLER_TRACE
        if (CUSTOM_PHASE_TRACE1(GetRecyclerFlagsTable(), Js::IdleCollectPhase))
        {
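            // Note: given the enclosing condition, the time-based else arm below is
            // currently unreachable (the alloc-bytes threshold has already been met).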
            if (autoHeap.uncollectedAllocBytes >= RecyclerHeuristic::Instance.MaxUncollectedAllocBytesOnExit)
            {
                Output::Print(_u("%04X> Idle collect on exit: alloc %d\n"), ::GetCurrentThreadId(), autoHeap.uncollectedAllocBytes);
            }
            else
            {
                Output::Print(_u("%04X> Idle collect on exit: time %d\n"), ::GetCurrentThreadId(), tickCountNextCollection - GetTickCount());
            }
            Output::Flush();
        }
#endif
        this->CollectNow<CollectNowConcurrent>();
        return false;
    }
    Assert(!this->CollectionInProgress());
    // Idle GC uses the size heuristic; we only need to schedule one if we passed it.
    return (autoHeap.uncollectedAllocBytes >= RecyclerHeuristic::IdleUncollectedAllocBytesCollection);
}
#if ENABLE_CONCURRENT_GC
bool
RecyclerParallelThread::StartConcurrent()
{
    if (this->recycler->threadService->HasCallback())
    {
        // This may be the first time. If so, initialize by creating the doneEvent.
        if (this->concurrentWorkDoneEvent == NULL)
        {
            this->concurrentWorkDoneEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
            if (this->concurrentWorkDoneEvent == nullptr)
            {
                return false;
            }
        }
        Assert(concurrentThread == NULL);
        Assert(concurrentWorkReadyEvent == NULL);

        // Invoke the thread service to process the work
        if (!this->recycler->threadService->Invoke(RecyclerParallelThread::StaticBackgroundWorkCallback, this))
        {
            return false;
        }
    }
    else
    {
        // This may be the first time. If so, initialize and create the thread.
        if (this->concurrentWorkDoneEvent == NULL)
        {
            return this->EnableConcurrent(false);
        }
        else
        {
            Assert(this->concurrentThread != NULL);
            Assert(this->concurrentWorkReadyEvent != NULL);

            // The thread already exists; signal it that work is ready
            SetEvent(this->concurrentWorkReadyEvent);
        }
    }
    return true;
}
bool
RecyclerParallelThread::EnableConcurrent(bool waitForThread)
{
    this->synchronizeOnStartup = waitForThread;

    Assert(this->concurrentWorkDoneEvent == NULL);
    Assert(this->concurrentWorkReadyEvent == NULL);
    Assert(this->concurrentThread == NULL);

    this->concurrentWorkDoneEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
    if (this->concurrentWorkDoneEvent == nullptr)
    {
        return false;
    }
    this->concurrentWorkReadyEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
    if (this->concurrentWorkReadyEvent == nullptr)
    {
        CloseHandle(this->concurrentWorkDoneEvent);
        this->concurrentWorkDoneEvent = NULL;
        return false;
    }
    this->concurrentThread = (HANDLE)PlatformAgnostic::Thread::Create(Recycler::ConcurrentThreadStackSize, &RecyclerParallelThread::StaticThreadProc, this, PlatformAgnostic::Thread::ThreadInitStackSizeParamIsAReservation);
    if (this->concurrentThread != nullptr && waitForThread)
    {
        // Wait for the thread to initialize
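        // Wait on both the done event and the thread handle: if the thread exits
        // before signaling (e.g. startup failure), the wait still returns and we
        // fall into the cleanup path below instead of blocking forever.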
        HANDLE handle[2] = { this->concurrentWorkDoneEvent, this->concurrentThread };
        DWORD ret = WaitForMultipleObjectsEx(2, handle, FALSE, INFINITE, FALSE);
        if (ret == WAIT_OBJECT_0)
        {
            return true;
        }
        CloseHandle(concurrentThread);
        concurrentThread = nullptr;
    }
    if (this->concurrentThread == nullptr)
    {
        CloseHandle(this->concurrentWorkDoneEvent);
        this->concurrentWorkDoneEvent = NULL;
        CloseHandle(this->concurrentWorkReadyEvent);
        this->concurrentWorkReadyEvent = NULL;
        return false;
    }
    return true;
}
template <uint parallelId>
void
Recycler::ParallelWorkFunc()
{
    Assert(parallelId == 0 || parallelId == 1);
    MarkContext * markContext = (parallelId == 0 ? &this->parallelMarkContext2 : &this->parallelMarkContext3);
    switch (this->collectionState)
    {
    case CollectionStateParallelMark:
        this->ProcessParallelMark(false, markContext);
        break;
    case CollectionStateBackgroundParallelMark:
        this->ProcessParallelMark(true, markContext);
        break;
    default:
        Assert(false);
    }
}
void
RecyclerParallelThread::WaitForConcurrent()
{
    Assert(this->concurrentThread != NULL || this->recycler->threadService->HasCallback());
    Assert(this->concurrentWorkDoneEvent != NULL);
    DWORD ret = WaitForSingleObject(concurrentWorkDoneEvent, INFINITE);
    Assert(ret == WAIT_OBJECT_0);
}

void
RecyclerParallelThread::Shutdown()
{
    Assert(this->recycler->collectionState == CollectionStateExit);
    if (this->recycler->threadService->HasCallback())
    {
        if (this->concurrentWorkDoneEvent != NULL)
        {
            CloseHandle(this->concurrentWorkDoneEvent);
            this->concurrentWorkDoneEvent = NULL;
        }
    }
    else
    {
        if (this->concurrentThread != NULL)
        {
            HANDLE handles[2] = { concurrentWorkDoneEvent, concurrentThread };
            SetEvent(concurrentWorkReadyEvent);

            // During process shutdown, the OS might kill this (recycler parallel, i.e. concurrent)
            // thread, in which case it will not get a chance to signal concurrentWorkDoneEvent.
            // If the main (recycler) thread waited on concurrentWorkDoneEvent alone here, that
            // wait would never return. Hence we wait on both concurrentWorkDoneEvent and
            // concurrentThread, so that if the thread got killed the wait still returns and we
            // can proceed.
            DWORD fRet = WaitForMultipleObjectsEx(2, handles, FALSE, INFINITE, FALSE);
            AssertMsg(fRet != WAIT_FAILED, "Check handles passed to WaitForMultipleObjectsEx.");

            CloseHandle(this->concurrentWorkDoneEvent);
            this->concurrentWorkDoneEvent = NULL;
            CloseHandle(this->concurrentWorkReadyEvent);
            this->concurrentWorkReadyEvent = NULL;
            CloseHandle(this->concurrentThread);
            this->concurrentThread = NULL;
        }
    }
    Assert(this->concurrentThread == NULL);
    Assert(this->concurrentWorkReadyEvent == NULL);
    Assert(this->concurrentWorkDoneEvent == NULL);
}
// static
unsigned int
RecyclerParallelThread::StaticThreadProc(LPVOID lpParameter)
{
    DWORD ret = (DWORD)-1;
#if !DISABLE_SEH
    __try
    {
#endif
        RecyclerParallelThread * parallelThread = (RecyclerParallelThread *)lpParameter;
        Recycler * recycler = parallelThread->recycler;
        RecyclerParallelThread::WorkFunc workFunc = parallelThread->workFunc;
        Assert(recycler->IsConcurrentEnabled());

#if !defined(_UCRT)
        HMODULE dllHandle = NULL;
        if (!GetModuleHandleEx(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS, (LPCTSTR)&RecyclerParallelThread::StaticThreadProc, &dllHandle))
        {
            dllHandle = NULL;
        }
#endif

#if defined(ENABLE_JS_ETW) && !defined(ENABLE_JS_LTTNG)
        // LTTng has no concept of EventActivityIdControl.
        // Create an ETW ActivityId for this thread, to help tools correlate ETW events we generate.
        GUID activityId = { 0 };
        auto eventActivityIdControlResult = EventActivityIdControl(EVENT_ACTIVITY_CTRL_CREATE_SET_ID, &activityId);
        Assert(eventActivityIdControlResult == ERROR_SUCCESS);
#endif

        // If this thread was created on demand, we already have work to process and do not need to wait
        bool mustWait = parallelThread->synchronizeOnStartup;
        do
        {
            if (mustWait)
            {
                // Signal completion and wait for the next piece of work
                SetEvent(parallelThread->concurrentWorkDoneEvent);
                DWORD result = WaitForSingleObject(parallelThread->concurrentWorkReadyEvent, INFINITE);
                Assert(result == WAIT_OBJECT_0);
            }

            if (recycler->collectionState == CollectionStateExit)
            {
                // Exit the thread
                break;
            }

            // Invoke the workFunc to do the real work
            (recycler->*workFunc)();

            // We always wait after the first time
            mustWait = true;
        }
        while (true);

        // Signal to the main thread that we have stopped processing and will shut down.
        // Note that after this point, we cannot access anything on the Recycler instance,
        // because the main thread may have torn it down already.
        SetEvent(parallelThread->concurrentWorkDoneEvent);
#if !defined(_UCRT)
        if (dllHandle)
        {
            FreeLibraryAndExitThread(dllHandle, 0);
        }
#endif
        ret = 0;
#if !DISABLE_SEH
    }
    __except(Recycler::ExceptFilter(GetExceptionInformation()))
    {
        Assert(false);
    }
#endif
    return ret;
}

// static
void
RecyclerParallelThread::StaticBackgroundWorkCallback(void * callbackData)
{
    RecyclerParallelThread * parallelThread = (RecyclerParallelThread *)callbackData;
    Recycler * recycler = parallelThread->recycler;
    RecyclerParallelThread::WorkFunc workFunc = parallelThread->workFunc;
    (recycler->*workFunc)();
    SetEvent(parallelThread->concurrentWorkDoneEvent);
}
#endif
#ifdef RECYCLER_TRACE
void
Recycler::CaptureCollectionParam(CollectionFlags flags, bool repeat)
{
    collectionParam.priorityBoostConcurrentSweepOverride = false;
    collectionParam.repeat = repeat;
    collectionParam.finishOnly = false;
    collectionParam.flags = flags;
    collectionParam.uncollectedAllocBytes = autoHeap.uncollectedAllocBytes;
#if ENABLE_PARTIAL_GC
    collectionParam.uncollectedNewPageCountPartialCollect = this->uncollectedNewPageCountPartialCollect;
    collectionParam.inPartialCollectMode = inPartialCollectMode;
    collectionParam.uncollectedNewPageCount = autoHeap.uncollectedNewPageCount;
    collectionParam.unusedPartialCollectFreeBytes = autoHeap.unusedPartialCollectFreeBytes;
#endif
}
void
Recycler::PrintCollectTrace(Js::Phase phase, bool finish, bool noConcurrentWork)
{
    if (GetRecyclerFlagsTable().Trace.IsEnabled(Js::RecyclerPhase) ||
        GetRecyclerFlagsTable().Trace.IsEnabled(phase))
    {
        const BOOL allocSize = collectionParam.flags & CollectHeuristic_AllocSize;
        const BOOL timedIfScriptActive = collectionParam.flags & CollectHeuristic_TimeIfScriptActive;
        const BOOL timedIfInScript = collectionParam.flags & CollectHeuristic_TimeIfInScript;
        const BOOL timed = (timedIfScriptActive && isScriptActive) || (timedIfInScript && isInScript) || (collectionParam.flags & CollectHeuristic_Time);
        const BOOL concurrent = collectionParam.flags & CollectMode_Concurrent;
        const BOOL finishConcurrent = collectionParam.flags & CollectOverride_FinishConcurrent;
        const BOOL exhaustive = collectionParam.flags & CollectMode_Exhaustive;
        const BOOL forceInThread = collectionParam.flags & CollectOverride_ForceInThread;
        const BOOL forceFinish = collectionParam.flags & CollectOverride_ForceFinish;
#if ENABLE_PARTIAL_GC
        BOOL partial = collectionParam.flags & CollectMode_Partial;
#endif
        Output::Print(_u("%04X> RC(%p): %s%s%s%s%s%s%s:"), this->mainThreadId, this,
            collectionParam.domCollect ? _u("[DOM] ") : _u(""),
            collectionParam.repeat ? _u("[Repeat] ") : _u(""),
            this->inDispose ? _u("[Nested]") : _u(""),
            forceInThread ? _u("Force In thread ") : _u(""),
            finish ? _u("Finish ") : _u(""),
            exhaustive ? _u("Exhaustive ") : _u(""),
            Js::PhaseNames[phase]);

        if (noConcurrentWork)
        {
            Assert(finish);
            Output::Print(_u(" No concurrent work"));
        }
        else if (collectionParam.finishOnly)
        {
            Assert(!collectionParam.repeat);
            Assert(finish);
#if ENABLE_CONCURRENT_GC
            if (collectionState == CollectionStateRescanWait)
            {
                if (forceFinish)
                {
                    Output::Print(_u(" Force finish mark and sweep"));
                }
                else if (concurrent && this->enableConcurrentSweep)
                {
                    if (!collectionParam.priorityBoostConcurrentSweepOverride)
                    {
                        Output::Print(_u(" Finish mark and start concurrent sweep"));
                    }
                    else
                    {
                        Output::Print(_u(" Finish mark and sweep (priority boost overrode concurrent sweep)"));
                    }
                }
                else
                {
                    Output::Print(_u(" Finish mark and sweep"));
                }
            }
            else
            {
                Assert(collectionState == CollectionStateTransferSweptWait);
                if (forceFinish)
                {
                    Output::Print(_u(" Force finish sweep"));
                }
                else
                {
                    Output::Print(_u(" Finish sweep"));
                }
            }
#endif // ENABLE_CONCURRENT_GC
        }
        else
        {
            if (finish && !concurrent)
            {
                Output::Print(_u(" Not concurrent collect"));
            }
            if (finish && finishConcurrent)
            {
                Output::Print(_u(" No heuristic"));
            }
#if ENABLE_CONCURRENT_GC
            else if (finish && priorityBoost)
            {
                Output::Print(_u(" Priority boost no heuristic"));
            }
#endif
            else
            {
                Output::SkipToColumn(50);
                bool byteCountUsed = false;
                bool timeUsed = false;
#if ENABLE_PARTIAL_GC
                bool newPageUsed = false;
                if (phase == Js::PartialCollectPhase || phase == Js::ConcurrentPartialCollectPhase)
                {
                    Assert(collectionParam.flags & CollectMode_Partial);
                    newPageUsed = !!allocSize;
                }
                else if (partial && collectionParam.inPartialCollectMode && collectionParam.uncollectedNewPageCount > collectionParam.uncollectedNewPageCountPartialCollect)
                {
                    newPageUsed = true;
                }
                else
#endif // ENABLE_PARTIAL_GC
                {
                    byteCountUsed = !!allocSize;
                    timeUsed = !!timed;
                }
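                // Heuristic markers: '*' = this heuristic triggered the collection,
                // ' ' = requested but not the trigger, '~' = not requested.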
                Output::Print(byteCountUsed ? _u("*") : (allocSize ? _u(" ") : _u("~")));
                Output::Print(_u("B:%8d "), collectionParam.uncollectedAllocBytes);
                Output::Print(timeUsed ? _u("*") : (timed ? _u(" ") : _u("~")));
                Output::Print(_u("T:%4d "), -collectionParam.timeDiff);
#if ENABLE_PARTIAL_GC
                if (collectionParam.inPartialCollectMode)
                {
                    Output::Print(_u("L:%5d "), collectionParam.uncollectedNewPageCountPartialCollect);
                }
                else
                {
                    Output::Print(_u("L:----- "));
                }
                Output::Print(newPageUsed ? _u("*") : (partial ? _u(" ") : _u("~")));
                Output::Print(_u("P:%5d(%9d) "), collectionParam.uncollectedNewPageCount, collectionParam.uncollectedNewPageCount * AutoSystemInfo::PageSize);
                Output::Print(_u("U:%8d"), collectionParam.unusedPartialCollectFreeBytes);
#endif // ENABLE_PARTIAL_GC
            }
        }
        Output::Print(_u("\n"));
        Output::Flush();
    }
}
#endif

#ifdef RECYCLER_STATS
void
Recycler::PrintHeapBlockStats(char16 const * name, HeapBlock::HeapBlockType type)
{
    size_t liveCount = collectionStats.heapBlockCount[type] - collectionStats.heapBlockFreeCount[type];
    Output::Print(_u(" %6s : %5d %5d %5d %5.1f"), name,
        liveCount, collectionStats.heapBlockFreeCount[type], collectionStats.heapBlockCount[type],
        (double)collectionStats.heapBlockFreeCount[type] / (double)collectionStats.heapBlockCount[type] * 100);
    if (type < HeapBlock::SmallBlockTypeCount)
    {
        Output::Print(_u(" : %5d %6.1f : %5d %6.1f"),
            collectionStats.heapBlockSweptCount[type],
            (double)collectionStats.heapBlockSweptCount[type] / (double)liveCount * 100,
            collectionStats.heapBlockConcurrentSweptCount[type],
            (double)collectionStats.heapBlockConcurrentSweptCount[type] / (double)collectionStats.heapBlockSweptCount[type] * 100);
    }
}
void
Recycler::PrintHeapBlockMemoryStats(char16 const * name, HeapBlock::HeapBlockType type)
{
    size_t allocableFreeByteCount = collectionStats.heapBlockFreeByteCount[type];
#if ENABLE_PARTIAL_GC
    size_t partialUnusedBytes = 0;
    if (this->enablePartialCollect)
    {
        partialUnusedBytes = allocableFreeByteCount
            - collectionStats.smallNonLeafHeapBlockPartialReuseBytes[type];
        allocableFreeByteCount -= partialUnusedBytes;
    }
#endif
    size_t blockPages = type < HeapBlock::HeapBlockType::SmallAllocBlockTypeCount ?
        SmallAllocationBlockAttributes::PageCount : MediumAllocationBlockAttributes::PageCount;
    size_t totalByteCount = (collectionStats.heapBlockCount[type] - collectionStats.heapBlockFreeCount[type]) * blockPages * AutoSystemInfo::PageSize;
    size_t liveByteCount = totalByteCount - collectionStats.heapBlockFreeByteCount[type];
    Output::Print(_u(" %6s: %10d %10d"), name, liveByteCount, allocableFreeByteCount);
#if ENABLE_PARTIAL_GC
    if (this->enablePartialCollect &&
        (type == HeapBlock::HeapBlockType::SmallNormalBlockType
        || type == HeapBlock::HeapBlockType::SmallFinalizableBlockType
#ifdef RECYCLER_WRITE_BARRIER
        || type == HeapBlock::HeapBlockType::SmallNormalBlockWithBarrierType
        || type == HeapBlock::HeapBlockType::SmallFinalizableBlockWithBarrierType
#endif
        || type == HeapBlock::HeapBlockType::MediumNormalBlockType
        || type == HeapBlock::HeapBlockType::MediumFinalizableBlockType
#ifdef RECYCLER_WRITE_BARRIER
        || type == HeapBlock::HeapBlockType::MediumNormalBlockWithBarrierType
        || type == HeapBlock::HeapBlockType::MediumFinalizableBlockWithBarrierType
#endif
        ))
    {
        Output::Print(_u(" %10d"), partialUnusedBytes);
    }
    else
#endif
    {
        Output::Print(_u(" "));
    }
    Output::Print(_u(" %10d %6.1f"), totalByteCount,
        (double)allocableFreeByteCount / (double)totalByteCount * 100);
#if ENABLE_PARTIAL_GC
    if (this->enablePartialCollect &&
        (type == HeapBlock::HeapBlockType::SmallNormalBlockType
        || type == HeapBlock::HeapBlockType::SmallFinalizableBlockType
#ifdef RECYCLER_WRITE_BARRIER
        || type == HeapBlock::HeapBlockType::SmallNormalBlockWithBarrierType
        || type == HeapBlock::HeapBlockType::SmallFinalizableBlockWithBarrierType
#endif
        || type == HeapBlock::HeapBlockType::MediumNormalBlockType
        || type == HeapBlock::HeapBlockType::MediumFinalizableBlockType
#ifdef RECYCLER_WRITE_BARRIER
        || type == HeapBlock::HeapBlockType::MediumNormalBlockWithBarrierType
        || type == HeapBlock::HeapBlockType::MediumFinalizableBlockWithBarrierType
#endif
        ))
    {
        Output::Print(_u(" %6.1f"), (double)partialUnusedBytes / (double)totalByteCount * 100);
    }
#endif
}
void
Recycler::PrintHeuristicCollectionStats()
{
    Output::Print(_u("---------------------------------------------------------------------------------------------------------------\n"));
    Output::Print(_u("GC Trigger : %10s %10s %10s"), _u("Start"), _u("Continue"), _u("Finish"));
#if ENABLE_PARTIAL_GC
    if (this->enablePartialCollect)
    {
        Output::Print(_u(" | Heuristics : %10s %10s %5s"), _u(""), _u(""), _u("%"));
    }
#endif
    Output::Print(_u("\n"));
    Output::Print(_u("---------------------------------------------------------------------------------------------------------------\n"));
    Output::Print(_u(" Alloc bytes : %10d %10d %10d"), collectionStats.startCollectAllocBytes, collectionStats.continueCollectAllocBytes, this->autoHeap.uncollectedAllocBytes);
#if ENABLE_PARTIAL_GC
    if (this->enablePartialCollect)
    {
        Output::Print(_u(" | Cost : %10d %10d %5.1f"), collectionStats.rescanRootBytes, collectionStats.estimatedPartialReuseBytes, collectionStats.collectCost * 100);
    }
#endif
    Output::Print(_u("\n"));
#if ENABLE_PARTIAL_GC
    if (this->enablePartialCollect)
    {
        Output::Print(_u(" | Efficacy : %10s %10s %5.1f\n"), _u(""), _u(""), collectionStats.collectEfficacy * 100);
    }
#endif
#if ENABLE_PARTIAL_GC
    if (this->enablePartialCollect)
    {
        Output::Print(_u(" New page : %10d %10s %10d"), collectionStats.startCollectNewPageCount, _u(""), autoHeap.uncollectedNewPageCount);
        Output::Print(_u(" | Partial Uncollect New Page : %10d %10d"), collectionStats.uncollectedNewPageCountPartialCollect * AutoSystemInfo::PageSize, this->uncollectedNewPageCountPartialCollect * AutoSystemInfo::PageSize);
        Output::Print(_u("\n"));
    }
#endif
    Output::Print(_u(" Finish try : %10d %10s %10s"), collectionStats.finishCollectTryCount, _u(""), _u(""));
#if ENABLE_PARTIAL_GC
    if (this->enablePartialCollect)
    {
        Output::Print(_u(" | Partial Reuse Min Free Bytes : %10d"), collectionStats.partialCollectSmallHeapBlockReuseMinFreeBytes * AutoSystemInfo::PageSize);
    }
#endif
    Output::Print(_u("\n"));
}

void
Recycler::PrintMarkCollectionStats()
{
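    // Derived counts: nonMark covers pointers examined that produced no new mark;
    // invalidCount is the nonMark remainder not explained by the null/unaligned/
    // non-recycler buckets; leafCount is marked objects not scanned for children.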
    size_t nonMark = collectionStats.tryMarkCount + collectionStats.tryMarkInteriorCount - collectionStats.remarkCount - collectionStats.markData.markCount;
    size_t invalidCount = nonMark - collectionStats.tryMarkNullCount - collectionStats.tryMarkUnalignedCount
        - collectionStats.tryMarkNonRecyclerMemoryCount
        - collectionStats.tryMarkInteriorNonRecyclerMemoryCount
        - collectionStats.tryMarkInteriorNullCount;
    size_t leafCount = collectionStats.markData.markCount - collectionStats.scanCount;
    Output::Print(_u("---------------------------------------------------------------------------------------------------------------\n"));
    Output::Print(_u("Try Mark :%9s %5s %10s | Non-Mark : %9s %5s | Mark :%9s %5s \n"), _u("Count"), _u("%"), _u("Bytes"), _u("Count"), _u("%"), _u("Count"), _u("%"));
    Output::Print(_u("---------------------------------------------------------------------------------------------------------------\n"));
    Output::Print(_u(" TryMark :%9d %10d | Null : %9d %5.1f | Scan :%9d %5.1f\n"),
        collectionStats.tryMarkCount, collectionStats.tryMarkCount * sizeof(void *),
        collectionStats.tryMarkNullCount, (double)collectionStats.tryMarkNullCount / (double)nonMark * 100,
        collectionStats.scanCount, (double)collectionStats.scanCount / (double)collectionStats.markData.markCount * 100);
    Output::Print(_u(" Non-Mark :%9d %5.1f | Unaligned : %9d %5.1f | Leaf :%9d %5.1f\n"),
        nonMark, (double)nonMark / (double)collectionStats.tryMarkCount * 100,
        collectionStats.tryMarkUnalignedCount, (double)collectionStats.tryMarkUnalignedCount / (double)nonMark * 100,
        leafCount, (double)leafCount / (double)collectionStats.markData.markCount * 100);
    Output::Print(_u(" Mark :%9d %5.1f %10d | Non GC : %9d %5.1f | Track :%9d\n"),
        collectionStats.markData.markCount, (double)collectionStats.markData.markCount / (double)collectionStats.tryMarkCount * 100, collectionStats.markData.markBytes,
        collectionStats.tryMarkNonRecyclerMemoryCount, (double)collectionStats.tryMarkNonRecyclerMemoryCount / (double)nonMark * 100,
        collectionStats.trackCount);
    Output::Print(_u(" Remark :%9d %5.1f | Invalid : %9d %5.1f \n"),
        collectionStats.remarkCount, (double)collectionStats.remarkCount / (double)collectionStats.tryMarkCount * 100,
        invalidCount, (double)invalidCount / (double)nonMark * 100);
    Output::Print(_u(" TryMark Int:%9d %10d | Null Int : %9d %5.1f | Root :%9d | New :%9d\n"),
        collectionStats.tryMarkInteriorCount, collectionStats.tryMarkInteriorCount * sizeof(void *),
        collectionStats.tryMarkInteriorNullCount, (double)collectionStats.tryMarkInteriorNullCount / (double)nonMark * 100,
        collectionStats.rootCount, collectionStats.markThruNewObjCount);
    Output::Print(_u(" | Non GC Int: %9d %5.1f | Stack :%9d | NewFalse:%9d\n"),
        collectionStats.tryMarkInteriorNonRecyclerMemoryCount, (double)collectionStats.tryMarkInteriorNonRecyclerMemoryCount / (double)nonMark * 100,
        collectionStats.stackCount, collectionStats.markThruFalseNewObjCount);
}
void
Recycler::PrintBackgroundCollectionStat(RecyclerCollectionStats::MarkData const& markData)
{
    Output::Print(_u("BgSmall : %5d %6d %10d | BgLarge : %5d %6d %10d | BgMark :%9d "),
        markData.rescanPageCount,
        markData.rescanObjectCount,
        markData.rescanObjectByteCount,
        markData.rescanLargePageCount,
        markData.rescanLargeObjectCount,
        markData.rescanLargeByteCount,
        markData.markCount);
    double markRatio = (double)markData.markCount / (double)collectionStats.markData.markCount * 100;
    if (markRatio == 100.0)
    {
        Output::Print(_u(" 100"));
    }
    else
    {
        Output::Print(_u("%4.1f"), markRatio);
    }
    Output::Print(_u("\n"));
}
void
Recycler::PrintBackgroundCollectionStats()
{
#if ENABLE_CONCURRENT_GC
    Output::Print(_u("---------------------------------------------------------------------------------------------------------------\n"));
    Output::Print(_u("BgSmall : %5s %6s %10s | BgLarge : %5s %6s %10s | BgMark :%9s %4s %s\n"),
        _u("Pages"), _u("Count"), _u("Bytes"), _u("Pages"), _u("Count"), _u("Bytes"), _u("Count"), _u("%"), _u("NonLeafBytes %"));
    Output::Print(_u("---------------------------------------------------------------------------------------------------------------\n"));
    this->PrintBackgroundCollectionStat(collectionStats.backgroundMarkData[0]);
    for (uint repeatCount = 1; repeatCount < RecyclerHeuristic::MaxBackgroundRepeatMarkCount; repeatCount++)
    {
        if (collectionStats.backgroundMarkData[repeatCount].markCount == 0)
        {
            break;
        }
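        // The rescan counters accumulate across repeat marks; convert them to
        // per-pass deltas before printing this pass's row.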
        collectionStats.backgroundMarkData[repeatCount].rescanPageCount -= collectionStats.backgroundMarkData[repeatCount - 1].rescanPageCount;
        collectionStats.backgroundMarkData[repeatCount].rescanObjectCount -= collectionStats.backgroundMarkData[repeatCount - 1].rescanObjectCount;
        collectionStats.backgroundMarkData[repeatCount].rescanObjectByteCount -= collectionStats.backgroundMarkData[repeatCount - 1].rescanObjectByteCount;
        collectionStats.backgroundMarkData[repeatCount].rescanLargePageCount -= collectionStats.backgroundMarkData[repeatCount - 1].rescanLargePageCount;
        collectionStats.backgroundMarkData[repeatCount].rescanLargeObjectCount -= collectionStats.backgroundMarkData[repeatCount - 1].rescanLargeObjectCount;
        collectionStats.backgroundMarkData[repeatCount].rescanLargeByteCount -= collectionStats.backgroundMarkData[repeatCount - 1].rescanLargeByteCount;
        this->PrintBackgroundCollectionStat(collectionStats.backgroundMarkData[repeatCount]);
    }
#endif
}
void
Recycler::PrintMemoryStats()
{
    Output::Print(_u("----------------------------------------------------------------------------------------------------------------\n"));
    Output::Print(_u("Memory (Bytes) %4s %10s %10s %10s %6s %6s\n"), _u("Live"), _u("Free"), _u("Unused"), _u("Total"), _u("Free%"), _u("Unused%"));
    Output::Print(_u("----------------------------------------------------------------------------------------------------------------\n"));
    PrintHeapBlockMemoryStats(_u("Small"), HeapBlock::SmallNormalBlockType);
    Output::Print(_u("\n"));
    PrintHeapBlockMemoryStats(_u("SmFin"), HeapBlock::SmallFinalizableBlockType);
    Output::Print(_u("\n"));
#ifdef RECYCLER_WRITE_BARRIER
    PrintHeapBlockMemoryStats(_u("SmSWB"), HeapBlock::SmallNormalBlockWithBarrierType);
    Output::Print(_u("\n"));
    PrintHeapBlockMemoryStats(_u("SmFinSWB"), HeapBlock::SmallFinalizableBlockWithBarrierType);
    Output::Print(_u("\n"));
#endif
    PrintHeapBlockMemoryStats(_u("SmLeaf"), HeapBlock::SmallLeafBlockType);
    Output::Print(_u("\n"));
    PrintHeapBlockMemoryStats(_u("Medium"), HeapBlock::MediumNormalBlockType);
    Output::Print(_u("\n"));
    PrintHeapBlockMemoryStats(_u("MdFin"), HeapBlock::MediumFinalizableBlockType);
    Output::Print(_u("\n"));
#ifdef RECYCLER_WRITE_BARRIER
    PrintHeapBlockMemoryStats(_u("MdSWB"), HeapBlock::MediumNormalBlockWithBarrierType);
    Output::Print(_u("\n"));
    PrintHeapBlockMemoryStats(_u("MdFinSWB"), HeapBlock::MediumFinalizableBlockWithBarrierType);
    Output::Print(_u("\n"));
#endif
    PrintHeapBlockMemoryStats(_u("MdLeaf"), HeapBlock::MediumLeafBlockType);
    Output::Print(_u("\n"));

    size_t largeHeapBlockUnusedByteCount = collectionStats.largeHeapBlockTotalByteCount - collectionStats.largeHeapBlockUsedByteCount
        - collectionStats.heapBlockFreeByteCount[HeapBlock::LargeBlockType];
    Output::Print(_u(" Large: %10d %10d %10d %10d %6.1f %6.1f\n"),
        collectionStats.largeHeapBlockUsedByteCount,
        collectionStats.heapBlockFreeByteCount[HeapBlock::LargeBlockType],
        largeHeapBlockUnusedByteCount,
        collectionStats.largeHeapBlockTotalByteCount,
        (double)collectionStats.heapBlockFreeByteCount[HeapBlock::LargeBlockType] / (double)collectionStats.largeHeapBlockTotalByteCount * 100,
        (double)largeHeapBlockUnusedByteCount / (double)collectionStats.largeHeapBlockTotalByteCount * 100);

    Output::Print(_u("\nSmall heap block zeroing stats since last GC\n"));
    Output::Print(_u("Number of blocks with sweep state empty: normal=%d finalizable=%d leaf=%d\nNumber of blocks zeroed: %d\n"),
        collectionStats.numEmptySmallBlocks[HeapBlock::SmallNormalBlockType]
#ifdef RECYCLER_WRITE_BARRIER
        + collectionStats.numEmptySmallBlocks[HeapBlock::SmallNormalBlockWithBarrierType]
#endif
        + collectionStats.numEmptySmallBlocks[HeapBlock::MediumNormalBlockType]
#ifdef RECYCLER_WRITE_BARRIER
        + collectionStats.numEmptySmallBlocks[HeapBlock::MediumNormalBlockWithBarrierType]
#endif
        , collectionStats.numEmptySmallBlocks[HeapBlock::SmallFinalizableBlockType]
#ifdef RECYCLER_WRITE_BARRIER
        + collectionStats.numEmptySmallBlocks[HeapBlock::SmallFinalizableBlockWithBarrierType]
#endif
        + collectionStats.numEmptySmallBlocks[HeapBlock::MediumFinalizableBlockType]
#ifdef RECYCLER_WRITE_BARRIER
        + collectionStats.numEmptySmallBlocks[HeapBlock::MediumFinalizableBlockWithBarrierType]
#endif
        , collectionStats.numEmptySmallBlocks[HeapBlock::SmallLeafBlockType]
        + collectionStats.numEmptySmallBlocks[HeapBlock::MediumLeafBlockType],
        collectionStats.numZeroedOutSmallBlocks);
}
void
Recycler::PrintCollectStats()
{
    Output::Print(_u("Collection Stats:\n"));
    PrintHeuristicCollectionStats();
    PrintMarkCollectionStats();
    PrintBackgroundCollectionStats();

    size_t freeCount = collectionStats.objectSweptCount - collectionStats.objectSweptFreeListCount;
    size_t freeBytes = collectionStats.objectSweptBytes - collectionStats.objectSweptFreeListBytes;
    Output::Print(_u("---------------------------------------------------------------------------------------------------------------\n"));
#if ENABLE_PARTIAL_GC || ENABLE_CONCURRENT_GC
    Output::Print(_u("Rescan : %5s %6s %10s | Track : %5s | "), _u("Pages"), _u("Count"), _u("Bytes"), _u("Count"));
#endif
    Output::Print(_u("Sweep : %7s | SweptObj : %5s %5s %10s\n"), _u("Count"), _u("Count"), _u("%%"), _u("Bytes"));
    Output::Print(_u("---------------------------------------------------------------------------------------------------------------\n"));
    Output::Print(_u(" Small : "));
#if ENABLE_PARTIAL_GC || ENABLE_CONCURRENT_GC
    Output::Print(_u("%5d %6d %10d | "), collectionStats.markData.rescanPageCount, collectionStats.markData.rescanObjectCount, collectionStats.markData.rescanObjectByteCount);
#endif
#if ENABLE_CONCURRENT_GC
    Output::Print(_u("Process : %5d | "), collectionStats.trackedObjectCount);
#else
    Output::Print(_u(" | "));
#endif
    Output::Print(_u(" Scan : %7d | Free : %6d %5.1f %10d\n"),
        collectionStats.objectSweepScanCount,
        freeCount, (double)freeCount / (double)collectionStats.objectSweptCount * 100, freeBytes);
    Output::Print(_u(" Large : "));
#if ENABLE_PARTIAL_GC || ENABLE_CONCURRENT_GC
    Output::Print(_u("%5d %6d %10d | "),
        collectionStats.markData.rescanLargePageCount, collectionStats.markData.rescanLargeObjectCount, collectionStats.markData.rescanLargeByteCount);
#endif
#if ENABLE_PARTIAL_GC
    Output::Print(_u("Client : %5d | "), collectionStats.clientTrackedObjectCount);
#else
    Output::Print(_u(" | "));
#endif
    Output::Print(_u(" Finalize : %7d | Free List: %6d %5.1f %10d\n"),
        collectionStats.finalizeSweepCount,
        collectionStats.objectSweptFreeListCount, (double)collectionStats.objectSweptFreeListCount / (double)collectionStats.objectSweptCount * 100, collectionStats.objectSweptFreeListBytes);
    Output::Print(_u("---------------------------------------------------------------------------------------------------------------\n"));
    Output::Print(_u("SweptBlk: Live Free Total Free%% : Swept Swept%% : CSwpt CSwpt%%"));
#if ENABLE_PARTIAL_GC
    if (this->enablePartialCollect)
    {
        Output::Print(_u(" | Partial : Count Bytes Existing"));
    }
#endif
    Output::Print(_u("\n"));
    Output::Print(_u("---------------------------------------------------------------------------------------------------------------\n"));
    PrintHeapBlockStats(_u("Small"), HeapBlock::SmallNormalBlockType);
#if ENABLE_PARTIAL_GC
    if (this->enablePartialCollect)
    {
        Output::Print(_u(" | Reuse : %5d %10d %10d"),
            collectionStats.smallNonLeafHeapBlockPartialReuseCount[HeapBlock::SmallNormalBlockType],
            collectionStats.smallNonLeafHeapBlockPartialReuseBytes[HeapBlock::SmallNormalBlockType],
            collectionStats.smallNonLeafHeapBlockPartialReuseCount[HeapBlock::SmallNormalBlockType] * AutoSystemInfo::PageSize
            - collectionStats.smallNonLeafHeapBlockPartialReuseBytes[HeapBlock::SmallNormalBlockType]);
    }
#endif
    Output::Print(_u("\n"));
    PrintHeapBlockStats(_u("SmFin"), HeapBlock::SmallFinalizableBlockType);
#if ENABLE_PARTIAL_GC
    if (this->enablePartialCollect)
    {
        Output::Print(_u(" | Unused : %5d %10d %10d"),
            collectionStats.smallNonLeafHeapBlockPartialUnusedCount[HeapBlock::SmallFinalizableBlockType],
            collectionStats.smallNonLeafHeapBlockPartialUnusedBytes[HeapBlock::SmallFinalizableBlockType],
            collectionStats.smallNonLeafHeapBlockPartialUnusedCount[HeapBlock::SmallFinalizableBlockType] * AutoSystemInfo::PageSize
            - collectionStats.smallNonLeafHeapBlockPartialUnusedBytes[HeapBlock::SmallFinalizableBlockType]);
    }
#endif
    Output::Print(_u("\n"));
#ifdef RECYCLER_WRITE_BARRIER
    PrintHeapBlockStats(_u("SmSWB"), HeapBlock::SmallNormalBlockWithBarrierType);
#if ENABLE_PARTIAL_GC
    if (this->enablePartialCollect)
    {
        Output::Print(_u(" | Unused : %5d %10d %10d"),
            collectionStats.smallNonLeafHeapBlockPartialUnusedCount[HeapBlock::SmallNormalBlockWithBarrierType],
            collectionStats.smallNonLeafHeapBlockPartialUnusedBytes[HeapBlock::SmallNormalBlockWithBarrierType],
            collectionStats.smallNonLeafHeapBlockPartialUnusedCount[HeapBlock::SmallNormalBlockWithBarrierType] * AutoSystemInfo::PageSize
            - collectionStats.smallNonLeafHeapBlockPartialUnusedBytes[HeapBlock::SmallNormalBlockWithBarrierType]);
    }
#endif
    Output::Print(_u("\n"));
    PrintHeapBlockStats(_u("SmFinSWB"), HeapBlock::SmallFinalizableBlockWithBarrierType);
#if ENABLE_PARTIAL_GC
    if (this->enablePartialCollect)
    {
        Output::Print(_u(" | Unused : %5d %10d %10d"),
            collectionStats.smallNonLeafHeapBlockPartialUnusedCount[HeapBlock::SmallFinalizableBlockWithBarrierType],
            collectionStats.smallNonLeafHeapBlockPartialUnusedBytes[HeapBlock::SmallFinalizableBlockWithBarrierType],
            collectionStats.smallNonLeafHeapBlockPartialUnusedCount[HeapBlock::SmallFinalizableBlockWithBarrierType] * AutoSystemInfo::PageSize
            - collectionStats.smallNonLeafHeapBlockPartialUnusedBytes[HeapBlock::SmallFinalizableBlockWithBarrierType]);
    }
#endif
    Output::Print(_u("\n"));
#endif
    // TODO: This seems suspicious - why are we looking at smallNonLeaf counters while printing the leaf row...
    PrintHeapBlockStats(_u("SmLeaf"), HeapBlock::SmallLeafBlockType);
#if ENABLE_PARTIAL_GC
    if (this->enablePartialCollect)
    {
        Output::Print(_u(" | ReuseFin : %5d %10d %10d"),
            collectionStats.smallNonLeafHeapBlockPartialReuseCount[HeapBlock::SmallFinalizableBlockType],
            collectionStats.smallNonLeafHeapBlockPartialReuseBytes[HeapBlock::SmallFinalizableBlockType],
            collectionStats.smallNonLeafHeapBlockPartialReuseCount[HeapBlock::SmallFinalizableBlockType] * AutoSystemInfo::PageSize
            - collectionStats.smallNonLeafHeapBlockPartialReuseBytes[HeapBlock::SmallFinalizableBlockType]);
    }
#endif
    Output::Print(_u("\n"));
    PrintHeapBlockStats(_u("Medium"), HeapBlock::MediumNormalBlockType);
#if ENABLE_PARTIAL_GC
    if (this->enablePartialCollect)
    {
        Output::Print(_u(" | Reuse : %5d %10d %10d"),
            collectionStats.smallNonLeafHeapBlockPartialReuseCount[HeapBlock::MediumNormalBlockType],
            collectionStats.smallNonLeafHeapBlockPartialReuseBytes[HeapBlock::MediumNormalBlockType],
  6252. collectionStats.smallNonLeafHeapBlockPartialReuseCount[HeapBlock::MediumNormalBlockType] * AutoSystemInfo::PageSize
  6253. - collectionStats.smallNonLeafHeapBlockPartialReuseBytes[HeapBlock::MediumNormalBlockType]);
  6254. }
  6255. #endif
  6256. Output::Print(_u("\n"));
  6257. PrintHeapBlockStats(_u("MdFin"), HeapBlock::MediumFinalizableBlockType);
  6258. #if ENABLE_PARTIAL_GC
  6259. if (this->enablePartialCollect)
  6260. {
  6261. Output::Print(_u(" | Unused : %5d %10d %10d"),
  6262. collectionStats.smallNonLeafHeapBlockPartialUnusedCount[HeapBlock::MediumFinalizableBlockType],
  6263. collectionStats.smallNonLeafHeapBlockPartialUnusedBytes[HeapBlock::MediumFinalizableBlockType],
  6264. collectionStats.smallNonLeafHeapBlockPartialUnusedCount[HeapBlock::MediumFinalizableBlockType] * AutoSystemInfo::PageSize
  6265. - collectionStats.smallNonLeafHeapBlockPartialUnusedBytes[HeapBlock::MediumFinalizableBlockType]);
  6266. }
  6267. #endif
  6268. Output::Print(_u("\n"));
  6269. #ifdef RECYCLER_WRITE_BARRIER
  6270. PrintHeapBlockStats(_u("MdSWB"), HeapBlock::MediumNormalBlockWithBarrierType);
  6271. #if ENABLE_PARTIAL_GC
  6272. if (this->enablePartialCollect)
  6273. {
  6274. Output::Print(_u(" | Unused : %5d %10d %10d"),
  6275. collectionStats.smallNonLeafHeapBlockPartialUnusedCount[HeapBlock::MediumNormalBlockWithBarrierType],
  6276. collectionStats.smallNonLeafHeapBlockPartialUnusedBytes[HeapBlock::MediumNormalBlockWithBarrierType],
  6277. collectionStats.smallNonLeafHeapBlockPartialUnusedCount[HeapBlock::MediumNormalBlockWithBarrierType] * AutoSystemInfo::PageSize
  6278. - collectionStats.smallNonLeafHeapBlockPartialUnusedBytes[HeapBlock::MediumNormalBlockWithBarrierType]);
  6279. }
  6280. #endif
  6281. Output::Print(_u("\n"));
  6282. PrintHeapBlockStats(_u("MdFin"), HeapBlock::MediumFinalizableBlockWithBarrierType);
  6283. #if ENABLE_PARTIAL_GC
  6284. if (this->enablePartialCollect)
  6285. {
  6286. Output::Print(_u(" | Unused : %5d %10d %10d"),
  6287. collectionStats.smallNonLeafHeapBlockPartialUnusedCount[HeapBlock::MediumFinalizableBlockWithBarrierType],
  6288. collectionStats.smallNonLeafHeapBlockPartialUnusedBytes[HeapBlock::MediumFinalizableBlockWithBarrierType],
  6289. collectionStats.smallNonLeafHeapBlockPartialUnusedCount[HeapBlock::MediumFinalizableBlockWithBarrierType] * AutoSystemInfo::PageSize
  6290. - collectionStats.smallNonLeafHeapBlockPartialUnusedBytes[HeapBlock::MediumFinalizableBlockWithBarrierType]);
  6291. }
  6292. #endif
  6293. Output::Print(_u("\n"));
  6294. #endif
  6295. // TODO: This seems suspicious- why are we looking at smallNonLeaf while print out leaf...
  6296. PrintHeapBlockStats(_u("MdLeaf"), HeapBlock::MediumNormalBlockType);
  6297. #if ENABLE_PARTIAL_GC
  6298. if (this->enablePartialCollect)
  6299. {
  6300. Output::Print(_u(" | ReuseFin : %5d %10d %10d"),
  6301. collectionStats.smallNonLeafHeapBlockPartialReuseCount[HeapBlock::MediumFinalizableBlockType],
  6302. collectionStats.smallNonLeafHeapBlockPartialReuseBytes[HeapBlock::MediumFinalizableBlockType],
  6303. collectionStats.smallNonLeafHeapBlockPartialReuseCount[HeapBlock::MediumFinalizableBlockType] * AutoSystemInfo::PageSize
  6304. - collectionStats.smallNonLeafHeapBlockPartialReuseBytes[HeapBlock::MediumFinalizableBlockType]);
  6305. }
  6306. #endif
  6307. Output::Print(_u("\n"));
  6308. // TODO: This can't possibly be correct...check on this later
  6309. PrintHeapBlockStats(_u("Large"), HeapBlock::LargeBlockType);
  6310. #if ENABLE_PARTIAL_GC
  6311. if (this->enablePartialCollect)
  6312. {
  6313. Output::Print(_u(" | UnusedFin : %5d %10d %10d"),
  6314. collectionStats.smallNonLeafHeapBlockPartialUnusedCount[HeapBlock::SmallFinalizableBlockType],
  6315. collectionStats.smallNonLeafHeapBlockPartialUnusedBytes[HeapBlock::SmallFinalizableBlockType],
  6316. collectionStats.smallNonLeafHeapBlockPartialUnusedCount[HeapBlock::SmallFinalizableBlockType] * AutoSystemInfo::PageSize
  6317. - collectionStats.smallNonLeafHeapBlockPartialUnusedBytes[HeapBlock::SmallFinalizableBlockType]);
  6318. }
  6319. #endif
  6320. Output::Print(_u("\n"));
  6321. PrintMemoryStats();
  6322. Output::Flush();
  6323. }
  6324. #endif
#ifdef RECYCLER_PAGE_HEAP
void Recycler::VerifyPageHeapFillAfterAlloc(char* memBlock, size_t size, ObjectInfoBits attributes)
{
    Assert(memBlock != nullptr);
    if (IsPageHeapEnabled())
    {
        HeapBlock* heapBlock = this->FindHeapBlock(memBlock);
        Assert(heapBlock);
        if (heapBlock->IsLargeHeapBlock())
        {
            LargeHeapBlock* largeHeapBlock = (LargeHeapBlock*)heapBlock;
            if (largeHeapBlock->InPageHeapMode()
#ifdef RECYCLER_NO_PAGE_REUSE
                && !largeHeapBlock->GetPageAllocator(this)->IsPageReuseDisabled()
#endif
                )
            {
                largeHeapBlock->VerifyPageHeapPattern();
            }
        }
    }
}
#endif
#ifdef RECYCLER_ZERO_MEM_CHECK
void
Recycler::VerifyZeroFill(void * address, size_t size)
{
    byte expectedFill = 0;
#ifdef RECYCLER_MEMORY_VERIFY
    if (this->VerifyEnabled())
    {
        expectedFill = Recycler::VerifyMemFill;
    }
#endif
    Assert(IsAll((byte *)address, size, expectedFill));
}
#endif
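
// Editor's note: a sketch of the RECYCLER_MEMORY_VERIFY allocation layout, inferred
// from FillCheckPad/FillPadNoCheck/VerifyCheckPad below (not an authoritative diagram):
//
//   address                          address + alignedAllocSize
//   |<----------- size ----------->|<--------- pad ---------->|
//   [ object payload               ][ fill bytes ][ pad size  ]
//                                                 ^ trailing size_t written by
//                                                   FillPadNoCheck (= alignedAllocSize - size)
//
// VerifyCheckPad reads the trailing size_t to locate the pad, then checks that every
// pad byte still holds Recycler::VerifyMemFill; a mismatch is reported as a buffer
// overflow. A pad word of 0xCACACACACACACACA marks an object that was never
// initialized with a pad size (see the nascent-block case in VerifyCheckPad).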
#ifdef RECYCLER_MEMORY_VERIFY
void
Recycler::FillCheckPad(void * address, size_t size, size_t alignedAllocSize, bool objectAlreadyInitialized)
{
    if (this->VerifyEnabled())
    {
        void* addressToVerify = address;
        size_t sizeToVerify = alignedAllocSize;
        if (objectAlreadyInitialized)
        {
            addressToVerify = ((char*)address + size);
            sizeToVerify = (alignedAllocSize - size);
        }
        else
        {
            // It could be the case that an uninitialized object already has a dummy vtable installed
            // at the beginning of the address. If that is the case, we can't verify the fill pattern
            // on that memory, since it's already been initialized.
            // Note that FillPadNoCheck will skip over the first sizeof(FreeObject) bytes, which
            // prevents overwriting of the vtable.
            static_assert(sizeof(DummyVTableObject) == sizeof(void*), "Incorrect size for a DummyVTableObject - it must contain a single v-table pointer");
            DummyVTableObject dummy;
            if ((*(void**)(&dummy)) == *((void**)address))
            {
                addressToVerify = (char*)address + sizeof(DummyVTableObject);
                sizeToVerify = alignedAllocSize - sizeof(DummyVTableObject);
            }
        }
        // Verify the fill pattern (excluding the trailing pad-size word), then re-fill the non-pad region with zero
        VerifyCheckFill(addressToVerify, sizeToVerify - sizeof(size_t));
        FillPadNoCheck(address, size, alignedAllocSize, objectAlreadyInitialized);
    }
}
void
Recycler::FillPadNoCheck(void * address, size_t size, size_t alignedAllocSize, bool objectAlreadyInitialized)
{
    // Ignore the first word
    if (!objectAlreadyInitialized && size > sizeof(FreeObject))
    {
        memset((char *)address + sizeof(FreeObject), 0, size - sizeof(FreeObject));
    }
    // write the pad size at the end
    *(size_t *)((char *)address + alignedAllocSize - sizeof(size_t)) = alignedAllocSize - size;
}
void Recycler::Verify(Js::Phase phase)
{
    if (verifyEnabled && (!this->CollectionInProgress()))
    {
        if (GetRecyclerFlagsTable().RecyclerVerify.IsEnabled(phase))
        {
            autoHeap.Verify();
        }
    }
}
void Recycler::VerifyCheck(BOOL cond, char16 const * msg, void * address, void * corruptedAddress)
{
    if (!(cond))
    {
        fwprintf(stderr, _u("RECYCLER CORRUPTION: StartAddress=%p CorruptedAddress=%p: %s"), address, corruptedAddress, msg);
        Js::Throw::FatalInternalError();
    }
}

void Recycler::VerifyCheckFill(void * address, size_t size)
{
    Assert(IsAll((byte*)address, size, Recycler::VerifyMemFill));
}
void Recycler::VerifyCheckPadExplicitFreeList(void * address, size_t size)
{
    size_t * paddingAddress = (size_t *)((byte *)address + size - sizeof(size_t));
    size_t padding = *paddingAddress;
#pragma warning(suppress:4310)
    Assert(padding != (size_t)0xCACACACACACACACA); // Explicit free objects have to have been initialized at some point before they were freed
    Recycler::VerifyCheck(padding >= verifyPad + sizeof(size_t) && padding < size, _u("Invalid padding size"), address, paddingAddress);
    for (byte * i = (byte *)address + size - padding; i < (byte *)paddingAddress; i++)
    {
        Recycler::VerifyCheck(*i == Recycler::VerifyMemFill, _u("buffer overflow"), address, i);
    }
}

void Recycler::VerifyCheckPad(void * address, size_t size)
{
    size_t * paddingAddress = (size_t *)((byte *)address + size - sizeof(size_t));
    size_t padding = *paddingAddress;
#pragma warning(suppress:4310)
    if (padding == (size_t)0xCACACACACACACACA)
    {
        // Nascent blocks have objects that are not initialized with a pad size
        Recycler::VerifyCheckFill(address, size);
        return;
    }
    Recycler::VerifyCheck(padding >= verifyPad + sizeof(size_t) && padding < size, _u("Invalid padding size"), address, paddingAddress);
    for (byte * i = (byte *)address + size - padding; i < (byte *)paddingAddress; i++)
    {
        Recycler::VerifyCheck(*i == Recycler::VerifyMemFill, _u("buffer overflow"), address, i);
    }
}
#endif
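
// AutoSetupRecyclerForNonCollectingMark is an RAII helper: it snapshots the collection
// state (and, under RECYCLER_STATS, the stats block) so that a mark can be run outside
// a real collection, and restores everything in the destructor. A minimal usage sketch,
// assuming a recycler that is not currently collecting (DumpObjectGraph below is the
// real in-tree caller; Mark() may not be reachable from arbitrary code):
//
//   {
//       Recycler::AutoSetupRecyclerForNonCollectingMark setup(*recycler);
//       recycler->Mark(); // non-collecting mark; stats accumulate into a fresh block
//   }                     // previous state and stats restored here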
Recycler::AutoSetupRecyclerForNonCollectingMark::AutoSetupRecyclerForNonCollectingMark(Recycler& recycler, bool setupForHeapEnumeration)
    : m_recycler(recycler), m_setupDone(false)
{
    if (!setupForHeapEnumeration)
    {
        DoCommonSetup();
    }
}

void Recycler::AutoSetupRecyclerForNonCollectingMark::DoCommonSetup()
{
    Assert(m_recycler.collectionState == CollectionStateNotCollecting || m_recycler.collectionState == CollectionStateExit);
#if ENABLE_CONCURRENT_GC
    Assert(!m_recycler.DoQueueTrackedObject());
#endif
#if ENABLE_PARTIAL_GC
    // We need to get out of partial collect before we do the mark because we
    // will mess with the free bit vector state
    // GC-CONSIDER: don't mess with the free bit vector?
    if (m_recycler.inPartialCollectMode)
    {
        m_recycler.FinishPartialCollect();
    }
#endif
    m_previousCollectionState = m_recycler.collectionState;
#ifdef RECYCLER_STATS
    m_previousCollectionStats = m_recycler.collectionStats;
    memset(&m_recycler.collectionStats, 0, sizeof(RecyclerCollectionStats));
#endif
    m_setupDone = true;
}

void Recycler::AutoSetupRecyclerForNonCollectingMark::SetupForHeapEnumeration()
{
    Assert(!m_recycler.isHeapEnumInProgress);
    Assert(!m_recycler.allowAllocationDuringHeapEnum);
    m_recycler.EnsureNotCollecting();
    DoCommonSetup();
    m_recycler.ResetMarks(ResetMarkFlags_HeapEnumeration);
    m_recycler.collectionState = CollectionStateNotCollecting;
    m_recycler.isHeapEnumInProgress = true;
    m_recycler.isCollectionDisabled = true;
}

Recycler::AutoSetupRecyclerForNonCollectingMark::~AutoSetupRecyclerForNonCollectingMark()
{
    Assert(m_setupDone);
    Assert(!m_recycler.allowAllocationDuringHeapEnum);
#ifdef RECYCLER_STATS
    m_recycler.collectionStats = m_previousCollectionStats;
#endif
    m_recycler.collectionState = m_previousCollectionState;
    m_recycler.isHeapEnumInProgress = false;
    m_recycler.isCollectionDisabled = false;
}
#ifdef RECYCLER_DUMP_OBJECT_GRAPH
bool Recycler::DumpObjectGraph(RecyclerObjectGraphDumper::Param * param)
{
    bool succeeded = false;
    bool isExited = (this->collectionState == CollectionStateExit);
    if (isExited)
    {
        this->collectionState = CollectionStateNotCollecting;
    }
    if (this->collectionState != CollectionStateNotCollecting)
    {
        Output::Print(_u("Can't dump object graph when collecting\n"));
        Output::Flush();
        return succeeded;
    }
    BEGIN_NO_EXCEPTION
    {
        RecyclerObjectGraphDumper objectGraphDumper(this, param);
        Recycler::AutoSetupRecyclerForNonCollectingMark AutoSetupRecyclerForNonCollectingMark(*this);
        AutoRestoreValue<bool> skipStackToggle(&this->skipStack, this->skipStack || (param && param->skipStack));
        this->Mark();
        this->objectGraphDumper = nullptr;
#ifdef RECYCLER_STATS
        if (param)
        {
            param->stats = this->collectionStats;
        }
#endif
        succeeded = !objectGraphDumper.isOutOfMemory;
    }
    END_NO_EXCEPTION
    if (isExited)
    {
        this->collectionState = CollectionStateExit;
    }
    if (!succeeded)
    {
        Output::Print(_u("Out of memory dumping object graph\n"));
    }
    Output::Flush();
    return succeeded;
}

void
Recycler::DumpObjectDescription(void *objectAddress)
{
#ifdef PROFILE_RECYCLER_ALLOC
    type_info const * typeinfo = nullptr;
    bool isArray = false;
    if (this->trackerDictionary)
    {
        TrackerData * trackerData = GetTrackerData(objectAddress);
        if (trackerData != nullptr)
        {
            typeinfo = trackerData->typeinfo;
            isArray = trackerData->isArray;
        }
        else
        {
            Assert(false);
        }
    }
    RecyclerObjectDumper::DumpObject(typeinfo, isArray, objectAddress);
#else
    Output::Print(_u("Address %p"), objectAddress);
#endif
}
#endif
#ifdef RECYCLER_STRESS
// All stress mode collect templates are implicitly instantiated here
bool
Recycler::StressCollectNow()
{
    if (this->recyclerStress)
    {
        this->CollectNow<CollectStress>();
        return true;
    }
#if ENABLE_CONCURRENT_GC
    else if (this->recyclerBackgroundStress)
    {
        this->CollectNow<CollectBackgroundStress>();
        return true;
    }
    else if ((this->enableConcurrentMark || this->enableConcurrentSweep)
        && (this->recyclerConcurrentStress
            || this->recyclerConcurrentRepeatStress))
    {
#if ENABLE_PARTIAL_GC
        if (this->recyclerPartialStress)
        {
            this->CollectNow<CollectConcurrentPartialStress>();
            return true;
        }
        else
#endif // ENABLE_PARTIAL_GC
        {
            this->CollectNow<CollectConcurrentStress>();
            return true;
        }
    }
#endif // ENABLE_CONCURRENT_GC
#if ENABLE_PARTIAL_GC
    else if (this->recyclerPartialStress)
    {
        this->CollectNow<CollectPartialStress>();
        return true;
    }
#endif // ENABLE_PARTIAL_GC
    return false;
}
#endif // RECYCLER_STRESS
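
// The TrackAllocInfo/ClearTrackAllocInfo pair below stages per-allocation type data:
// a caller records a TrackAllocData immediately before allocating, and the allocation
// path consumes it via ClearTrackAllocInfo. A hedged sketch of the handshake (in-tree,
// this is driven by the RecyclerNew* tracking macros rather than written by hand; see
// the AssertMsg in ClearTrackAllocInfo):
//
//   TrackAllocData data = ...;            // type, count, plus-size for the allocation
//   recycler->TrackAllocInfo(data);       // stage: nextAllocData = data
//   /* ... perform the allocation ... */
//   recycler->ClearTrackAllocInfo(&data); // consume: copies and clears nextAllocData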
#ifdef TRACK_ALLOC
Recycler *
Recycler::TrackAllocInfo(TrackAllocData const& data)
{
#ifdef PROFILE_RECYCLER_ALLOC
    if (this->trackerDictionary != nullptr)
    {
        Assert(nextAllocData.IsEmpty());
        nextAllocData = data;
    }
#endif
    return this;
}

void
Recycler::ClearTrackAllocInfo(TrackAllocData* data/* = NULL*/)
{
#ifdef PROFILE_RECYCLER_ALLOC
    if (this->trackerDictionary != nullptr)
    {
        AssertMsg(!nextAllocData.IsEmpty(), "Missing tracking information for this allocation, are you not using the macros?");
        if (data)
        {
            *data = nextAllocData;
        }
        nextAllocData.Clear();
    }
#endif
}
#ifdef PROFILE_RECYCLER_ALLOC
bool
Recycler::DoProfileAllocTracker()
{
    bool doTracker = false;
#ifdef RECYCLER_DUMP_OBJECT_GRAPH
    doTracker = Js::Configuration::Global.flags.DumpObjectGraphOnExit
        || Js::Configuration::Global.flags.DumpObjectGraphOnCollect
        || Js::Configuration::Global.flags.DumpObjectGraphOnEnum;
#endif
#ifdef LEAK_REPORT
    if (Js::Configuration::Global.flags.IsEnabled(Js::LeakReportFlag))
    {
        doTracker = true;
    }
#endif
#ifdef CHECK_MEMORY_LEAK
    if (Js::Configuration::Global.flags.CheckMemoryLeak)
    {
        doTracker = true;
    }
#endif
    if (CONFIG_FLAG(KeepRecyclerTrackData))
    {
        doTracker = true;
    }
    return doTracker || MemoryProfiler::DoTrackRecyclerAllocation();
}

void
Recycler::InitializeProfileAllocTracker()
{
    if (DoProfileAllocTracker())
    {
        trackerDictionary = NoCheckHeapNew(TypeInfotoTrackerItemMap, &NoCheckHeapAllocator::Instance, 163);
#pragma prefast(suppress:6031, "InitializeCriticalSectionAndSpinCount always succeed since Vista. No need to check return value");
        trackerCriticalSection = new CriticalSection(1000);
    }
    nextAllocData.Clear();
}
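
// TrackAllocCore keys its accounting off trackAllocData.GetCount(): a count of
// (size_t)-1 means a single instance, while any other count marks an array allocation
// whose per-item size is (size - plusSize) / count. Instance and array allocations of
// the same type accumulate in separate buckets (item->instanceData vs item->arrayData).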
void
Recycler::TrackAllocCore(void * object, size_t size, const TrackAllocData& trackAllocData, bool traceLifetime)
{
    auto&& typeInfo = trackAllocData.GetTypeInfo();
    if (CONFIG_FLAG(KeepRecyclerTrackData))
    {
        TrackFree((char*)object, size);
    }
    Assert(GetTrackerData(object) == nullptr || GetTrackerData(object) == &TrackerData::ExplicitFreeListObjectData);
    Assert(typeInfo != nullptr);
    TrackerItem * item;
    size_t allocCount = trackAllocData.GetCount();
    size_t itemSize = (size - trackAllocData.GetPlusSize());
    bool isArray;
    if (allocCount != (size_t)-1)
    {
        isArray = true;
        itemSize = itemSize / allocCount;
    }
    else
    {
        isArray = false;
        allocCount = 1;
    }
    if (!trackerDictionary->TryGetValue(typeInfo, &item))
    {
#ifdef STACK_BACK_TRACE
        if (CONFIG_FLAG(KeepRecyclerTrackData) && isArray) // type info is not useful; record the stack instead
        {
            size_t stackTraceSize = 16 * sizeof(void*);
            item = NoCheckHeapNewPlus(stackTraceSize, TrackerItem, typeInfo);
            StackBackTrace::Capture((char*)&item[1], stackTraceSize, 7);
        }
        else
#endif
        {
            item = NoCheckHeapNew(TrackerItem, typeInfo);
        }
        item->instanceData.ItemSize = itemSize;
        item->arrayData.ItemSize = itemSize;
        trackerDictionary->Item(typeInfo, item);
    }
    else
    {
        Assert(item->instanceData.typeinfo == typeInfo);
        Assert(item->instanceData.ItemSize == itemSize);
        Assert(item->arrayData.ItemSize == itemSize);
    }
    TrackerData& data = (isArray) ? item->arrayData : item->instanceData;
    data.ItemCount += allocCount;
    data.AllocCount++;
    data.ReqSize += size;
    data.AllocSize += HeapInfo::GetAlignedSizeNoCheck(size);
#ifdef TRACE_OBJECT_LIFETIME
    data.TraceLifetime = traceLifetime;
    if (traceLifetime)
    {
        Output::Print(data.isArray ? _u("Allocated %S[] %p\n") : _u("Allocated %S %p\n"), data.typeinfo->name(), object);
    }
#endif
#ifdef PERF_COUNTERS
    ++data.counter;
    data.sizeCounter += HeapInfo::GetAlignedSizeNoCheck(size);
#endif
    SetTrackerData(object, &data);
}
void* Recycler::TrackAlloc(void* object, size_t size, const TrackAllocData& trackAllocData, bool traceLifetime)
{
    if (this->trackerDictionary != nullptr)
    {
        Assert(nextAllocData.IsEmpty()); // should have been cleared
        trackerCriticalSection->Enter();
        TrackAllocCore(object, size, trackAllocData, traceLifetime);
        trackerCriticalSection->Leave();
    }
    return object;
}
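
// TrackIntegrate applies the same accounting to a pre-built block of objects: it walks
// [blockAddress, blockAddress + blockSize) in allocSize strides and records each slot
// as a fresh allocation of objectSize bytes under the supplied TrackAllocData.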
void
Recycler::TrackIntegrate(__in_ecount(blockSize) char * blockAddress, size_t blockSize, size_t allocSize, size_t objectSize, const TrackAllocData& trackAllocData)
{
    if (this->trackerDictionary != nullptr)
    {
        Assert(nextAllocData.IsEmpty()); // should have been cleared
        trackerCriticalSection->Enter();
        char * address = blockAddress;
        char * blockEnd = blockAddress + blockSize;
        while (address + allocSize <= blockEnd)
        {
            TrackAllocCore(address, objectSize, trackAllocData);
            address += allocSize;
        }
        trackerCriticalSection->Leave();
    }
}

BOOL Recycler::TrackFree(const char* address, size_t size)
{
    if (this->trackerDictionary != nullptr)
    {
        trackerCriticalSection->Enter();
        TrackerData * data = GetTrackerData((char *)address);
        if (data != nullptr)
        {
            if (data != &TrackerData::EmptyData)
            {
#ifdef PERF_COUNTERS
                --data->counter;
                data->sizeCounter -= size;
#endif
                if (data->typeinfo == &typeid(RecyclerWeakReferenceBase))
                {
                    TrackFreeWeakRef((RecyclerWeakReferenceBase *)address);
                }
                data->FreeSize += size;
                data->FreeCount++;
#ifdef TRACE_OBJECT_LIFETIME
                if (data->TraceLifetime)
                {
                    Output::Print(data->isArray ? _u("Freed %S[] %p\n") : _u("Freed %S %p\n"), data->typeinfo->name(), address);
                }
#endif
            }
            SetTrackerData((char *)address, nullptr);
        }
        else
        {
            if (!CONFIG_FLAG(KeepRecyclerTrackData))
            {
                Assert(false);
            }
        }
        trackerCriticalSection->Leave();
    }
    return true;
}
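
// Tracker data is not kept in a side dictionary keyed by address: it lives in the
// owning heap block (HeapBlock::GetTrackerData/SetTrackerData), so both accessors
// below first resolve the block with FindHeapBlock and then index into its per-object
// slot.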
Recycler::TrackerData *
Recycler::GetTrackerData(void * address)
{
    HeapBlock * heapBlock = this->FindHeapBlock(address);
    Assert(heapBlock != nullptr);
    return (Recycler::TrackerData *)heapBlock->GetTrackerData(address);
}

void
Recycler::SetTrackerData(void * address, TrackerData * data)
{
    HeapBlock * heapBlock = this->FindHeapBlock(address);
    Assert(heapBlock != nullptr);
    heapBlock->SetTrackerData(address, data);
}

void
Recycler::TrackUnallocated(__in char* address, __in char *endAddress, size_t sizeCat)
{
    if (!CONFIG_FLAG(KeepRecyclerTrackData))
    {
        if (this->trackerDictionary != nullptr)
        {
            trackerCriticalSection->Enter();
            while (address + sizeCat <= endAddress)
            {
                Assert(GetTrackerData(address) == nullptr);
                SetTrackerData(address, &TrackerData::EmptyData);
                address += sizeCat;
            }
            trackerCriticalSection->Leave();
        }
    }
}
void
Recycler::TrackAllocWeakRef(RecyclerWeakReferenceBase * weakRef)
{
#if ENABLE_RECYCLER_TYPE_TRACKING
    Assert(weakRef->typeInfo != nullptr);
#endif
#if DBG && defined(PERF_COUNTERS)
    if (this->trackerDictionary != nullptr)
    {
        TrackerItem * item;
        if (trackerDictionary->TryGetValue(weakRef->typeInfo, &item))
        {
            weakRef->counter = &item->weakRefCounter;
        }
        else
        {
            weakRef->counter = &PerfCounter::RecyclerTrackerCounterSet::GetWeakRefPerfCounter(weakRef->typeInfo);
        }
        ++(*weakRef->counter);
    }
#endif
}

void
Recycler::TrackFreeWeakRef(RecyclerWeakReferenceBase * weakRef)
{
#if DBG && defined(PERF_COUNTERS)
    if (weakRef->counter != nullptr)
    {
        --(*weakRef->counter);
    }
#endif
}
void
Recycler::PrintAllocStats()
{
    if (this->trackerDictionary == nullptr)
    {
        return;
    }
    size_t itemCount = 0;
    int allocCount = 0;
    int64 reqSize = 0;
    int64 allocSize = 0;
    int freeCount = 0;
    int64 freeSize = 0;
    Output::Print(_u("=================================================================================================================\n"));
    Output::Print(_u("Recycler Allocations\n"));
    Output::Print(_u("=================================================================================================================\n"));
    Output::Print(_u("ItemSize ItemCount AllocCount RequestSize AllocSize FreeCount FreeSize DiffCount DiffSize \n"));
    Output::Print(_u("-------- ---------- ---------- --------------- --------------- ---------- --------------- ---------- ---------------\n"));
    for (int i = 0; i < trackerDictionary->Count(); i++)
    {
        TrackerItem * item = trackerDictionary->GetValueAt(i);
        type_info const * typeinfo = trackerDictionary->GetKeyAt(i);
        if (item->instanceData.AllocCount != 0)
        {
            Output::Print(_u("%8d %10d %10d %15I64d %15I64d %10d %15I64d %10d %15I64d %S\n"),
                item->instanceData.ItemSize, item->instanceData.ItemCount, item->instanceData.AllocCount, item->instanceData.ReqSize,
                item->instanceData.AllocSize, item->instanceData.FreeCount, item->instanceData.FreeSize,
                item->instanceData.AllocCount - item->instanceData.FreeCount, item->instanceData.AllocSize - item->instanceData.FreeSize, typeinfo->name());
            itemCount += item->instanceData.ItemCount;
            allocCount += item->instanceData.AllocCount;
            reqSize += item->instanceData.ReqSize;
            allocSize += item->instanceData.AllocSize;
            freeCount += item->instanceData.FreeCount;
            freeSize += item->instanceData.FreeSize;
        }
        if (item->arrayData.AllocCount != 0)
        {
            Output::Print(_u("%8d %10d %10d %15I64d %15I64d %10d %15I64d %10d %15I64d %S[]\n"),
                item->arrayData.ItemSize, item->arrayData.ItemCount, item->arrayData.AllocCount, item->arrayData.ReqSize,
                item->arrayData.AllocSize, item->arrayData.FreeCount, item->arrayData.FreeSize,
                item->arrayData.AllocCount - item->arrayData.FreeCount, item->arrayData.AllocSize - item->arrayData.FreeSize, typeinfo->name());
            itemCount += item->arrayData.ItemCount;
            allocCount += item->arrayData.AllocCount;
            reqSize += item->arrayData.ReqSize;
            allocSize += item->arrayData.AllocSize;
            freeCount += item->arrayData.FreeCount;
            freeSize += item->arrayData.FreeSize;
        }
    }
    Output::Print(_u("-------- ---------- ---------- --------------- --------------- ---------- --------------- ---------- ---------------\n"));
    Output::Print(_u(" %8d %10d %15I64d %15I64d %10d %15I64d %10d %15I64d **Total**\n"),
        itemCount, allocCount, reqSize, allocSize, freeCount, freeSize, allocCount - freeCount, allocSize - freeSize);
#ifdef EXCEL_FRIENDLY_DUMP
    Output::Print(_u("\nExcel friendly version\nItemSize\tItemCount\tAllocCount\tRequestSize\tAllocSize\tFreeCount\tFreeSize\tDiffCount\tDiffSize\tType\n"));
    for (int i = 0; i < trackerDictionary->Count(); i++)
    {
        TrackerItem * item = trackerDictionary->GetValueAt(i);
        type_info const * typeinfo = trackerDictionary->GetKeyAt(i);
        if (item->instanceData.AllocCount != 0)
        {
            Output::Print(_u("%d\t%d\t%d\t%I64d\t%I64d\t%d\t%I64d\t%d\t%I64d\t%S\n"),
                item->instanceData.ItemSize, item->instanceData.ItemCount, item->instanceData.AllocCount, item->instanceData.ReqSize,
                item->instanceData.AllocSize, item->instanceData.FreeCount, item->instanceData.FreeSize,
                item->instanceData.AllocCount - item->instanceData.FreeCount, item->instanceData.AllocSize - item->instanceData.FreeSize, typeinfo->name());
        }
        if (item->arrayData.AllocCount != 0)
        {
            Output::Print(_u("%d\t%d\t%d\t%I64d\t%I64d\t%d\t%I64d\t%d\t%I64d\t%S[]\n"),
                item->arrayData.ItemSize, item->arrayData.ItemCount, item->arrayData.AllocCount, item->arrayData.ReqSize,
                item->arrayData.AllocSize, item->arrayData.FreeCount, item->arrayData.FreeSize,
                item->arrayData.AllocCount - item->arrayData.FreeCount, item->arrayData.AllocSize - item->arrayData.FreeSize, typeinfo->name());
        }
    }
#endif // EXCEL_FRIENDLY_DUMP
    Output::Flush();
}
#endif // PROFILE_RECYCLER_ALLOC
#endif // TRACK_ALLOC
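
// RECYCLER_VERIFY_MARK: after a mark completes, re-walk the roots (transient pinned
// object, pinned-object map, guest arenas) and assert that everything reachable is
// actually marked. The stack is deliberately not verified: recycler code running
// between ScanStack and this check can leave stale pointers on the stack that would
// show up as false references.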
#ifdef RECYCLER_VERIFY_MARK
void
Recycler::VerifyMark()
{
    VerifyMarkRoots();
    // Can't really verify the stack, since the recycler code between ScanStack and now may have introduced false references.
    // VerifyMarkStack();
    autoHeap.VerifyMark();
}

void
Recycler::VerifyMarkRoots()
{
    {
        this->VerifyMark(transientPinnedObject);
        pinnedObjectMap.Map([this](void * obj, PinRecord const &refCount)
        {
            if (refCount == 0)
            {
                Assert(this->hasPendingUnpinnedObject);
            }
            else
            {
                // Use the pin record as the source reference
                this->VerifyMark(obj);
            }
        });
    }

    DList<GuestArenaAllocator, HeapAllocator>::Iterator guestArenaIter(&guestArenaList);
    while (guestArenaIter.Next())
    {
        if (guestArenaIter.Data().pendingDelete)
        {
            Assert(this->hasPendingDeleteGuestArena);
        }
        else
        {
            VerifyMarkArena(&guestArenaIter.Data());
        }
    }

    DList<ArenaData *, HeapAllocator>::Iterator externalGuestArenaIter(&externalGuestArenaList);
    while (externalGuestArenaIter.Next())
    {
        VerifyMarkArena(externalGuestArenaIter.Data());
    }
    // We can't check external roots here
}

void
Recycler::VerifyMarkArena(ArenaData * alloc)
{
    VerifyMarkBigBlockList(alloc->GetBigBlocks(false));
    VerifyMarkBigBlockList(alloc->GetFullBlocks());
    VerifyMarkArenaMemoryBlockList(alloc->GetMemoryBlocks());
}

void
Recycler::VerifyMarkBigBlockList(BigBlock * memoryBlocks)
{
    size_t scanRootBytes = 0;
    BigBlock *blockp = memoryBlocks;
    while (blockp != NULL)
    {
        void** base = (void**)blockp->GetBytes();
        size_t slotCount = blockp->currentByte / sizeof(void*);
        scanRootBytes += blockp->currentByte;
        for (size_t i = 0; i < slotCount; i++)
        {
            VerifyMark(base[i]);
        }
        blockp = blockp->nextBigBlock;
    }
}

void
Recycler::VerifyMarkArenaMemoryBlockList(ArenaMemoryBlock * memoryBlocks)
{
    size_t scanRootBytes = 0;
    ArenaMemoryBlock *blockp = memoryBlocks;
    while (blockp != NULL)
    {
        void** base = (void**)blockp->GetBytes();
        size_t slotCount = blockp->nbytes / sizeof(void*);
        scanRootBytes += blockp->nbytes;
        for (size_t i = 0; i < slotCount; i++)
        {
            VerifyMark(base[i]);
        }
        blockp = blockp->next;
    }
}

void
Recycler::VerifyMarkStack()
{
    SAVE_THREAD_CONTEXT();
    void ** stackTop = (void**)this->savedThreadContext.GetStackTop();
    void * stackStart = GetStackBase();
    Assert(stackStart > stackTop);
    for (; stackTop < stackStart; stackTop++)
    {
        void* candidate = *stackTop;
        VerifyMark(nullptr, candidate);
    }
    void** registers = this->savedThreadContext.GetRegisters();
    for (int i = 0; i < SavedRegisterState::NumRegistersToSave; i++)
    {
        VerifyMark(nullptr, registers[i]);
    }
}

bool
Recycler::VerifyMark(void * target)
{
    return VerifyMark(nullptr, target);
}

// objectAddress is nullptr in case of roots
bool
Recycler::VerifyMark(void * objectAddress, void * target)
{
    void * realAddress;
    HeapBlock * heapBlock;
    if (this->enableScanInteriorPointers)
    {
        heapBlock = heapBlockMap.GetHeapBlock(target);
        if (heapBlock == nullptr)
        {
            return false;
        }
        realAddress = heapBlock->GetRealAddressFromInterior(target);
        if (realAddress == nullptr)
        {
            return false;
        }
    }
    else
    {
        heapBlock = this->FindHeapBlock(target);
        if (heapBlock == nullptr)
        {
            return false;
        }
        realAddress = target;
    }
    return heapBlock->VerifyMark(objectAddress, realAddress);
}
#endif
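
// Guest arenas are arena allocators whose memory is scanned as GC roots. Creation
// prepends to guestArenaList; deletion has to cooperate with concurrent find-root
// (see DeleteGuestArena below), so under ENABLE_CONCURRENT_GC an arena may only be
// flagged pendingDelete instead of being unlinked immediately.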
ArenaAllocator *
Recycler::CreateGuestArena(char16 const * name, void (*outOfMemoryFunc)())
{
    // Note: guest arenas use the large block allocator.
    return guestArenaList.PrependNode(&HeapAllocator::Instance, name, &recyclerLargeBlockPageAllocator, outOfMemoryFunc);
}

void
Recycler::DeleteGuestArena(ArenaAllocator * arenaAllocator)
{
    GuestArenaAllocator * guestArenaAllocator = static_cast<GuestArenaAllocator *>(arenaAllocator);
#if ENABLE_CONCURRENT_GC
    if (this->hasPendingConcurrentFindRoot)
    {
        // We are doing concurrent find root; don't modify the list. Instead, mark the arena
        // to be deleted later, when we do find root in thread.
        Assert(guestArenaList.HasElement(guestArenaAllocator));
        this->hasPendingDeleteGuestArena = true;
        guestArenaAllocator->pendingDelete = true;
    }
    else
#endif
    {
        guestArenaList.RemoveElement(&HeapAllocator::Instance, guestArenaAllocator);
    }
    // Any time a root is removed during a GC, it indicates that an exhaustive
    // collection is likely going to have work to do, so trigger an exhaustive
    // candidate GC to indicate this fact.
    this->CollectNow<CollectExhaustiveCandidate>();
}
#ifdef LEAK_REPORT
void
Recycler::ReportLeaks()
{
    if (GetRecyclerFlagsTable().IsEnabled(Js::LeakReportFlag))
    {
        if (GetRecyclerFlagsTable().ForceMemoryLeak)
        {
            AUTO_HANDLED_EXCEPTION_TYPE(ExceptionType_DisableCheck);
            struct FakeMemory { Field(int) f; };
            FakeMemory * f = RecyclerNewStruct(this, FakeMemory);
            this->RootAddRef(f);
        }
        LeakReport::StartSection(_u("Object Graph"));
        LeakReport::StartRedirectOutput();
        RecyclerObjectGraphDumper::Param param = { 0 };
        param.skipStack = true;
        if (!this->DumpObjectGraph(&param))
        {
            LeakReport::Print(_u("--------------------------------------------------------------------------------\n"));
            LeakReport::Print(_u("ERROR: Out of memory generating leak report\n"));
            param.stats.markData.markCount = 0;
        }
        LeakReport::EndRedirectOutput();
        if (param.stats.markData.markCount != 0)
        {
            LeakReport::Print(_u("--------------------------------------------------------------------------------\n"));
            LeakReport::Print(_u("Recycler Leaked Object: %d bytes (%d objects)\n"),
                param.stats.markData.markBytes, param.stats.markData.markCount);
#ifdef STACK_BACK_TRACE
            if (GetRecyclerFlagsTable().LeakStackTrace)
            {
                LeakReport::StartSection(_u("Pinned object stack traces"));
                LeakReport::StartRedirectOutput();
                this->PrintPinnedObjectStackTraces();
                LeakReport::EndRedirectOutput();
                LeakReport::EndSection();
            }
#endif
        }
        LeakReport::EndSection();
    }
}

void
Recycler::ReportLeaksOnProcessDetach()
{
    if (GetRecyclerFlagsTable().IsEnabled(Js::LeakReportFlag))
    {
        AUTO_LEAK_REPORT_SECTION(this->GetRecyclerFlagsTable(), _u("Recycler (%p): Process Termination"), this);
        LeakReport::StartRedirectOutput();
        ReportOnProcessDetach([=]() { this->ReportLeaks(); });
        LeakReport::EndRedirectOutput();
    }
}
#endif
#ifdef CHECK_MEMORY_LEAK
void
Recycler::CheckLeaks(char16 const * header)
{
    if (GetRecyclerFlagsTable().CheckMemoryLeak && this->isPrimaryMarkContextInitialized)
    {
        if (GetRecyclerFlagsTable().ForceMemoryLeak)
        {
            AUTO_HANDLED_EXCEPTION_TYPE(ExceptionType_DisableCheck);
            struct FakeMemory { Field(int) f; };
            FakeMemory * f = RecyclerNewStruct(this, FakeMemory);
            this->RootAddRef(f);
        }
        Output::CaptureStart();
        Output::Print(_u("-------------------------------------------------------------------------------------\n"));
        Output::Print(_u("Recycler (%p): %s Leaked Roots\n"), this, header);
        Output::Print(_u("-------------------------------------------------------------------------------------\n"));
        RecyclerObjectGraphDumper::Param param = { 0 };
        param.dumpRootOnly = true;
        param.skipStack = true;
        if (!this->DumpObjectGraph(&param))
        {
            free(Output::CaptureEnd());
            Output::Print(_u("ERROR: Out of memory generating leak report\n"));
            return;
        }
        if (param.stats.markData.markCount != 0)
        {
#ifdef STACK_BACK_TRACE
            if (GetRecyclerFlagsTable().LeakStackTrace)
            {
                Output::Print(_u("-------------------------------------------------------------------------------------\n"));
                Output::Print(_u("Pinned object stack traces\n"));
                Output::Print(_u("-------------------------------------------------------------------------------------\n"));
                this->PrintPinnedObjectStackTraces();
            }
#endif
            Output::Print(_u("-------------------------------------------------------------------------------------\n"));
            Output::Print(_u("Recycler Leaked Object: %d bytes (%d objects)\n"),
                param.stats.markData.markBytes, param.stats.markData.markCount);
            char16 * buffer = Output::CaptureEnd();
            MemoryLeakCheck::AddLeakDump(buffer, param.stats.markData.markBytes, param.stats.markData.markCount);
#ifdef GENERATE_DUMP
            if (GetRecyclerFlagsTable().IsEnabled(Js::DumpOnLeakFlag))
            {
                Js::Throw::GenerateDump(GetRecyclerFlagsTable().DumpOnLeak);
            }
#endif
        }
        else
        {
            free(Output::CaptureEnd());
        }
    }
}

void
Recycler::CheckLeaksOnProcessDetach(char16 const * header)
{
    if (GetRecyclerFlagsTable().CheckMemoryLeak)
    {
        ReportOnProcessDetach([=]() { this->CheckLeaks(header); });
    }
}
#endif
#if defined(LEAK_REPORT) || defined(CHECK_MEMORY_LEAK)
template <class Fn>
void
Recycler::ReportOnProcessDetach(Fn fn)
{
#if DBG
    // Process detach can be done on any thread, just disable the thread check
    this->markContext.GetPageAllocator()->SetDisableThreadAccessCheck();
#endif
#if ENABLE_CONCURRENT_GC
    if (this->IsConcurrentState())
    {
        this->AbortConcurrent(true);
    }
    if (this->CollectionInProgress())
    {
        Output::Print(_u("WARNING: Thread terminated during GC. Can't dump object graph\n"));
        return;
    }
#else
    Assert(!this->CollectionInProgress());
#endif
    // Don't mark external roots on another thread
    this->SetExternalRootMarker(NULL, NULL);
#if DBG
    this->ResetThreadId();
#endif
    fn();
}

#ifdef STACK_BACK_TRACE
void
Recycler::PrintPinnedObjectStackTraces()
{
    pinnedObjectMap.Map([this](void * object, PinRecord const& pinRecord)
    {
        this->DumpObjectDescription(object);
        Output::Print(_u("\n"));
        StackBackTraceNode::PrintAll(pinRecord.stackBackTraces);
    });
}
#endif
#endif
#if defined(RECYCLER_DUMP_OBJECT_GRAPH) || defined(LEAK_REPORT) || defined(CHECK_MEMORY_LEAK)
void
Recycler::SetInDllCanUnloadNow()
{
    inDllCanUnloadNow = true;
    // Just clear out the root marker for the dump graph and report leaks
    SetExternalRootMarker(NULL, NULL);
}

void
Recycler::SetInDetachProcess()
{
    inDetachProcess = true;
    // Just clear out the root marker for the dump graph and report leaks
    SetExternalRootMarker(NULL, NULL);
}
#endif
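
// ETW free events are batched: AppendFreeMemoryETWRecord below accumulates entries in
// etwFreeRecords, and FlushFreeRecord emits one bulk JSCRIPT_RECYCLER_FREE_MEMORY
// event per BulkFreeMemoryCount records instead of one event per freed object.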
#ifdef ENABLE_JS_ETW
ULONG Recycler::EventWriteFreeMemoryBlock(HeapBlock* heapBlock)
{
    if (EventEnabledJSCRIPT_RECYCLER_FREE_MEMORY_BLOCK())
    {
        char* memoryAddress = NULL;
        ULONG objectSize = 0;
        ULONG blockSize = 0;
        switch (heapBlock->GetHeapBlockType())
        {
        case HeapBlock::HeapBlockType::SmallFinalizableBlockType:
        case HeapBlock::HeapBlockType::SmallNormalBlockType:
#ifdef RECYCLER_WRITE_BARRIER
        case HeapBlock::HeapBlockType::SmallFinalizableBlockWithBarrierType:
        case HeapBlock::HeapBlockType::SmallNormalBlockWithBarrierType:
#endif
        case HeapBlock::HeapBlockType::SmallLeafBlockType:
        {
            SmallHeapBlock* smallHeapBlock = static_cast<SmallHeapBlock*>(heapBlock);
            memoryAddress = smallHeapBlock->GetAddress();
            blockSize = (ULONG)(smallHeapBlock->GetEndAddress() - memoryAddress);
            objectSize = smallHeapBlock->GetObjectSize();
        }
        break;
        case HeapBlock::HeapBlockType::MediumFinalizableBlockType:
        case HeapBlock::HeapBlockType::MediumNormalBlockType:
#ifdef RECYCLER_WRITE_BARRIER
        case HeapBlock::HeapBlockType::MediumFinalizableBlockWithBarrierType:
        case HeapBlock::HeapBlockType::MediumNormalBlockWithBarrierType:
#endif
        case HeapBlock::HeapBlockType::MediumLeafBlockType:
        {
            MediumHeapBlock* mediumHeapBlock = static_cast<MediumHeapBlock*>(heapBlock);
            memoryAddress = mediumHeapBlock->GetAddress();
            blockSize = (ULONG)(mediumHeapBlock->GetEndAddress() - memoryAddress);
            objectSize = mediumHeapBlock->GetObjectSize();
        }
        break;
        case HeapBlock::HeapBlockType::LargeBlockType:
        {
            LargeHeapBlock* largeHeapBlock = static_cast<LargeHeapBlock*>(heapBlock);
            memoryAddress = largeHeapBlock->GetBeginAddress();
            blockSize = (ULONG)(largeHeapBlock->GetEndAddress() - memoryAddress);
            objectSize = blockSize;
        }
        break;
        default:
            AssertMsg(FALSE, "invalid heapblock type");
        }
        EventWriteJSCRIPT_RECYCLER_FREE_MEMORY_BLOCK(memoryAddress, blockSize, objectSize);
    }
    return S_OK;
}
void Recycler::FlushFreeRecord()
{
    Assert(bulkFreeMemoryWrittenCount <= Recycler::BulkFreeMemoryCount);
    JS_ETW(EventWriteJSCRIPT_RECYCLER_FREE_MEMORY(bulkFreeMemoryWrittenCount, sizeof(Recycler::ETWFreeRecord), etwFreeRecords));
    bulkFreeMemoryWrittenCount = 0;
}

void Recycler::AppendFreeMemoryETWRecord(__in char *address, size_t size)
{
    Assert(bulkFreeMemoryWrittenCount < Recycler::BulkFreeMemoryCount);
    __analysis_assume(bulkFreeMemoryWrittenCount < Recycler::BulkFreeMemoryCount);
    etwFreeRecords[bulkFreeMemoryWrittenCount].memoryAddress = address;
    // TODO: change to size_t or uint64?
    etwFreeRecords[bulkFreeMemoryWrittenCount].objectSize = (uint)size;
    bulkFreeMemoryWrittenCount++;
    if (bulkFreeMemoryWrittenCount == Recycler::BulkFreeMemoryCount)
    {
        FlushFreeRecord();
        Assert(bulkFreeMemoryWrittenCount == 0);
    }
}
#endif
#ifdef PROFILE_EXEC
ArenaAllocator *
Recycler::AddBackgroundProfilerArena()
{
    return this->backgroundProfilerArena.PrependNode(&HeapAllocator::Instance,
        _u("BgGCProfiler"), &this->backgroundProfilerPageAllocator, Js::Throw::OutOfMemory);
}

void
Recycler::ReleaseBackgroundProfilerArena(ArenaAllocator * arena)
{
    this->backgroundProfilerArena.RemoveElement(&HeapAllocator::Instance, arena);
}

void
Recycler::SetProfiler(Js::Profiler * profiler, Js::Profiler * backgroundProfiler)
{
    this->profiler = profiler;
    this->backgroundProfiler = backgroundProfiler;
}
#endif
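
// ObjectBeforeCollect callbacks fire when a registered object is found unreachable
// during mark (or unconditionally at shutdown). Re-registering from inside a callback
// "revives" the object: SetObjectBeforeCollectCallback rescans the object pointer and
// reprocesses the mark so it survives the current collection. A hedged usage sketch
// (the callback signature matches the call sites below; the names are illustrative):
//
//   void OnBeforeCollect(void* obj, void* state) { /* last-chance cleanup */ }
//   ...
//   recycler->SetObjectBeforeCollectCallback(obj, OnBeforeCollect, state,
//       /*callbackWrapper*/ nullptr, /*threadContext*/ nullptr);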
void Recycler::SetObjectBeforeCollectCallback(void* object,
    ObjectBeforeCollectCallback callback,
    void* callbackState,
    ObjectBeforeCollectCallbackWrapper callbackWrapper,
    void* threadContext)
{
    if (objectBeforeCollectCallbackState == ObjectBeforeCollectCallback_Shutdown)
    {
        return; // NOP at shutdown
    }
    if (objectBeforeCollectCallbackMap == nullptr)
    {
        if (callback == nullptr) return;
        objectBeforeCollectCallbackMap = HeapNew(ObjectBeforeCollectCallbackMap, &HeapAllocator::Instance);
    }
    // Only allow 1 callback per object
    objectBeforeCollectCallbackMap->Item(object, ObjectBeforeCollectCallbackData(callbackWrapper, callback, callbackState, threadContext));
    if (callback != nullptr && this->IsInObjectBeforeCollectCallback()) // revive
    {
        this->ScanMemory<false>(&object, sizeof(object));
        this->ProcessMark(/*background*/false);
    }
}
bool Recycler::ProcessObjectBeforeCollectCallbacks(bool atShutdown/*= false*/)
{
    if (this->objectBeforeCollectCallbackMap == nullptr)
    {
        return false; // no callbacks
    }
    Assert(atShutdown || this->IsMarkState());
    Assert(!this->IsInObjectBeforeCollectCallback());
    AutoRestoreValue<ObjectBeforeCollectCallbackState> autoInObjectBeforeCollectCallback(&objectBeforeCollectCallbackState,
        atShutdown ? ObjectBeforeCollectCallback_Shutdown : ObjectBeforeCollectCallback_Normal);

    // The callbacks may register/unregister callbacks while we are enumerating the current map. To avoid
    // conflicting usage of the callback map, we swap it out. New registrations will go to a new map.
    AutoAllocatorObjectPtr<ObjectBeforeCollectCallbackMap, HeapAllocator> oldCallbackMap(
        this->objectBeforeCollectCallbackMap, &HeapAllocator::Instance);
    this->objectBeforeCollectCallbackMap = nullptr;

    bool hasRemainingCallbacks = false;
    oldCallbackMap->MapAndRemoveIf([&](const ObjectBeforeCollectCallbackMap::EntryType& entry)
    {
        const ObjectBeforeCollectCallbackData& data = entry.Value();
        if (data.callback != nullptr)
        {
            void* object = entry.Key();
            if (atShutdown || !this->IsObjectMarked(object))
            {
                if (data.callbackWrapper != nullptr)
                {
                    data.callbackWrapper(data.callback, object, data.callbackState, data.threadContext);
                }
                else
                {
                    data.callback(object, data.callbackState);
                }
            }
            else
            {
                hasRemainingCallbacks = true;
                return false; // Do not remove this entry; its callback remains registered for a future collection
            }
        }
        return true; // Remove this entry
    });

    // Merge back remaining callbacks, if any
    if (hasRemainingCallbacks)
    {
        if (this->objectBeforeCollectCallbackMap == nullptr)
        {
            this->objectBeforeCollectCallbackMap = oldCallbackMap.Detach();
        }
        else
        {
            if (oldCallbackMap->Count() > this->objectBeforeCollectCallbackMap->Count())
            {
                // Swap so that oldCallbackMap is the smaller one
                ObjectBeforeCollectCallbackMap* tmp = oldCallbackMap.Detach();
                *&oldCallbackMap = this->objectBeforeCollectCallbackMap;
                this->objectBeforeCollectCallbackMap = tmp;
            }
            oldCallbackMap->Map([&](void* object, const ObjectBeforeCollectCallbackData& data)
            {
                this->objectBeforeCollectCallbackMap->Item(object, data);
            });
        }
    }
    return true; // may have called callbacks
}
void Recycler::ClearObjectBeforeCollectCallbacks()
{
    // This is called at shutdown. All objects will be gone. Invoke each registered callback, if any.
    ProcessObjectBeforeCollectCallbacks(/*atShutdown*/true);
    Assert(objectBeforeCollectCallbackMap == nullptr);
}

#ifdef RECYCLER_TEST_SUPPORT
void Recycler::SetCheckFn(BOOL(*checkFn)(char* addr, size_t size))
{
    Assert(BinaryFeatureControl::RecyclerTest());
    this->EnsureNotCollecting();
    this->checkFn = checkFn;
}
#endif
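
// NotifyFree is the common bookkeeping hook for every swept object: test-hook check,
// ETW free record, memory-tracking report, perf-counter updates (split by small vs
// large block), verify-pad validation, profile-tracker free, and RECYCLER_STATS sweep
// counters (objects reclaimed while force-sweeping are not counted as free-listed).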
void
Recycler::NotifyFree(__in char *address, size_t size)
{
    RecyclerVerboseTrace(GetRecyclerFlagsTable(), _u("Sweeping object %p\n"), address);
#ifdef RECYCLER_TEST_SUPPORT
    if (BinaryFeatureControl::RecyclerTest())
    {
        if (checkFn != NULL)
        {
            checkFn(address, size);
        }
    }
#endif
#ifdef ENABLE_JS_ETW
    if (EventEnabledJSCRIPT_RECYCLER_FREE_MEMORY())
    {
        AppendFreeMemoryETWRecord(address, (UINT)size);
    }
#endif
    RecyclerMemoryTracking::ReportFree(this, address, size);
    RECYCLER_PERF_COUNTER_DEC(LiveObject);
    RECYCLER_PERF_COUNTER_SUB(LiveObjectSize, size);
    RECYCLER_PERF_COUNTER_ADD(FreeObjectSize, size);
    if (HeapInfo::IsSmallBlockAllocation(HeapInfo::GetAlignedSizeNoCheck(size)))
    {
        RECYCLER_PERF_COUNTER_DEC(SmallHeapBlockLiveObject);
        RECYCLER_PERF_COUNTER_SUB(SmallHeapBlockLiveObjectSize, size);
        RECYCLER_PERF_COUNTER_ADD(SmallHeapBlockFreeObjectSize, size);
    }
    else
    {
        RECYCLER_PERF_COUNTER_DEC(LargeHeapBlockLiveObject);
        RECYCLER_PERF_COUNTER_SUB(LargeHeapBlockLiveObjectSize, size);
        RECYCLER_PERF_COUNTER_ADD(LargeHeapBlockFreeObjectSize, size);
    }
#ifdef RECYCLER_MEMORY_VERIFY
    if (this->VerifyEnabled())
    {
        VerifyCheckPad(address, size);
    }
#endif
#ifdef PROFILE_RECYCLER_ALLOC
    if (!CONFIG_FLAG(KeepRecyclerTrackData))
    {
        TrackFree(address, size);
    }
#endif
#ifdef RECYCLER_STATS
    collectionStats.objectSweptCount++;
    collectionStats.objectSweptBytes += size;
    if (!isForceSweeping)
    {
        collectionStats.objectSweptFreeListCount++;
        collectionStats.objectSweptFreeListBytes += size;
    }
#endif
}
#if GLOBAL_ENABLE_WRITE_BARRIER
void
Recycler::RegisterPendingWriteBarrierBlock(void* address, size_t bytes)
{
    if (CONFIG_FLAG(ForceSoftwareWriteBarrier))
    {
#if DBG
        WBSetBitRange((char*)address, (uint)bytes / sizeof(void*));
#endif
        pendingWriteBarrierBlockMap.Item(address, bytes);
        RecyclerWriteBarrierManager::WriteBarrier(address, bytes);
    }
}

void
Recycler::UnRegisterPendingWriteBarrierBlock(void* address)
{
    if (CONFIG_FLAG(ForceSoftwareWriteBarrier))
    {
        pendingWriteBarrierBlockMap.Remove(address);
    }
}
#endif
#if DBG && GLOBAL_ENABLE_WRITE_BARRIER
void
Recycler::WBVerifyBitIsSet(char* addr, char* target)
{
    AutoCriticalSection lock(&recyclerListLock);
    Recycler* recycler = Recycler::recyclerList;
    while (recycler)
    {
        auto heapBlock = recycler->FindHeapBlock((void*)((UINT_PTR)addr & ~HeapInfo::ObjectAlignmentMask));
        if (heapBlock)
        {
            heapBlock->WBVerifyBitIsSet(addr);
            break;
        }
        recycler = recycler->next;
    }
}

void
Recycler::WBSetBit(char* addr)
{
    if (CONFIG_FLAG(ForceSoftwareWriteBarrier) && CONFIG_FLAG(VerifyBarrierBit))
    {
        AutoCriticalSection lock(&recyclerListLock);
        Recycler* recycler = Recycler::recyclerList;
        while (recycler)
        {
            auto heapBlock = recycler->FindHeapBlock((void*)((UINT_PTR)addr & ~HeapInfo::ObjectAlignmentMask));
            if (heapBlock)
            {
                heapBlock->WBSetBit(addr);
                break;
            }
            recycler = recycler->next;
        }
    }
}

void
Recycler::WBSetBitRange(char* addr, uint count)
{
    if (CONFIG_FLAG(ForceSoftwareWriteBarrier) && CONFIG_FLAG(VerifyBarrierBit))
    {
        AutoCriticalSection lock(&recyclerListLock);
        Recycler* recycler = Recycler::recyclerList;
        while (recycler)
        {
            auto heapBlock = recycler->FindHeapBlock((void*)((UINT_PTR)addr & ~HeapInfo::ObjectAlignmentMask));
            if (heapBlock)
            {
                heapBlock->WBSetBitRange(addr, count);
                break;
            }
            recycler = recycler->next;
        }
    }
}

bool
Recycler::WBCheckIsRecyclerAddress(char* addr)
{
    AutoCriticalSection lock(&recyclerListLock);
    Recycler* recycler = Recycler::recyclerList;
    while (recycler)
    {
        auto heapBlock = recycler->FindHeapBlock((void*)((UINT_PTR)addr & ~HeapInfo::ObjectAlignmentMask));
        if (heapBlock)
        {
            return true;
        }
        recycler = recycler->next;
    }
    return false;
}
#endif
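
// RecyclerHeapObjectInfo::GetSize returns the object size as the caller sees it: the
// owning block's object size, minus the verify pad recorded in the trailing size_t
// when RECYCLER_MEMORY_VERIFY is active (the same pad written by FillPadNoCheck above).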
size_t
RecyclerHeapObjectInfo::GetSize() const
{
    Assert(m_heapBlock);
    size_t size;
#if LARGEHEAPBLOCK_ENCODING
    if (isUsingLargeHeapBlock)
    {
        size = m_largeHeapBlockHeader->objectSize;
    }
#else
    if (m_heapBlock->IsLargeHeapBlock())
    {
        size = ((LargeHeapBlock*)m_heapBlock)->GetObjectSize(m_address);
    }
#endif
    else
    {
        // All small heap block types have the same layout for the object size field.
        size = ((SmallHeapBlock*)m_heapBlock)->GetObjectSize();
    }
#ifdef RECYCLER_MEMORY_VERIFY
    if (m_recycler->VerifyEnabled())
    {
        size -= *(size_t *)(((char *)m_address) + size - sizeof(size_t));
    }
#endif
    return size;
}

template char* Recycler::AllocWithAttributesInlined<(Memory::ObjectInfoBits)32, false>(size_t);
#ifdef RECYCLER_VISITED_HOST
template char* Recycler::AllocZeroWithAttributesInlined<RecyclerVisitedHostTracedFinalizableBits, /* nothrow = */ true>(size_t);
template char* Recycler::AllocZeroWithAttributesInlined<RecyclerVisitedHostFinalizableBits, /* nothrow = */ true>(size_t);
template char* Recycler::AllocZeroWithAttributesInlined<RecyclerVisitedHostTracedBits, /* nothrow = */ true>(size_t);
template char* Recycler::AllocZeroWithAttributesInlined<LeafBit, /* nothrow = */ true>(size_t);
#endif