// Recycler.cpp
//-------------------------------------------------------------------------------------------------------
// Copyright (C) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE.txt file in the project root for full license information.
//-------------------------------------------------------------------------------------------------------
#include "CommonMemoryPch.h"
#ifdef _M_AMD64
#include "amd64.h"
#endif
#ifdef _M_ARM
#include "arm.h"
#endif
#ifdef _M_ARM64
#include "arm64.h"
#endif
#include "Core/BinaryFeatureControl.h"
#include "Common/ThreadService.h"
#include "Memory/AutoAllocatorObjectPtr.h"
#include "Common/Tick.h"

DEFINE_RECYCLER_TRACKER_PERF_COUNTER(RecyclerWeakReferenceBase);

#ifdef PROFILE_RECYCLER_ALLOC
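// Empty marker types: their type_info pointers tag tracker entries for the two
// pseudo-allocations the allocation profiler reports (the unused tail of a
// bump-allocated block, and objects sitting on an explicit free list).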
struct UnallocatedPortionOfBumpAllocatedBlock
{
};
struct ExplicitFreeListedObject
{
};
Recycler::TrackerData Recycler::TrackerData::EmptyData(&typeid(UnallocatedPortionOfBumpAllocatedBlock), false);
Recycler::TrackerData Recycler::TrackerData::ExplicitFreeListObjectData(&typeid(ExplicitFreeListedObject), false);
#endif

DefaultRecyclerCollectionWrapper DefaultRecyclerCollectionWrapper::Instance;

inline bool
DefaultRecyclerCollectionWrapper::IsCollectionDisabled(Recycler * recycler)
{
    // GC shouldn't be triggered during heap enum, unless we missed a case where it allocates memory (which
    // shouldn't happen during heap enum) or we explicitly allow allocation.
    // REVIEW: isHeapEnumInProgress should have been a collection state and checked earlier, to avoid a check here.
    // Collection will be disabled in VarDispEx because it could be called from projection re-entrance, as ASTA allows
    // QI/AddRef/Release to come back.
    bool collectionDisabled = recycler->IsCollectionDisabled();
#if DBG
    if (collectionDisabled)
    {
        // Disabled collection should only happen if we allowed allocation during heap enum
        if (recycler->IsHeapEnumInProgress())
        {
            Assert(recycler->AllowAllocationDuringHeapEnum());
        }
        else
        {
#ifdef ENABLE_PROJECTION
            Assert(recycler->IsInRefCountTrackingForProjection());
#else
            Assert(false);
#endif
        }
    }
#endif
    return collectionDisabled;
}
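// Runs a collection entry point (a Recycler member function pointer) on behalf
// of the host. The NO_EXCEPTION guard keeps any exception from escaping across
// this host/GC boundary.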
BOOL DefaultRecyclerCollectionWrapper::ExecuteRecyclerCollectionFunction(Recycler * recycler, CollectionFunction function, CollectionFlags flags)
{
    if (IsCollectionDisabled(recycler))
    {
        return FALSE;
    }
    BOOL ret = FALSE;
    BEGIN_NO_EXCEPTION
    {
        ret = (recycler->*(function))(flags);
    }
    END_NO_EXCEPTION;
    return ret;
}

void
DefaultRecyclerCollectionWrapper::DisposeObjects(Recycler * recycler)
{
    if (IsCollectionDisabled(recycler))
    {
        return;
    }
    BEGIN_NO_EXCEPTION
    {
        recycler->DisposeObjects();
    }
    END_NO_EXCEPTION;
}

static void* GetStackBase();
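// Explicit instantiations: force the always-inlined allocation fast path for the
// <NoBit, false> specialization (and the matching placement operator new) to be
// emitted in this translation unit.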
template _ALWAYSINLINE char * Recycler::AllocWithAttributesInlined<NoBit, false>(size_t size);
template _ALWAYSINLINE char* Recycler::RealAlloc<NoBit, false>(HeapInfo* heap, size_t size);
template _ALWAYSINLINE _Ret_notnull_ void * __cdecl operator new<Recycler>(size_t byteSize, Recycler * alloc, char * (Recycler::*AllocFunc)(size_t));
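// Construction is dominated by the member-initializer list: one primary mark
// context plus three parallel mark contexts, each backed by its own page pool,
// and a large set of collection-mode flags that all start disabled.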
Recycler::Recycler(AllocationPolicyManager * policyManager, IdleDecommitPageAllocator * pageAllocator, void (*outOfMemoryFunc)(), Js::ConfigFlagsTable& configFlagsTable, RecyclerTelemetryHostInterface* hostInterface) :
    collectionStateChangedObserver(this),
    collectionState(CollectionStateNotCollecting, &collectionStateChangedObserver),
    recyclerFlagsTable(configFlagsTable),
    autoHeap(policyManager, configFlagsTable, pageAllocator),
#ifdef ENABLE_JS_ETW
    collectionStartReason(ETWEventGCActivationTrigger::ETWEvent_GC_Trigger_Unknown),
    collectionFinishReason(ETWEventGCActivationTrigger::ETWEvent_GC_Trigger_Unknown),
#endif
    threadService(nullptr),
    markPagePool(configFlagsTable),
    parallelMarkPagePool1(configFlagsTable),
    parallelMarkPagePool2(configFlagsTable),
    parallelMarkPagePool3(configFlagsTable),
    markContext(this, &this->markPagePool),
    parallelMarkContext1(this, &this->parallelMarkPagePool1),
    parallelMarkContext2(this, &this->parallelMarkPagePool2),
    parallelMarkContext3(this, &this->parallelMarkPagePool3),
#if ENABLE_PARTIAL_GC
    clientTrackedObjectAllocator(_u("CTO-List"), pageAllocator, Js::Throw::OutOfMemory),
#endif
    outOfMemoryFunc(outOfMemoryFunc),
#ifdef RECYCLER_TEST_SUPPORT
    checkFn(NULL),
#endif
    externalRootMarker(NULL),
    externalRootMarkerContext(NULL),
    recyclerSweepManager(nullptr),
    inEndMarkOnLowMemory(false),
    enableScanInteriorPointers(CUSTOM_CONFIG_FLAG(configFlagsTable, RecyclerForceMarkInterior)),
    enableScanImplicitRoots(false),
    disableCollectOnAllocationHeuristics(false),
    skipStack(false),
    mainThreadHandle(NULL),
#if ENABLE_CONCURRENT_GC
    backgroundFinishMarkCount(0),
    hasPendingUnpinnedObject(false),
    hasPendingConcurrentFindRoot(false),
    queueTrackedObject(false),
    enableConcurrentMark(false), // Default to non-concurrent
    enableParallelMark(false),
    enableConcurrentSweep(false),
#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
    allowAllocationsDuringConcurrentSweepForCollection(false),
#endif
    concurrentThread(NULL),
    concurrentWorkReadyEvent(NULL),
    concurrentWorkDoneEvent(NULL),
    parallelThread1(this, &Recycler::ParallelWorkFunc<0>),
    parallelThread2(this, &Recycler::ParallelWorkFunc<1>),
    priorityBoost(false),
    isAborting(false),
#if DBG
    concurrentThreadExited(true),
    isProcessingTrackedObjects(false),
    hasIncompleteDoCollect(false),
    isConcurrentGCOnIdle(false),
    isFinishGCOnIdle(false),
#endif
#ifdef IDLE_DECOMMIT_ENABLED
    concurrentIdleDecommitEvent(nullptr),
#endif
#endif
#if DBG
    isExternalStackSkippingGC(false),
    isProcessingRescan(false),
#endif
#if ENABLE_PARTIAL_GC
    inPartialCollectMode(false),
    scanPinnedObjectMap(false),
    partialUncollectedAllocBytes(0),
    uncollectedNewPageCountPartialCollect((size_t)-1),
#if ENABLE_CONCURRENT_GC
    partialConcurrentNextCollection(false),
#endif
#ifdef RECYCLER_STRESS
    forcePartialScanStack(false),
#endif
#endif
#if defined(RECYCLER_DUMP_OBJECT_GRAPH) || defined(LEAK_REPORT) || defined(CHECK_MEMORY_LEAK)
    isPrimaryMarkContextInitialized(false),
#endif
    allowDispose(false),
    inDisposeWrapper(false),
    hasDisposableObject(false),
    hasNativeGCHost(false),
    tickCountNextDispose(0),
    transientPinnedObject(nullptr),
    pinnedObjectMap(1024, HeapAllocator::GetNoMemProtectInstance()),
    weakReferenceMap(1024, HeapAllocator::GetNoMemProtectInstance()),
    weakReferenceCleanupId(0),
#if ENABLE_WEAK_REFERENCE_REGIONS
    weakReferenceRegionList(HeapAllocator::GetNoMemProtectInstance()),
#endif
    collectionWrapper(&DefaultRecyclerCollectionWrapper::Instance),
    isScriptActive(false),
    isInScript(false),
    isShuttingDown(false),
    inExhaustiveCollection(false),
    hasExhaustiveCandidate(false),
    inDecommitNowCollection(false),
    inCacheCleanupCollection(false),
    hasPendingDeleteGuestArena(false),
    needOOMRescan(false),
#if ENABLE_CONCURRENT_GC && ENABLE_PARTIAL_GC
    hasBackgroundFinishPartial(false),
#endif
    decommitOnFinish(false)
#ifdef PROFILE_EXEC
    , profiler(nullptr)
    , backgroundProfiler(nullptr)
    , backgroundProfilerPageAllocator(nullptr, configFlagsTable, PageAllocatorType_GCThread)
    , backgroundProfilerArena()
#endif
#ifdef PROFILE_MEM
    , memoryData(nullptr)
#endif
#ifdef RECYCLER_DUMP_OBJECT_GRAPH
    , objectGraphDumper(nullptr)
    , dumpObjectOnceOnCollect(false)
#endif
#ifdef PROFILE_RECYCLER_ALLOC
    , trackerDictionary(nullptr)
#endif
#ifdef HEAP_ENUMERATION_VALIDATION
    , pfPostHeapEnumScanCallback(nullptr)
#endif
#ifdef NTBUILD
    , telemetryBlock(&localTelemetryBlock)
#endif
#ifdef ENABLE_BASIC_TELEMETRY
    , telemetryStats(this, hostInterface)
#endif
#ifdef ENABLE_JS_ETW
    , bulkFreeMemoryWrittenCount(0)
#endif
#ifdef RECYCLER_PAGE_HEAP
    , isPageHeapEnabled(false)
    , capturePageHeapAllocStack(false)
    , capturePageHeapFreeStack(false)
#endif
    , objectBeforeCollectCallbackMap(nullptr)
    , objectBeforeCollectCallbackState(ObjectBeforeCollectCallback_None)
#if GLOBAL_ENABLE_WRITE_BARRIER
    , pendingWriteBarrierBlockMap(&HeapAllocator::Instance)
#endif
#ifdef PROFILE_RECYCLER_ALLOC
    , trackerCriticalSection(nullptr)
#endif
{
#ifdef ENABLE_BASIC_TELEMETRY
    if (CoCreateGuid(&recyclerID) != S_OK)
    {
        // CoCreateGuid failed; fall back to an all-zero ID
        recyclerID = { 0 };
    }
    this->GetHeapInfo()->GetRecyclerPageAllocator()->SetDecommitStats(this->GetRecyclerTelemetryInfo().GetThreadPageAllocator_decommitStats());
    this->GetHeapInfo()->GetRecyclerLeafPageAllocator()->SetDecommitStats(this->GetRecyclerTelemetryInfo().GetRecyclerLeafPageAllocator_decommitStats());
    this->GetHeapInfo()->GetRecyclerLargeBlockPageAllocator()->SetDecommitStats(this->GetRecyclerTelemetryInfo().GetRecyclerLargeBlockPageAllocator_decommitStats());
#ifdef RECYCLER_WRITE_BARRIER_ALLOC_SEPARATE_PAGE
    this->GetHeapInfo()->GetRecyclerWithBarrierPageAllocator()->SetDecommitStats(this->GetRecyclerTelemetryInfo().GetRecyclerWithBarrierPageAllocator_decommitStats());
#endif
#endif
#ifdef RECYCLER_MARK_TRACK
    this->markMap = NoCheckHeapNew(MarkMap, &NoCheckHeapAllocator::Instance, 163, &markMapCriticalSection);
    markContext.SetMarkMap(markMap);
    parallelMarkContext1.SetMarkMap(markMap);
    parallelMarkContext2.SetMarkMap(markMap);
    parallelMarkContext3.SetMarkMap(markMap);
#endif
#ifdef RECYCLER_MEMORY_VERIFY
    verifyPad = GetRecyclerFlagsTable().RecyclerVerifyPadSize;
    verifyEnabled = GetRecyclerFlagsTable().IsEnabled(Js::RecyclerVerifyFlag);
    if (verifyEnabled)
    {
        autoHeap.EnableVerify();
    }
#endif
#ifdef RECYCLER_NO_PAGE_REUSE
    if (GetRecyclerFlagsTable().RecyclerNoPageReuse)
    {
        autoHeap.DisablePageReuse();
    }
#endif
    this->inDispose = false;
#if DBG
    this->heapBlockCount = 0;
    this->disableThreadAccessCheck = false;
#if ENABLE_CONCURRENT_GC
    this->disableConcurrentThreadExitedCheck = false;
#endif
#endif
#if DBG || defined RECYCLER_TRACE
    this->collectionCount = 0;
    this->inResolveExternalWeakReferences = false;
#endif
#if DBG || defined(RECYCLER_STATS)
    isForceSweeping = false;
#endif
#ifdef RECYCLER_FINALIZE_CHECK
    collectionStats.finalizeCount = 0;
#endif
    RecyclerMemoryTracking::ReportRecyclerCreate(this);
#if DBG_DUMP
    forceTraceMark = false;
#endif
    isHeapEnumInProgress = false;
    isCollectionDisabled = false;
#if DBG
    allowAllocationDuringRenentrance = false;
    allowAllocationDuringHeapEnum = false;
#ifdef ENABLE_PROJECTION
    isInRefCountTrackingForProjection = false;
#endif
#endif
    ScheduleNextCollection();
#if defined(RECYCLER_DUMP_OBJECT_GRAPH) || defined(LEAK_REPORT) || defined(CHECK_MEMORY_LEAK)
    this->inDllCanUnloadNow = false;
    this->inDetachProcess = false;
#endif
#ifdef NTBUILD
    memset(&localTelemetryBlock, 0, sizeof(localTelemetryBlock));
#endif
#ifdef ENABLE_DEBUG_CONFIG_OPTIONS
    // The main mark context requires at least Recycler::PrimaryMarkStackReservedPageCount pages to function properly
    this->markContext.SetMaxPageCount(max(static_cast<size_t>(GetRecyclerFlagsTable().MaxMarkStackPageCount), static_cast<size_t>(Recycler::PrimaryMarkStackReservedPageCount)));
    this->parallelMarkContext1.SetMaxPageCount(GetRecyclerFlagsTable().MaxMarkStackPageCount);
    this->parallelMarkContext2.SetMaxPageCount(GetRecyclerFlagsTable().MaxMarkStackPageCount);
    this->parallelMarkContext3.SetMaxPageCount(GetRecyclerFlagsTable().MaxMarkStackPageCount);
    if (GetRecyclerFlagsTable().IsEnabled(Js::GCMemoryThresholdFlag))
    {
        // Note: we can't do this in RecyclerHeuristic::Instance's constructor because that runs before the config is processed
        RecyclerHeuristic::Instance.ConfigureBaseFactor(GetRecyclerFlagsTable().GCMemoryThreshold);
    }
#endif
}
#if DBG
void
Recycler::SetDisableThreadAccessCheck()
{
    autoHeap.SetDisableThreadAccessCheck();
    disableThreadAccessCheck = true;
}
#endif
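// MemProtect mode (used when this recycler backs the MemProtect heap rather
// than the JS engine): scan interior pointers and implicit roots conservatively,
// and stop triggering collections from allocation heuristics.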
void
Recycler::SetMemProtectMode()
{
    this->enableScanInteriorPointers = true;
    this->enableScanImplicitRoots = true;
    this->disableCollectOnAllocationHeuristics = true;
#ifdef RECYCLER_STRESS
    this->recyclerStress = GetRecyclerFlagsTable().MemProtectHeapStress;
#if ENABLE_CONCURRENT_GC
    this->recyclerBackgroundStress = GetRecyclerFlagsTable().MemProtectHeapBackgroundStress;
    this->recyclerConcurrentStress = GetRecyclerFlagsTable().MemProtectHeapConcurrentStress;
    this->recyclerConcurrentRepeatStress = GetRecyclerFlagsTable().MemProtectHeapConcurrentRepeatStress;
#endif
#if ENABLE_PARTIAL_GC
    this->recyclerPartialStress = GetRecyclerFlagsTable().MemProtectHeapPartialStress;
#endif
#endif
}
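// Logs the current heap size counters to ETW; the size queries are only
// performed when a listener actually has the event enabled.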
void
Recycler::LogMemProtectHeapSize(bool fromGC)
{
    Assert(IsMemProtectMode());
#ifdef ENABLE_JS_ETW
    if (IS_JS_ETW(EventEnabledMEMPROTECT_GC_HEAP_SIZE()))
    {
        size_t usedBytes = autoHeap.GetUsedBytes();
        size_t reservedBytes = autoHeap.GetReservedBytes();
        size_t committedBytes = autoHeap.GetCommittedBytes();
        size_t numberOfSegments = autoHeap.GetNumberOfSegments();
        JS_ETW(EventWriteMEMPROTECT_GC_HEAP_SIZE(this, usedBytes, reservedBytes, committedBytes, numberOfSegments, fromGC));
    }
#endif
}

#if DBG
void
Recycler::SetDisableConcurrentThreadExitedCheck()
{
#if ENABLE_CONCURRENT_GC
    disableConcurrentThreadExitedCheck = true;
#endif
#ifdef RECYCLER_STRESS
    this->recyclerStress = false;
#if ENABLE_CONCURRENT_GC
    this->recyclerBackgroundStress = false;
    this->recyclerConcurrentStress = false;
    this->recyclerConcurrentRepeatStress = false;
#endif
#if ENABLE_PARTIAL_GC
    this->recyclerPartialStress = false;
#endif
#endif
}
#endif
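// Debug-only: clear the thread-id bookkeeping on the heap and mark-context page
// allocators so the recycler can be torn down from a different thread (the
// destructor calls this) without tripping thread-access asserts.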
#if DBG
void
Recycler::ResetThreadId()
{
    autoHeap.ResetThreadId();
#if ENABLE_CONCURRENT_GC
    if (this->IsConcurrentEnabled())
    {
        markContext.GetPageAllocator()->ClearConcurrentThreadId();
    }
#endif
#if defined(DBG) && defined(PROFILE_EXEC)
    this->backgroundProfilerPageAllocator.ClearConcurrentThreadId();
#endif
}
#endif
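// Teardown order matters here: unlink from the debug recycler list, stop further
// collections, flush ETW free records, run leak reporting, clean up any aborted
// concurrent sweep state, close the heap and mark contexts, and finally neuter
// all outstanding weak references so finalizers see them as already collected.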
Recycler::~Recycler()
{
#if ENABLE_CONCURRENT_GC
    Assert(!this->isAborting);
#endif
#if DBG && GLOBAL_ENABLE_WRITE_BARRIER
    recyclerListLock.Enter();
    if (recyclerList == this)
    {
        recyclerList = this->next;
    }
    else if (recyclerList)
    {
        Recycler* list = recyclerList;
        while (list->next != this)
        {
            list = list->next;
        }
        list->next = this->next;
    }
    recyclerListLock.Leave();
#endif
    // Stop any further collection
    this->isShuttingDown = true;
#if DBG
    this->ResetThreadId();
#endif
#ifdef ENABLE_JS_ETW
    FlushFreeRecord();
#endif
    ClearObjectBeforeCollectCallbacks();
#ifdef RECYCLER_DUMP_OBJECT_GRAPH
    if (GetRecyclerFlagsTable().DumpObjectGraphOnExit)
    {
        // Always skip stack here, as we may be running the dtor on another thread.
        RecyclerObjectGraphDumper::Param param = { 0 };
        param.skipStack = true;
        this->DumpObjectGraph(&param);
    }
#endif
    AUTO_LEAK_REPORT_SECTION(this->GetRecyclerFlagsTable(), _u("Recycler (%p): %s"), this, this->IsInDllCanUnloadNow() ? _u("DllCanUnloadNow") :
        this->IsInDetachProcess() ? _u("DetachProcess") : _u("Destructor"));
#ifdef LEAK_REPORT
    ReportLeaks();
#endif
#ifdef CHECK_MEMORY_LEAK
    CheckLeaks(this->IsInDllCanUnloadNow() ? _u("DllCanUnloadNow") : this->IsInDetachProcess() ? _u("DetachProcess") : _u("Destructor"));
#endif
    AUTO_LEAK_REPORT_SECTION_0(this->GetRecyclerFlagsTable(), _u("Skipped finalizers"));
#if ENABLE_CONCURRENT_GC
    Assert(concurrentThread == nullptr);
    // We only sometimes clean up this state when aborting a concurrent collection
    // back to the not-collecting state, so we still need to delete any heap
    // blocks held by the recyclerSweepManager
    if (recyclerSweepManager != nullptr)
    {
        recyclerSweepManager->ShutdownCleanup();
        recyclerSweepManager = nullptr;
    }
    if (mainThreadHandle != nullptr)
    {
        CloseHandle(mainThreadHandle);
    }
#endif
    autoHeap.Close();
    markContext.Release();
    parallelMarkContext1.Release();
    parallelMarkContext2.Release();
    parallelMarkContext3.Release();
    // Clean up the weak reference map so that
    // objects being finalized can safely refer to weak references
    // (this could otherwise become a problem for weak references held
    // to large objects, since their block would be destroyed before
    // the finalizer was run).
    // When the recycler is shutting down, all objects are going to be reclaimed,
    // so null out the weak references so that anyone relying on weak
    // references simply thinks the object has been reclaimed.
    weakReferenceMap.Map([](RecyclerWeakReferenceBase * weakRef) -> bool
    {
        weakRef->strongRef = nullptr;
        // Put in a dummy heap block so that we can still do the isPendingConcurrentSweep check first.
        weakRef->strongRefHeapBlock = &CollectedRecyclerWeakRefHeapBlock::Instance;
        // Remove the entry
        return false;
    });
#if ENABLE_PARTIAL_GC
    clientTrackedObjectList.Clear(&this->clientTrackedObjectAllocator);
#endif
#ifdef PROFILE_RECYCLER_ALLOC
    if (trackerDictionary != nullptr)
    {
        this->trackerDictionary->Map([](type_info const *, TrackerItem * item)
        {
            NoCheckHeapDelete(item);
        });
        NoCheckHeapDelete(this->trackerDictionary);
        this->trackerDictionary = nullptr;
        delete trackerCriticalSection;
    }
#endif
#ifdef RECYCLER_MARK_TRACK
    NoCheckHeapDelete(this->markMap);
    this->markMap = nullptr;
#endif
#if DBG
    // Disable idle decommit asserts
    autoHeap.ShutdownIdleDecommit();
#endif
    Assert(this->collectionState == CollectionStateExit || this->collectionState == CollectionStateNotCollecting);
#if ENABLE_CONCURRENT_GC
    Assert(this->disableConcurrentThreadExitedCheck || this->concurrentThreadExited == true);
#endif
}
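// Capture a real handle to the owning thread (GetCurrentThread() only returns a
// pseudo-handle, so it must be duplicated) for use by the concurrent GC, and
// record the stack base for stack scanning.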
void
Recycler::SetIsThreadBound()
{
    Assert(mainThreadHandle == nullptr);
    ::DuplicateHandle(::GetCurrentProcess(), ::GetCurrentThread(), ::GetCurrentProcess(), &mainThreadHandle,
        0, FALSE, DUPLICATE_SAME_ACCESS);
    stackBase = GetStackBase();
}
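// Pins an object as a GC root. The count for the most recent pin is carried in
// transientPinnedObject rather than entered into pinnedObjectMap right away; it
// is only promoted into the map when a different object gets pinned, which keeps
// a pin-then-quickly-release pattern off the hash table.
// Usage sketch (hypothetical host code):
//     recycler->RootAddRef(obj);   // obj now survives collections
//     ...                          // obj held only from native code
//     recycler->RootRelease(obj);  // unpinned; collectible again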
void
Recycler::RootAddRef(void* obj, uint *count)
{
    Assert(this->IsValidObject(obj));
    if (transientPinnedObject)
    {
        PinRecord& refCount = pinnedObjectMap.GetReference(transientPinnedObject);
        ++refCount;
        if (refCount == 1)
        {
            this->scanPinnedObjectMap = true;
            RECYCLER_PERF_COUNTER_INC(PinnedObject);
        }
#if defined(CHECK_MEMORY_LEAK) || defined(LEAK_REPORT)
#ifdef STACK_BACK_TRACE
        if (GetRecyclerFlagsTable().LeakStackTrace)
        {
            StackBackTraceNode::Prepend(&NoCheckHeapAllocator::Instance, refCount.stackBackTraces,
                transientPinnedObjectStackBackTrace);
        }
#endif
#endif
    }
    if (count != nullptr)
    {
        PinRecord* refCount = pinnedObjectMap.TryGetReference(obj);
        *count = (refCount != nullptr) ? (*refCount + 1) : 1;
    }
    transientPinnedObject = obj;
#if defined(CHECK_MEMORY_LEAK) || defined(LEAK_REPORT)
#ifdef STACK_BACK_TRACE
    if (GetRecyclerFlagsTable().LeakStackTrace)
    {
        transientPinnedObjectStackBackTrace = StackBackTrace::Capture(&NoCheckHeapAllocator::Instance);
    }
#endif
#endif
}
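// Note that RootAddRef defers the hash-map insert: the newest root is parked in
// transientPinnedObject and only folded into pinnedObjectMap when the next pin
// arrives. A matching RootRelease on the same object then costs no map lookup
// at all, which is the common short-lived-pin pattern. A minimal sketch of this
// deferral (hypothetical simplified map, not the real types):
#if 0
#include <unordered_map>

struct PinSet
{
    std::unordered_map<void*, unsigned> counts;
    void* transient = nullptr;

    void AddRef(void* obj)
    {
        if (transient != nullptr)
        {
            ++counts[transient];  // fold the previous transient pin in
        }
        transient = obj;          // park the newest pin; no map touch yet
    }

    void Release(void* obj)
    {
        if (transient == obj) { transient = nullptr; return; } // cheap path
        auto it = counts.find(obj);
        if (it != counts.end() && --it->second == 0) { counts.erase(it); }
    }
};
#endif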
void
Recycler::RootRelease(void* obj, uint *count)
{
    Assert(this->IsValidObject(obj));
    if (transientPinnedObject == obj)
    {
        transientPinnedObject = nullptr;
        if (count != nullptr)
        {
            PinRecord *refCount = pinnedObjectMap.TryGetReference(obj);
            *count = (refCount != nullptr) ? *refCount : 0;
        }
#if defined(CHECK_MEMORY_LEAK) || defined(LEAK_REPORT)
#ifdef STACK_BACK_TRACE
        if (GetRecyclerFlagsTable().LeakStackTrace)
        {
            transientPinnedObjectStackBackTrace->Delete(&NoCheckHeapAllocator::Instance);
        }
#endif
#endif
    }
    else
    {
        PinRecord *refCount = pinnedObjectMap.TryGetReference(obj);
        if (refCount == nullptr)
        {
            if (count != nullptr)
            {
                *count = (uint)-1;
            }
            // REVIEW: throw if not found
            Assert(false);
            return;
        }
        uint newRefCount = (--(*refCount));
        if (count != nullptr)
        {
            *count = newRefCount;
        }
        if (newRefCount != 0)
        {
#if defined(CHECK_MEMORY_LEAK) || defined(LEAK_REPORT)
#ifdef STACK_BACK_TRACE
            if (GetRecyclerFlagsTable().LeakStackTrace)
            {
                StackBackTraceNode::Prepend(&NoCheckHeapAllocator::Instance, refCount->stackBackTraces,
                    StackBackTrace::Capture(&NoCheckHeapAllocator::Instance));
            }
#endif
#endif
            return;
        }
#if defined(CHECK_MEMORY_LEAK) || defined(LEAK_REPORT)
#ifdef STACK_BACK_TRACE
        StackBackTraceNode::DeleteAll(&NoCheckHeapAllocator::Instance, refCount->stackBackTraces);
        refCount->stackBackTraces = nullptr;
#endif
#endif
#if ENABLE_CONCURRENT_GC
        // Don't delete the entry if we are in the concurrent find-root state;
        // we will delete it later during the in-thread find-root pass
        if (this->hasPendingConcurrentFindRoot)
        {
            this->hasPendingUnpinnedObject = true;
        }
        else
#endif
        {
            pinnedObjectMap.Remove(obj);
        }
        RECYCLER_PERF_COUNTER_DEC(PinnedObject);
    }
    // Any time a root is removed during a GC, it indicates that an exhaustive
    // collection is likely going to have work to do, so trigger an exhaustive
    // candidate GC to indicate this fact
    this->CollectNow<CollectExhaustiveCandidate>();
}
#if DBG && GLOBAL_ENABLE_WRITE_BARRIER
Recycler* Recycler::recyclerList = nullptr;
CriticalSection Recycler::recyclerListLock;
#endif

void
Recycler::Initialize(const bool forceInThread, JsUtil::ThreadService *threadService, const bool deferThreadStartup
#ifdef RECYCLER_PAGE_HEAP
    , PageHeapMode pageheapmode
    , bool captureAllocCallStack
    , bool captureFreeCallStack
#endif
    )
{
#ifdef PROFILE_RECYCLER_ALLOC
    this->InitializeProfileAllocTracker();
#endif
#ifdef ENABLE_DEBUG_CONFIG_OPTIONS
    this->disableCollection = CUSTOM_PHASE_OFF1(GetRecyclerFlagsTable(), Js::RecyclerPhase);
#endif
#if ENABLE_CONCURRENT_GC
    this->skipStack = false;
#endif
#if ENABLE_PARTIAL_GC
#if ENABLE_DEBUG_CONFIG_OPTIONS
    this->enablePartialCollect = !CUSTOM_PHASE_OFF1(GetRecyclerFlagsTable(), Js::PartialCollectPhase);
#else
    this->enablePartialCollect = true;
#endif
#endif
#ifdef PROFILE_MEM
    this->memoryData = MemoryProfiler::GetRecyclerMemoryData();
#endif
#if DBG || DBG_DUMP || defined(RECYCLER_TRACE)
    mainThreadId = GetCurrentThreadContextId();
#endif
#ifdef RECYCLER_TRACE
    collectionParam.domCollect = false;
#endif
#if defined(PROFILE_RECYCLER_ALLOC) || defined(RECYCLER_MEMORY_VERIFY) || defined(MEMSPECT_TRACKING) || defined(ETW_MEMORY_TRACKING)
    bool dontNeedDetailedTracking = false;
#if defined(PROFILE_RECYCLER_ALLOC)
    dontNeedDetailedTracking = dontNeedDetailedTracking || this->trackerDictionary == nullptr;
#endif
#if defined(RECYCLER_MEMORY_VERIFY)
    dontNeedDetailedTracking = dontNeedDetailedTracking || !this->verifyEnabled;
#endif
    // If we need detailed tracking, we force the allocation fast path in the JIT to fail and go to the helper,
    // so there is no need for the TrackNativeAllocatedMemoryBlock callback.
    if (dontNeedDetailedTracking)
    {
        autoHeap.Initialize(this, TrackNativeAllocatedMemoryBlock
#ifdef RECYCLER_PAGE_HEAP
            , pageheapmode
            , captureAllocCallStack
            , captureFreeCallStack
#endif
            );
    }
    else
    {
        autoHeap.Initialize(this
#ifdef RECYCLER_PAGE_HEAP
            , pageheapmode
            , captureAllocCallStack
            , captureFreeCallStack
#endif
            );
    }
#else
    autoHeap.Initialize(this
#ifdef RECYCLER_PAGE_HEAP
        , pageheapmode
        , captureAllocCallStack
        , captureFreeCallStack
#endif
        );
#endif
    markContext.Init(Recycler::PrimaryMarkStackReservedPageCount);
#if defined(RECYCLER_DUMP_OBJECT_GRAPH) || defined(LEAK_REPORT) || defined(CHECK_MEMORY_LEAK)
    isPrimaryMarkContextInitialized = true;
#endif
#ifdef RECYCLER_PAGE_HEAP
    isPageHeapEnabled = autoHeap.IsPageHeapEnabled();
    if (IsPageHeapEnabled())
    {
        capturePageHeapAllocStack = autoHeap.DoCaptureAllocCallStack();
        capturePageHeapFreeStack = autoHeap.DoCaptureFreeCallStack();
    }
#endif
#ifdef RECYCLER_STRESS
#if ENABLE_PARTIAL_GC
    if (GetRecyclerFlagsTable().RecyclerTrackStress)
    {
        // Disable partial if we are doing track stress, since partial relies on ClientTracked processing
        // and track stress doesn't support this.
        this->enablePartialCollect = false;
    }
#endif
    this->recyclerStress = GetRecyclerFlagsTable().RecyclerStress;
#if ENABLE_CONCURRENT_GC
    this->recyclerBackgroundStress = GetRecyclerFlagsTable().RecyclerBackgroundStress;
    this->recyclerConcurrentStress = GetRecyclerFlagsTable().RecyclerConcurrentStress;
    this->recyclerConcurrentRepeatStress = GetRecyclerFlagsTable().RecyclerConcurrentRepeatStress;
#endif
#if ENABLE_PARTIAL_GC
    this->recyclerPartialStress = GetRecyclerFlagsTable().RecyclerPartialStress;
#endif
#endif
    bool needWriteWatch = false;
#if ENABLE_CONCURRENT_GC
    // Default to non-concurrent
    uint numProcs = (uint)AutoSystemInfo::Data.GetNumberOfPhysicalProcessors();
    this->maxParallelism = (numProcs > 4) || CUSTOM_PHASE_FORCE1(GetRecyclerFlagsTable(), Js::ParallelMarkPhase) ? 4 : numProcs;
    if (forceInThread)
    {
        // Requested a non-concurrent recycler
        this->disableConcurrent = true;
    }
#if ENABLE_DEBUG_CONFIG_OPTIONS
    else if (CUSTOM_PHASE_OFF1(GetRecyclerFlagsTable(), Js::ConcurrentCollectPhase))
    {
        // Concurrent collection disabled
        this->disableConcurrent = true;
    }
    else if (CUSTOM_PHASE_OFF1(GetRecyclerFlagsTable(), Js::ConcurrentMarkPhase) &&
        CUSTOM_PHASE_OFF1(GetRecyclerFlagsTable(), Js::ParallelMarkPhase) &&
        CUSTOM_PHASE_OFF1(GetRecyclerFlagsTable(), Js::ConcurrentSweepPhase))
    {
        // All concurrent collection phases disabled
        this->disableConcurrent = true;
    }
#endif
    else
    {
        this->disableConcurrent = false;
        if (deferThreadStartup || EnableConcurrent(threadService, false))
        {
#ifdef RECYCLER_WRITE_WATCH
            needWriteWatch = true;
#endif
        }
    }
#endif // ENABLE_CONCURRENT_GC
#if ENABLE_PARTIAL_GC
    if (this->enablePartialCollect)
    {
#ifdef RECYCLER_WRITE_WATCH
        needWriteWatch = true;
#endif
    }
#endif
#if ENABLE_PARTIAL_GC || ENABLE_CONCURRENT_GC
#ifdef RECYCLER_WRITE_WATCH
    if (!CONFIG_FLAG(ForceSoftwareWriteBarrier))
    {
        if (needWriteWatch)
        {
            // need write watch to support concurrent and/or partial collection
            autoHeap.EnableWriteWatch();
        }
    }
#endif
#else
    Assert(!needWriteWatch);
#endif
#if DBG && GLOBAL_ENABLE_WRITE_BARRIER
    recyclerListLock.Enter();
    this->next = recyclerList;
    recyclerList = this;
    recyclerListLock.Leave();
#endif
}
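// Write watch is what lets partial/concurrent collection rescan only the pages
// that were written since the last pass. On Win32 it is backed by
// MEM_WRITE_WATCH allocations queried via GetWriteWatch; a standalone sketch of
// that OS mechanism (not part of the build):
#if 0
#include <windows.h>

void DemoWriteWatch()
{
    const SIZE_T pageSize = 4096;
    char* p = (char*)::VirtualAlloc(nullptr, 16 * pageSize,
        MEM_RESERVE | MEM_COMMIT | MEM_WRITE_WATCH, PAGE_READWRITE);
    if (p == nullptr) { return; }
    p[0] = 1;                  // dirty page 0
    p[5 * pageSize] = 1;       // dirty page 5

    void* dirty[16];
    ULONG_PTR count = 16;
    DWORD granularity = 0;
    // Retrieves the addresses of pages written since allocation (or last reset).
    ::GetWriteWatch(WRITE_WATCH_FLAG_RESET, p, 16 * pageSize,
        dirty, &count, &granularity);
    // count == 2 here; a GC would rescan just those two pages.
    ::VirtualFree(p, 0, MEM_RELEASE);
}
#endif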
BOOL
Recycler::CollectionInProgress() const
{
    return collectionState != CollectionStateNotCollecting;
}

BOOL
Recycler::IsExiting() const
{
    return (collectionState == Collection_Exit);
}

BOOL
Recycler::IsSweeping() const
{
    return ((collectionState & Collection_Sweep) == Collection_Sweep);
}

void
Recycler::SetIsScriptActive(bool isScriptActive)
{
    Assert(this->isInScript);
    Assert(this->isScriptActive != isScriptActive);
    this->isScriptActive = isScriptActive;
    if (isScriptActive)
    {
        this->tickCountNextDispose = ::GetTickCount() + RecyclerHeuristic::TickCountFinishCollection;
    }
}
void
Recycler::SetIsInScript(bool isInScript)
{
    Assert(this->isInScript != isInScript);
    this->isInScript = isInScript;
}

bool
Recycler::HasNativeGCHost() const
{
    return this->hasNativeGCHost;
}

void
Recycler::SetHasNativeGCHost()
{
    this->hasNativeGCHost = true;
}

bool
Recycler::NeedOOMRescan() const
{
    return this->needOOMRescan;
}

void
Recycler::SetNeedOOMRescan()
{
    this->needOOMRescan = true;
}

void
Recycler::ClearNeedOOMRescan()
{
    this->needOOMRescan = false;
    markContext.GetPageAllocator()->ResetDisableAllocationOutOfMemory();
    parallelMarkContext1.GetPageAllocator()->ResetDisableAllocationOutOfMemory();
    parallelMarkContext2.GetPageAllocator()->ResetDisableAllocationOutOfMemory();
    parallelMarkContext3.GetPageAllocator()->ResetDisableAllocationOutOfMemory();
}

bool
Recycler::IsMemProtectMode()
{
    return this->enableScanImplicitRoots;
}

size_t
Recycler::GetUsedBytes()
{
    return autoHeap.GetUsedBytes();
}
#if DBG
BOOL
Recycler::IsFreeObject(void * candidate)
{
    HeapBlock * heapBlock = this->FindHeapBlock(candidate);
    if (heapBlock != NULL)
    {
        return heapBlock->IsFreeObject(candidate);
    }
    return false;
}
#endif

BOOL
Recycler::IsValidObject(void* candidate, size_t minimumSize)
{
    HeapBlock * heapBlock = this->FindHeapBlock(candidate);
    if (heapBlock != NULL)
    {
        return heapBlock->IsValidObject(candidate) && (minimumSize == 0 || heapBlock->GetObjectSize(candidate) >= minimumSize);
    }
    return false;
}

void
Recycler::Prime()
{
#ifdef ENABLE_DEBUG_CONFIG_OPTIONS
    if (GetRecyclerFlagsTable().IsEnabled(Js::ForceFragmentAddressSpaceFlag))
    {
        // Never prime the recycler if we are forced to fragment address space
        return;
    }
#endif
    autoHeap.Prime();
}
void
Recycler::AddExternalMemoryUsage(size_t size)
{
    this->autoHeap.uncollectedAllocBytes += size;
    this->autoHeap.uncollectedExternalBytes += size;
    // Normal GC heuristics can usually account for uncollectedAllocBytes, but if external
    // components make several large allocations in a row, a normal GC might not kick in.
    // Force the GC here if we need to collect anyway.
    CollectNow<CollectOnAllocation>();
}
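// External memory accounting exists so host-held buffers (e.g. a typed array's
// backing store allocated outside the GC heap) still count toward collection
// heuristics. A minimal sketch of the idea (hypothetical names, not the real API):
#if 0
#include <cstddef>

class ExternalMemoryTracker
{
    size_t uncollectedBytes = 0;
    size_t triggerThreshold;
public:
    explicit ExternalMemoryTracker(size_t threshold) : triggerThreshold(threshold) {}

    // Called when the host allocates memory the GC cannot see directly.
    bool OnExternalAlloc(size_t size)
    {
        uncollectedBytes += size;
        return uncollectedBytes >= triggerThreshold; // caller should request a GC
    }

    void OnCollected() { uncollectedBytes = 0; }
};
#endif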
bool Recycler::RequestExternalMemoryAllocation(size_t size)
{
    AllocationPolicyManager * allocationPolicyManager = autoHeap.GetAllocationPolicyManager();
    return !allocationPolicyManager || allocationPolicyManager->RequestAlloc(size);
}

void Recycler::ReportExternalMemoryFailure(size_t size)
{
    AllocationPolicyManager * allocationPolicyManager = autoHeap.GetAllocationPolicyManager();
    if (allocationPolicyManager)
    {
        allocationPolicyManager->ReportFailure(size);
    }
}

void Recycler::ReportExternalMemoryFree(size_t size)
{
    AllocationPolicyManager * allocationPolicyManager = autoHeap.GetAllocationPolicyManager();
    if (allocationPolicyManager)
    {
        allocationPolicyManager->ReportFree(size);
    }
}
/*------------------------------------------------------------------------------------------------
 * Idle Decommit
 *------------------------------------------------------------------------------------------------*/
void
Recycler::EnterIdleDecommit()
{
    autoHeap.EnterIdleDecommit();
#ifdef IDLE_DECOMMIT_ENABLED
    ::InterlockedCompareExchange(&needIdleDecommitSignal, IdleDecommitSignal_None, IdleDecommitSignal_NeedTimer);
#endif
}

void
Recycler::LeaveIdleDecommit()
{
#ifdef IDLE_DECOMMIT_ENABLED
    bool allowTimer = (this->concurrentIdleDecommitEvent != nullptr);
    IdleDecommitSignal idleDecommitSignal = autoHeap.LeaveIdleDecommit(allowTimer);
    if (idleDecommitSignal != IdleDecommitSignal_None)
    {
        Assert(allowTimer);
        // Reduce the number of times we need to signal the background thread
        // by detecting whether the thread is waiting on a timeout or not
        if (idleDecommitSignal == IdleDecommitSignal_NeedSignal ||
            ::InterlockedCompareExchange(&needIdleDecommitSignal, IdleDecommitSignal_NeedTimer, IdleDecommitSignal_None) == IdleDecommitSignal_NeedSignal)
        {
#if DBG
            if (GetRecyclerFlagsTable().Trace.IsEnabled(Js::IdleDecommitPhase))
            {
                Output::Print(_u("Recycler Thread IdleDecommit Need Signal\n"));
                Output::Flush();
            }
#endif
#pragma prefast(suppress:6387, "INVALID_PARAM_VALUE_1 We will never reach here if concurrentIdleDecommitEvent is NULL.");
            SetEvent(this->concurrentIdleDecommitEvent);
        }
    }
#else
    autoHeap.LeaveIdleDecommit(false /*allowTimer*/);
#endif
}
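// The InterlockedCompareExchange pair above is a wakeup-elision handshake: the
// background thread publishes whether it is parked on an infinite wait or
// merely polling on a timer, and the foreground thread only pays for SetEvent
// in the first case. A distilled sketch of the pattern (simplified states,
// not the real enum; the real code tolerates the same benign races):
#if 0
#include <windows.h>

volatile LONG signalState = 0;   // 0 = none, 1 = waiter on timer, 2 = waiter parked

void Waiter(HANDLE evt)
{
    ::InterlockedExchange(&signalState, 2);      // announce: parked, needs SetEvent
    ::WaitForSingleObject(evt, INFINITE);
    ::InterlockedExchange(&signalState, 0);
}

void Notifier(HANDLE evt)
{
    // Only signal when the waiter said it is actually parked; if it is merely
    // polling on a timeout, skip the kernel call entirely.
    if (::InterlockedCompareExchange(&signalState, 1, 0) == 2)
    {
        ::SetEvent(evt);
    }
}
#endif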
/*------------------------------------------------------------------------------------------------
 * Freeing
 *------------------------------------------------------------------------------------------------*/
bool Recycler::ExplicitFreeLeaf(void* buffer, size_t size)
{
    return ExplicitFreeInternalWrapper<ObjectInfoBits::LeafBit>(buffer, size);
}

bool Recycler::ExplicitFreeNonLeaf(void* buffer, size_t size)
{
    return ExplicitFreeInternalWrapper<ObjectInfoBits::NoBit>(buffer, size);
}

size_t Recycler::GetAllocSize(size_t size)
{
    size_t allocSize = size;
#ifdef RECYCLER_MEMORY_VERIFY
    if (this->VerifyEnabled())
    {
        allocSize += verifyPad + sizeof(size_t);
        Assert(allocSize > size);
    }
#endif
    return allocSize;
}
template <typename TBlockAttributes>
void Recycler::SetExplicitFreeBitOnSmallBlock(HeapBlock* heapBlock, size_t sizeCat, void* buffer, ObjectInfoBits attributes)
{
    Assert(!heapBlock->IsLargeHeapBlock());
    Assert(heapBlock->GetObjectSize(buffer) == sizeCat);
    SmallHeapBlockT<TBlockAttributes>* smallBlock = (SmallHeapBlockT<TBlockAttributes>*)heapBlock;
    if ((attributes & ObjectInfoBits::LeafBit) == LeafBit)
    {
        Assert(smallBlock->IsLeafBlock());
    }
    else
    {
        Assert(smallBlock->IsAnyNormalBlock());
    }
#ifdef RECYCLER_MEMORY_VERIFY
    smallBlock->SetExplicitFreeBitForObject(buffer);
#endif
}
template <ObjectInfoBits attributes>
bool Recycler::ExplicitFreeInternalWrapper(void* buffer, size_t size)
{
    Assert(buffer != nullptr);
    Assert(size > 0);
#ifdef ENABLE_DEBUG_CONFIG_OPTIONS
    if (CUSTOM_PHASE_OFF1(GetRecyclerFlagsTable(), Js::ExplicitFreePhase))
    {
        return false;
    }
#endif
    size_t allocSize = GetAllocSize(size);
    if (HeapInfo::IsSmallObject(allocSize))
    {
        return ExplicitFreeInternal<attributes, SmallAllocationBlockAttributes>(buffer, size, HeapInfo::GetAlignedSizeNoCheck(allocSize));
    }
    if (HeapInfo::IsMediumObject(allocSize))
    {
        return ExplicitFreeInternal<attributes, MediumAllocationBlockAttributes>(buffer, size, HeapInfo::GetMediumObjectAlignedSizeNoCheck(allocSize));
    }
    return false;
}
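// ExplicitFreeInternalWrapper is a size-class dispatcher: the raw byte size is
// padded (for verification), rounded up to its bucket's aligned size, and then
// routed to the small or medium free path; large objects are never explicitly
// freed. A sketch of that bucketing arithmetic (illustrative constants only;
// the real thresholds live in HeapConstants):
#if 0
#include <cstddef>

constexpr size_t kAlign = 16;          // illustrative allocation granularity
constexpr size_t kSmallMax = 768;      // illustrative small-object ceiling
constexpr size_t kMediumMax = 9216;    // illustrative medium-object ceiling

enum class SizeClass { Small, Medium, Large };

constexpr size_t AlignedSize(size_t n) { return (n + kAlign - 1) & ~(kAlign - 1); }

constexpr SizeClass Classify(size_t n)
{
    size_t a = AlignedSize(n);
    return a <= kSmallMax ? SizeClass::Small
         : a <= kMediumMax ? SizeClass::Medium
         : SizeClass::Large;           // Large: explicit free returns false
}

static_assert(Classify(40) == SizeClass::Small, "rounds up to a small bucket");
#endif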
template <ObjectInfoBits attributes, typename TBlockAttributes>
bool Recycler::ExplicitFreeInternal(void* buffer, size_t size, size_t sizeCat)
{
    // If the GC is in sweep state while FreeInternal is called, we might be executing a finalizer
    // which called Free, which would cause a "sweepable" buffer to be free-listed. Don't allow this.
    // Also don't allow freeing while we're shutting down the recycler, since finalizers get executed
    // at that stage too.
    if (this->IsSweeping() || this->IsExiting())
    {
        return false;
    }
#if ENABLE_CONCURRENT_GC
    // We shouldn't be freeing objects while we are running the GC in thread
    Assert(this->IsConcurrentState() || !this->CollectionInProgress() || this->IsAllocatableCallbackState());
#else
    Assert(!this->CollectionInProgress() || this->IsAllocatableCallbackState());
#endif
    DebugOnly(RecyclerHeapObjectInfo info);
    Assert(this->FindHeapObject(buffer, FindHeapObjectFlags_NoFreeBitVerify, info));
    Assert((info.GetAttributes() & ~ObjectInfoBits::LeafBit) == 0); // Only NoBit or LeafBit
    HeapInfo * heapInfo = this->GetHeapInfo<attributes>();
#if DBG || defined(RECYCLER_MEMORY_VERIFY) || defined(RECYCLER_PAGE_HEAP)
    // Either the mainThreadHandle is null (we're not thread bound)
    // or we should be calling this function on the main script thread
    Assert(this->mainThreadHandle == NULL ||
        ::GetCurrentThreadId() == ::GetThreadId(this->mainThreadHandle));
    HeapBlock* heapBlock = this->FindHeapBlock(buffer);
    Assert(heapBlock != nullptr);
#if DBG
    Assert(heapInfo == heapBlock->GetHeapInfo());
#endif
#ifdef RECYCLER_PAGE_HEAP
    if (this->IsPageHeapEnabled())
    {
#ifdef STACK_BACK_TRACE
        if (this->ShouldCapturePageHeapFreeStack())
        {
            if (heapBlock->IsLargeHeapBlock())
            {
                LargeHeapBlock* largeHeapBlock = (LargeHeapBlock*)heapBlock;
                if (largeHeapBlock->InPageHeapMode())
                {
                    largeHeapBlock->CapturePageHeapFreeStack();
                }
            }
        }
#endif
        // Don't do an actual explicit free in page heap mode
        return false;
    }
#endif
    SetExplicitFreeBitOnSmallBlock<TBlockAttributes>(heapBlock, sizeCat, buffer, attributes);
#endif
    if (TBlockAttributes::IsMediumBlock)
    {
        heapInfo->FreeMediumObject<attributes>(buffer, sizeCat);
    }
    else
    {
        heapInfo->FreeSmallObject<attributes>(buffer, sizeCat);
    }
    if (size > sizeof(FreeObject) || TBlockAttributes::IsMediumBlock)
    {
        // Do this on the background somehow?
        byte expectedFill = 0;
        size_t fillSize = size - sizeof(FreeObject);
#ifdef RECYCLER_MEMORY_VERIFY
        if (this->VerifyEnabled())
        {
            expectedFill = Recycler::VerifyMemFill;
        }
#endif
        memset(((char*)buffer) + sizeof(FreeObject), expectedFill, fillSize);
    }
#ifdef PROFILE_RECYCLER_ALLOC
    if (this->trackerDictionary != nullptr)
    {
        this->SetTrackerData(buffer, &TrackerData::ExplicitFreeListObjectData);
    }
#endif
    return true;
}
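// Explicit free turns the object's own storage into a free-list node: the
// first pointer-sized words become the FreeObject link and the rest is filled
// with a known pattern so stale reads are detectable. A generic sketch of an
// intrusive free list embedded in freed slots (hypothetical simplified types;
// assumes slotSize > sizeof(FreeNode)):
#if 0
#include <cstring>

struct FreeNode { FreeNode* next; };

struct Bucket
{
    FreeNode* freeList = nullptr;

    void Push(void* slot, size_t slotSize)
    {
        FreeNode* node = static_cast<FreeNode*>(slot);
        node->next = freeList;
        freeList = node;
        // Poison the payload beyond the embedded link to catch use-after-free.
        memset(node + 1, 0xFE, slotSize - sizeof(FreeNode));
    }

    void* Pop()
    {
        FreeNode* node = freeList;
        if (node != nullptr) { freeList = node->next; }
        return node;
    }
};
#endif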
/*------------------------------------------------------------------------------------------------
 * Allocation
 *------------------------------------------------------------------------------------------------*/
char *
Recycler::TryLargeAlloc(HeapInfo * heap, size_t size, ObjectInfoBits attributes, bool nothrow)
{
    Assert((attributes & InternalObjectInfoBitMask) == attributes);
    Assert(size != 0);
    size_t sizeCat = HeapInfo::GetAlignedSizeNoCheck(size);
    if (sizeCat == 0)
    {
        // Overflow scenario:
        // if nothrow is false, throw out of memory;
        // otherwise, return null
        if (nothrow == false)
        {
            this->OutOfMemory();
        }
        return nullptr;
    }
    char * memBlock;
    if (heap->largeObjectBucket.largeBlockList != nullptr)
    {
        memBlock = heap->largeObjectBucket.largeBlockList->Alloc(sizeCat, attributes);
        if (memBlock != nullptr)
        {
#ifdef RECYCLER_ZERO_MEM_CHECK
            VerifyLargeAllocZeroFill(memBlock, sizeCat, attributes);
#endif
            return memBlock;
        }
    }
    // We don't care whether a GC happened here or not, because we are not reusing freed
    // large objects. We might try to allocate from an existing block if we ever implement
    // large object reuse.
    if (!this->disableCollectOnAllocationHeuristics)
    {
        CollectNow<CollectOnAllocation>();
    }
#ifdef RECYCLER_PAGE_HEAP
    if (IsPageHeapEnabled())
    {
        if (heap->largeObjectBucket.IsPageHeapEnabled(attributes))
        {
            memBlock = heap->largeObjectBucket.PageHeapAlloc(this, sizeCat, size, (ObjectInfoBits)attributes, heap->pageHeapMode, nothrow);
            if (memBlock != nullptr)
            {
#ifdef RECYCLER_ZERO_MEM_CHECK
                VerifyLargeAllocZeroFill(memBlock, size, attributes);
#endif
                return memBlock;
            }
        }
    }
#endif
    LargeHeapBlock * heapBlock = heap->AddLargeHeapBlock(sizeCat);
    if (heapBlock == nullptr)
    {
        return nullptr;
    }
    memBlock = heapBlock->Alloc(sizeCat, attributes);
    Assert(memBlock != nullptr);
#ifdef RECYCLER_ZERO_MEM_CHECK
    VerifyLargeAllocZeroFill(memBlock, sizeCat, attributes);
#endif
    return memBlock;
}
template <bool nothrow>
char*
Recycler::LargeAlloc(HeapInfo* heap, size_t size, ObjectInfoBits attributes)
{
    Assert((attributes & InternalObjectInfoBitMask) == attributes);
#if ENABLE_DEBUG_CONFIG_OPTIONS
    size_t limit = (size_t)GetRecyclerFlagsTable().MaxSingleAllocSizeInMB * 1024 * 1024;
#else
    size_t limit = (size_t)CONFIG_FLAG(MaxSingleAllocSizeInMB) * 1024 * 1024;
#endif
    if (size >= limit)
    {
        if (nothrow == false)
        {
#if ENABLE_DEBUG_CONFIG_OPTIONS
            if (GetRecyclerFlagsTable().EnableFatalErrorOnOOM)
            {
                if (this->IsMemProtectMode())
                {
                    MemGCSingleAllocationLimit_unrecoverable_error();
                }
                else
                {
                    RecyclerSingleAllocationLimit_unrecoverable_error();
                }
            }
#endif
            this->OutOfMemory();
        }
        else
        {
            return nullptr;
        }
    }
    char * addr = TryLargeAlloc(heap, size, attributes, nothrow);
    if (addr == nullptr)
    {
        // Force a collection and try to allocate again.
        this->CollectNow<CollectNowForceInThread>();
        addr = TryLargeAlloc(heap, size, attributes, nothrow);
        if (addr == nullptr)
        {
            if (nothrow == false)
            {
                // Still failing, so we are out of memory.
                // Since nothrow is false, it's okay to throw here.
                this->OutOfMemory();
            }
            else
            {
                return nullptr;
            }
        }
    }
    autoHeap.uncollectedAllocBytes += size;
    return addr;
}
// Explicitly instantiate both versions of LargeAlloc
template char* Recycler::LargeAlloc<true>(HeapInfo* heap, size_t size, ObjectInfoBits attributes);
template char* Recycler::LargeAlloc<false>(HeapInfo* heap, size_t size, ObjectInfoBits attributes);

void
Recycler::OutOfMemory()
{
    outOfMemoryFunc();
}
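// LargeAlloc above is the classic GC'd-heap backstop: try, force a blocking
// in-thread collection, retry once, and only then report OOM (throw or return
// nullptr per the nothrow template argument). A distilled sketch of that
// policy (hypothetical Heap type with TryAlloc/CollectAll helpers):
#if 0
#include <new>

template <bool nothrow, typename Heap>
char* AllocWithGCBackstop(Heap& heap, size_t size)
{
    char* p = heap.TryAlloc(size);
    if (p == nullptr)
    {
        heap.CollectAll();            // synchronous, in-thread collection
        p = heap.TryAlloc(size);      // one retry after the heap was swept
        if (p == nullptr && !nothrow)
        {
            throw std::bad_alloc();   // matches the throwing instantiation
        }
    }
    return p;                          // nullptr only in the nothrow case
}
#endif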
void Recycler::GetNormalHeapBlockAllocatorInfoForNativeAllocation(void* recyclerAddr, size_t allocSize, void*& allocatorAddress, uint32& endAddressOffset, uint32& freeListOffset, bool allowBumpAllocation, bool isOOPJIT)
{
    Assert(recyclerAddr);
    return ((Recycler*)recyclerAddr)->GetNormalHeapBlockAllocatorInfoForNativeAllocation(allocSize, allocatorAddress, endAddressOffset, freeListOffset, allowBumpAllocation, isOOPJIT);
}

void Recycler::GetNormalHeapBlockAllocatorInfoForNativeAllocation(size_t allocSize, void*& allocatorAddress, uint32& endAddressOffset, uint32& freeListOffset, bool allowBumpAllocation, bool isOOPJIT)
{
    Assert(HeapInfo::IsAlignedSize(allocSize));
    Assert(HeapInfo::IsSmallObject(allocSize));
    allocatorAddress = (char*)this + offsetof(Recycler, autoHeap)
        + offsetof(HeapInfoManager, defaultHeap)
        + offsetof(HeapInfo, heapBuckets)
        + sizeof(HeapBucketGroup<SmallAllocationBlockAttributes>) * ((uint)(allocSize >> HeapConstants::ObjectAllocationShift) - 1)
        + HeapBucketGroup<SmallAllocationBlockAttributes>::GetHeapBucketOffset()
        + HeapBucketT<SmallNormalHeapBlockT<SmallAllocationBlockAttributes>>::GetAllocatorHeadOffset();
    endAddressOffset = SmallHeapBlockAllocator<SmallNormalHeapBlockT<SmallAllocationBlockAttributes>>::GetEndAddressOffset();
    freeListOffset = SmallHeapBlockAllocator<SmallNormalHeapBlockT<SmallAllocationBlockAttributes>>::GetFreeObjectListOffset();
    if (!isOOPJIT)
    {
        Assert(allocatorAddress == GetAddressOfAllocator<NoBit>(allocSize));
        Assert(endAddressOffset == GetEndAddressOffset<NoBit>(allocSize));
        Assert(freeListOffset == GetFreeObjectListOffset<NoBit>(allocSize));
        Assert(allowBumpAllocation == AllowNativeCodeBumpAllocation());
    }
    if (!allowBumpAllocation)
    {
        freeListOffset = endAddressOffset;
    }
}
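// The JIT inlines the allocation fast path, so instead of calling into the
// Recycler it needs raw addresses: the per-size-class allocator is located by
// chaining offsetof() through the containing objects, exactly as above. A
// small sketch of the technique (hypothetical layout, not the real classes):
#if 0
#include <cstddef>

struct Allocator { char* freeList; char* endAddress; };
struct Bucket    { int stats; Allocator allocator; };
struct Heap      { Bucket buckets[8]; };
struct Engine    { int flags; Heap heap; };

// Address of buckets[idx].allocator inside an Engine, computed the same way
// GetNormalHeapBlockAllocatorInfoForNativeAllocation does: base + offset chain.
void* AllocatorAddress(Engine* e, size_t idx)
{
    return (char*)e + offsetof(Engine, heap)
                    + offsetof(Heap, buckets)
                    + sizeof(Bucket) * idx
                    + offsetof(Bucket, allocator);
}
#endif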
bool Recycler::AllowNativeCodeBumpAllocation()
{
    // In debug builds, if we need to track allocation info, we pretend there is no pointer-bump-allocation space
    // on this page, so that we always fail the check in native code and go to the helper, which does the tracking.
#ifdef PROFILE_RECYCLER_ALLOC
    if (this->trackerDictionary != nullptr)
    {
        return false;
    }
#endif
#ifdef RECYCLER_MEMORY_VERIFY
    if (this->verifyEnabled)
    {
        return false;
    }
#endif
#ifdef RECYCLER_PAGE_HEAP
    // Don't allow bump allocation in the JIT when page heap is turned on
    if (this->IsPageHeapEnabled())
    {
        return false;
    }
#endif
    return true;
}
void Recycler::TrackNativeAllocatedMemoryBlock(Recycler * recycler, void * memBlock, size_t sizeCat)
{
    Assert(HeapInfo::IsAlignedSize(sizeCat));
    Assert(HeapInfo::IsSmallObject(sizeCat));
#ifdef PROFILE_RECYCLER_ALLOC
    AssertMsg(!Recycler::DoProfileAllocTracker(), "Why did we register allocation tracking callback if all allocations are forced to slow path?");
#endif
    RecyclerMemoryTracking::ReportAllocation(recycler, memBlock, sizeCat);
    RECYCLER_PERF_COUNTER_INC(LiveObject);
    RECYCLER_PERF_COUNTER_ADD(LiveObjectSize, sizeCat);
    RECYCLER_PERF_COUNTER_SUB(FreeObjectSize, sizeCat);
#ifdef RECYCLER_MEMORY_VERIFY
    AssertMsg(!recycler->VerifyEnabled(), "Why did we register allocation tracking callback if all allocations are forced to slow path?");
#endif
}
/*------------------------------------------------------------------------------------------------
 * FindRoots
 *------------------------------------------------------------------------------------------------*/
// xplat-todo: Unify these two variants of GetStackBase
#ifdef _WIN32
static void* GetStackBase()
{
    return ((NT_TIB *)NtCurrentTeb())->StackBase;
}
#else
static void* GetStackBase()
{
    ULONG_PTR highLimit = 0;
    ULONG_PTR lowLimit = 0;
    ::GetCurrentThreadStackLimits(&lowLimit, &highLimit);
    return (void*) highLimit;
}
#endif
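// Stacks grow downward on every supported target, so the conservative root
// scan walks from the live stack pointer (captured by SAVE_THREAD_CONTEXT
// below) up to the StackBase returned here. A sketch of what such a
// conservative word-by-word scan does (hypothetical helpers, not the real
// ScanMemoryInline machinery):
#if 0
#include <cstdint>

bool LooksLikeHeapPointer(uintptr_t word);   // hypothetical heap-range test
void MarkCandidate(void* candidate);         // hypothetical mark entry point

void ScanStackRange(void** stackTop, void** stackBase)
{
    // Every pointer-aligned word between the two bounds is a potential root.
    for (void** p = stackTop; p < stackBase; ++p)
    {
        if (LooksLikeHeapPointer((uintptr_t)*p))
        {
            MarkCandidate(*p);
        }
    }
}
#endif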
#if _M_IX86
// REVIEW: For x86, do we care about scanning esp/ebp?
// At GC time, they shouldn't be pointing to GC memory.
#define SAVE_THREAD_CONTEXT() \
    void** targetBuffer = this->savedThreadContext.GetRegisters(); \
    __asm { push eax } \
    __asm { mov eax, targetBuffer } \
    __asm { mov [eax], esp } \
    __asm { mov [eax+0x4], eax } \
    __asm { mov [eax+0x8], ebx } \
    __asm { mov [eax+0xc], ecx } \
    __asm { mov [eax+0x10], edx } \
    __asm { mov [eax+0x14], ebp } \
    __asm { mov [eax+0x18], esi } \
    __asm { mov [eax+0x1c], edi } \
    __asm { pop eax } \
    SAVE_THREAD_ASAN_FAKE_STACK()
#elif _M_ARM
#define SAVE_THREAD_CONTEXT() \
    arm_SAVE_REGISTERS(this->savedThreadContext.GetRegisters()); \
    SAVE_THREAD_ASAN_FAKE_STACK()
#elif _M_ARM64
#define SAVE_THREAD_CONTEXT() \
    arm64_SAVE_REGISTERS(this->savedThreadContext.GetRegisters()); \
    SAVE_THREAD_ASAN_FAKE_STACK()
#elif _M_AMD64
#define SAVE_THREAD_CONTEXT() \
    amd64_SAVE_REGISTERS(this->savedThreadContext.GetRegisters()); \
    SAVE_THREAD_ASAN_FAKE_STACK()
#else
#error Unexpected architecture
#endif
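// Registers are roots too: at GC time a pointer may live only in a register,
// so the register file must be spilled somewhere scannable before the stack
// walk. A portable stand-in for the per-architecture macros above is the
// classic setjmp trick used by conservative collectors (a sketch of that
// alternative technique, not how this file does it; reuses the hypothetical
// ScanStackRange from the sketch above):
#if 0
#include <csetjmp>

void ScanStackRange(void** stackTop, void** stackBase);

void CaptureRegistersAndScan(void** stackBase)
{
    jmp_buf regs;                  // setjmp spills callee-saved registers here,
    if (setjmp(regs) == 0)         // making them visible as scannable memory
    {
        // The jmp_buf lives on the stack, so scanning from its address up to
        // the stack base covers both the spilled registers and the frames.
        ScanStackRange((void**)&regs, stackBase);
    }
}
#endif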
size_t
Recycler::ScanArena(ArenaData * alloc, bool background)
{
#if DBG_DUMP
    if (GetRecyclerFlagsTable().Trace.IsEnabled(Js::MarkPhase)
        || GetRecyclerFlagsTable().Trace.IsEnabled(Js::FindRootPhase))
    {
        this->forceTraceMark = true;
        Output::Print(_u("Scanning Guest Arena %p: "), alloc);
    }
#endif
    size_t scanRootBytes = 0;
    BEGIN_DUMP_OBJECT_ADDRESS(_u("Guest Arena"), alloc);
#if ENABLE_PARTIAL_GC || ENABLE_CONCURRENT_GC
    // The new write watch batching logic broke the write watch handling here.
    // For now, just disable write watch for guest arenas.
    // TODO: Re-enable this in the future.
#if FALSE
    // Note, guest arenas are allocated out of the large block page allocator.
    bool writeWatch = alloc->GetPageAllocator() == &this->recyclerLargeBlockPageAllocator;
    // Only use write watch when we are doing rescan (partial collect or finish concurrent)
    if (writeWatch && this->collectionState == CollectionStateRescanFindRoots)
    {
        scanRootBytes += TryMarkBigBlockListWithWriteWatch(alloc->GetBigBlocks(background));
        scanRootBytes += TryMarkBigBlockListWithWriteWatch(alloc->GetFullBlocks());
    }
    else
#endif
#endif
    {
        scanRootBytes += TryMarkBigBlockList(alloc->GetBigBlocks(background));
        scanRootBytes += TryMarkBigBlockList(alloc->GetFullBlocks());
    }
    scanRootBytes += TryMarkArenaMemoryBlockList(alloc->GetMemoryBlocks());
    END_DUMP_OBJECT(this);
#if DBG_DUMP
    if (GetRecyclerFlagsTable().Trace.IsEnabled(Js::MarkPhase)
        || GetRecyclerFlagsTable().Trace.IsEnabled(Js::FindRootPhase))
    {
        this->forceTraceMark = false;
        Output::Print(_u("\n"));
        Output::Flush();
    }
#endif
    // The arena has been scanned, so the full blocks can be rearranged at this point
#if ENABLE_DEBUG_CONFIG_OPTIONS
    if (background || !GetRecyclerFlagsTable().RecyclerProtectPagesOnRescan)
#endif
    {
        alloc->SetLockBlockList(false);
    }
    return scanRootBytes;
}
#if DBG
bool
Recycler::ExpectStackSkip() const
{
    // Okay to skip the stack scan if we're in leak check mode
    bool expectStackSkip = false;
#ifdef LEAK_REPORT
    expectStackSkip = expectStackSkip || GetRecyclerFlagsTable().IsEnabled(Js::LeakReportFlag);
#endif
#ifdef CHECK_MEMORY_LEAK
    expectStackSkip = expectStackSkip || GetRecyclerFlagsTable().CheckMemoryLeak;
#endif
#ifdef RECYCLER_DUMP_OBJECT_GRAPH
    expectStackSkip = expectStackSkip || (this->objectGraphDumper != nullptr);
#endif
#if defined(INTERNAL_MEM_PROTECT_HEAP_ALLOC)
    expectStackSkip = expectStackSkip || GetRecyclerFlagsTable().MemProtectHeap;
#endif
    return expectStackSkip || isExternalStackSkippingGC;
}
#endif
#pragma warning(push)
#pragma warning(disable:4731) // 'pointer' : frame pointer register 'register' modified by inline assembly code
// Disable address sanitizer, since it doesn't handle custom stack walks well
NO_SANITIZE_ADDRESS
size_t
Recycler::ScanStack()
{
    if (this->skipStack)
    {
#ifdef RECYCLER_TRACE
        CUSTOM_PHASE_PRINT_VERBOSE_TRACE1(GetRecyclerFlagsTable(), Js::ScanStackPhase, _u("[%04X] Skipping the stack scan\n"), ::GetCurrentThreadId());
#endif
#if ENABLE_CONCURRENT_GC
        Assert(this->isFinishGCOnIdle || this->isConcurrentGCOnIdle || this->ExpectStackSkip());
#else
        Assert(this->ExpectStackSkip());
#endif
        return 0;
    }
#ifdef RECYCLER_STATS
    size_t lastMarkCount = this->collectionStats.markData.markCount;
#endif
    GCETW(GC_SCANSTACK_START, (this));
    RECYCLER_PROFILE_EXEC_BEGIN(this, Js::ScanStackPhase);
    SAVE_THREAD_CONTEXT();
    void * stackTop = this->savedThreadContext.GetStackTop();
    void * stackStart = GetStackBase();
    Assert(stackStart > stackTop);
    size_t stackScanned = (size_t)((char *)stackStart - (char *)stackTop);
#if DBG_DUMP
    if (GetRecyclerFlagsTable().Trace.IsEnabled(Js::MarkPhase)
        || GetRecyclerFlagsTable().Trace.IsEnabled(Js::ScanStackPhase))
    {
        this->forceTraceMark = true;
        Output::Print(_u("Scanning Stack %p(%8d): "), stackTop, (char *)stackStart - (char *)stackTop);
    }
#endif
    collectionWrapper->OnScanStackCallback((void**)stackTop, stackScanned, this->savedThreadContext.GetRegisters(), sizeof(void*) * SavedRegisterState::NumRegistersToSave);
    bool doSpecialMark = collectionWrapper->DoSpecialMarkOnScanStack();
    BEGIN_DUMP_OBJECT(this, _u("Registers"));
    // We will not scan interior pointers on the stack if we are not in script or we are in mem-protect mode.
    if (!this->HasNativeGCHost() && (!this->isInScript || this->IsMemProtectMode()))
    {
        if (doSpecialMark)
        {
            ScanMemoryInline<true>(
                this->savedThreadContext.GetRegisters(), sizeof(void*) * SavedRegisterState::NumRegistersToSave
                ADDRESS_SANITIZER_APPEND(RecyclerScanMemoryType::Stack));
        }
        else
        {
            ScanMemoryInline<false>(
                this->savedThreadContext.GetRegisters(), sizeof(void*) * SavedRegisterState::NumRegistersToSave
                ADDRESS_SANITIZER_APPEND(RecyclerScanMemoryType::Stack));
        }
    }
    else
    {
        // We may have interior pointers on the stack, such as pointers in the middle of the character buffers backing a JavascriptString or SubString object.
        // To prevent UAFs of these buffers after the GC, we will always do MarkInterior for the pointers on the stack. This is necessary only when we are doing a
        // GC while running a script or when we have a host who allocates objects on the Chakra heap.
        if (doSpecialMark)
        {
            ScanMemoryInline<true, true /* forceInterior */>(this->savedThreadContext.GetRegisters(), sizeof(void*) * SavedRegisterState::NumRegistersToSave
                ADDRESS_SANITIZER_APPEND(RecyclerScanMemoryType::Stack));
        }
        else
        {
            ScanMemoryInline<false, true /* forceInterior */>(this->savedThreadContext.GetRegisters(), sizeof(void*) * SavedRegisterState::NumRegistersToSave
                ADDRESS_SANITIZER_APPEND(RecyclerScanMemoryType::Stack));
        }
    }
    END_DUMP_OBJECT(this);
    BEGIN_DUMP_OBJECT(this, _u("Stack"));
    // We will not scan interior pointers on the stack if we are not in script or we are in mem-protect mode.
    if (!this->HasNativeGCHost() && (!this->isInScript || this->IsMemProtectMode()))
    {
        if (doSpecialMark)
        {
            ScanMemoryInline<true>((void**) stackTop, stackScanned
                ADDRESS_SANITIZER_APPEND(RecyclerScanMemoryType::Stack));
        }
        else
        {
            ScanMemoryInline<false>((void**) stackTop, stackScanned
                ADDRESS_SANITIZER_APPEND(RecyclerScanMemoryType::Stack));
        }
    }
    else
    {
        // We may have interior pointers on the stack, such as pointers in the middle of the character buffers backing a JavascriptString or SubString object.
        // To prevent UAFs of these buffers after the GC, we will always do MarkInterior for the pointers on the stack. This is necessary only when we are doing a
        // GC while running a script or when we have a host who allocates objects on the Chakra heap.
        if (doSpecialMark)
        {
            ScanMemoryInline<true, true /* forceInterior */>((void**)stackTop, stackScanned
                ADDRESS_SANITIZER_APPEND(RecyclerScanMemoryType::Stack));
        }
        else
        {
            ScanMemoryInline<false, true /* forceInterior */>((void**)stackTop, stackScanned
                ADDRESS_SANITIZER_APPEND(RecyclerScanMemoryType::Stack));
        }
    }
    END_DUMP_OBJECT(this);
#if DBG_DUMP
    if (GetRecyclerFlagsTable().Trace.IsEnabled(Js::MarkPhase)
        || GetRecyclerFlagsTable().Trace.IsEnabled(Js::ScanStackPhase))
    {
        this->forceTraceMark = false;
        Output::Print(_u("\n"));
        Output::Flush();
    }
#endif
    RECYCLER_PROFILE_EXEC_END(this, Js::ScanStackPhase);
    RECYCLER_STATS_ADD(this, stackCount, this->collectionStats.markData.markCount - lastMarkCount);
    GCETW(GC_SCANSTACK_STOP, (this));
    return stackScanned;
}
#pragma warning(pop)
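// The forceInterior paths above exist because stack words can point into the
// middle of an object (e.g. a cursor into a string's character buffer); an
// interior mark must first map the address back to the owning object's start
// before setting the mark bit. A sketch of that mapping for fixed-size slots
// (hypothetical block layout, not the real HeapBlock):
#if 0
#include <cstddef>
#include <cstdint>

struct Block
{
    uintptr_t start;     // first object address in the block
    size_t objectSize;   // all slots in a small block share one size
    size_t objectCount;

    // Map any address within an allocated slot to the slot's base address,
    // which is what interior marking needs.
    void* InteriorToObjectStart(uintptr_t addr) const
    {
        if (addr < start || addr >= start + objectSize * objectCount)
        {
            return nullptr;                    // not inside this block
        }
        size_t index = (addr - start) / objectSize;
        return (void*)(start + index * objectSize);
    }
};
#endif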
template <bool background>
size_t Recycler::ScanPinnedObjects()
{
    size_t scanRootBytes = 0;
    BEGIN_DUMP_OBJECT(this, _u("Pinned"));
    {
        this->TryMarkNonInterior(transientPinnedObject, &transientPinnedObject /* parentReference */);
        if (this->scanPinnedObjectMap)
        {
            // We are scanning the pinned object map now; we don't need to rescan unless
            // we reset marks or add entries to the map in Recycler::RootAddRef
            this->scanPinnedObjectMap = false;
            pinnedObjectMap.MapAndRemoveIf([this, &scanRootBytes](void * obj, PinRecord const& refCount)
            {
                if (refCount == 0)
                {
#if defined(CHECK_MEMORY_LEAK) || defined(LEAK_REPORT)
#ifdef STACK_BACK_TRACE
                    Assert(refCount.stackBackTraces == nullptr);
#endif
#endif
                    // Only remove if we are not doing this in the background.
                    return !background;
                }
                this->TryMarkNonInterior(obj, static_cast<void*>(const_cast<PinRecord*>(&refCount)) /* parentReference */);
                scanRootBytes += sizeof(void *);
                return false;
            });
            if (!background)
            {
                this->hasPendingUnpinnedObject = false;
            }
        }
    }
    END_DUMP_OBJECT(this);
    if (background)
    {
        // Re-enable resize now that we are done
        pinnedObjectMap.EnableResize();
    }
    return scanRootBytes;
}
void
RecyclerScanMemoryCallback::operator()(void** obj, size_t byteCount)
{
    this->recycler->ScanMemoryInline<false>(obj, byteCount);
}
size_t
Recycler::FindRoots()
{
    size_t scanRootBytes = 0;
#ifdef RECYCLER_STATS
    size_t lastMarkCount = this->collectionStats.markData.markCount;
#endif
    GCETW(GC_SCANROOTS_START, (this));
    RECYCLER_PROFILE_EXEC_BEGIN(this, Js::FindRootPhase);
#ifdef ENABLE_PROJECTION
    {
        AUTO_TIMESTAMP(externalWeakReferenceObjectResolve);
        BEGIN_DUMP_OBJECT(this, _u("External Weak Referenced Roots"));
        Assert(!this->IsInRefCountTrackingForProjection());
#if DBG
        AutoIsInRefCountTrackingForProjection autoIsInRefCountTrackingForProjection(this);
#endif
        collectionWrapper->MarkExternalWeakReferencedObjects(this->inPartialCollectMode);
        END_DUMP_OBJECT(this);
    }
#endif
    // Go through the ITracker* roots. We don't need to do this during a partial collection,
    // as we keep track of and mark all trackable objects.
    // Do this first because the host might unpin objects in the process.
    if (externalRootMarker != NULL)
    {
#if ENABLE_PARTIAL_GC
        if (!this->inPartialCollectMode)
#endif
        {
            RECYCLER_PROFILE_EXEC_BEGIN(this, Js::FindRootExtPhase);
#if DBG_DUMP
            if (GetRecyclerFlagsTable().Trace.IsEnabled(Js::MarkPhase)
                || GetRecyclerFlagsTable().Trace.IsEnabled(Js::FindRootPhase))
            {
                this->forceTraceMark = true;
                Output::Print(_u("Scanning External Roots: "));
            }
#endif
            BEGIN_DUMP_OBJECT(this, _u("External Roots"));
            // PARTIALGC-TODO: How do we count external roots?
            externalRootMarker(externalRootMarkerContext);
            END_DUMP_OBJECT(this);
#if DBG_DUMP
            if (GetRecyclerFlagsTable().Trace.IsEnabled(Js::MarkPhase)
                || GetRecyclerFlagsTable().Trace.IsEnabled(Js::FindRootPhase))
            {
                this->forceTraceMark = false;
                Output::Print(_u("\n"));
                Output::Flush();
            }
#endif
            RECYCLER_PROFILE_EXEC_END(this, Js::FindRootExtPhase);
        }
    }
#if DBG_DUMP
    if (GetRecyclerFlagsTable().Trace.IsEnabled(Js::MarkPhase)
        || GetRecyclerFlagsTable().Trace.IsEnabled(Js::FindRootPhase))
    {
        this->forceTraceMark = true;
        Output::Print(_u("Scanning Pinned Objects: "));
    }
#endif
    scanRootBytes += this->ScanPinnedObjects</*background = */false>();
#if DBG_DUMP
    if (GetRecyclerFlagsTable().Trace.IsEnabled(Js::MarkPhase)
        || GetRecyclerFlagsTable().Trace.IsEnabled(Js::FindRootPhase))
    {
        this->forceTraceMark = false;
        Output::Print(_u("\n"));
        Output::Flush();
    }
#endif
#if ENABLE_CONCURRENT_GC
    Assert(!this->hasPendingConcurrentFindRoot);
#endif
    RECYCLER_PROFILE_EXEC_BEGIN(this, Js::FindRootArenaPhase);
    DListBase<GuestArenaAllocator>::EditingIterator guestArenaIter(&guestArenaList);
    while (guestArenaIter.Next())
    {
        GuestArenaAllocator& allocator = guestArenaIter.Data();
#if ENABLE_CONCURRENT_GC
        if (allocator.pendingDelete)
        {
            Assert(this->hasPendingDeleteGuestArena);
            allocator.SetLockBlockList(false);
            guestArenaIter.RemoveCurrent(&HeapAllocator::Instance);
        }
        else if (this->backgroundFinishMarkCount == 0)
#endif
        {
            // Only scan arena if we haven't finished mark in the background
            // (which is true if concurrent GC is disabled)
            scanRootBytes += ScanArena(&allocator, false);
        }
    }
    this->hasPendingDeleteGuestArena = false;
    DList<ArenaData *, HeapAllocator>::Iterator externalGuestArenaIter(&externalGuestArenaList);
    while (externalGuestArenaIter.Next())
    {
        scanRootBytes += ScanArena(externalGuestArenaIter.Data(), false);
    }
    RECYCLER_PROFILE_EXEC_END(this, Js::FindRootArenaPhase);
    this->ScanImplicitRoots();
    RECYCLER_PROFILE_EXEC_END(this, Js::FindRootPhase);
    GCETW(GC_SCANROOTS_STOP, (this));
    RECYCLER_STATS_ADD(this, rootCount, this->collectionStats.markData.markCount - lastMarkCount);
    return scanRootBytes;
}
void
Recycler::ScanImplicitRoots()
{
    if (this->enableScanImplicitRoots)
    {
        RECYCLER_PROFILE_EXEC_BEGIN(this, Js::FindImplicitRootPhase);
        if (!this->hasScannedInitialImplicitRoots)
        {
            this->ScanInitialImplicitRoots();
            this->hasScannedInitialImplicitRoots = true;
        }
        else
        {
            this->ScanNewImplicitRoots();
        }
        RECYCLER_PROFILE_EXEC_END(this, Js::FindImplicitRootPhase);
    }
}

size_t
Recycler::TryMarkArenaMemoryBlockList(ArenaMemoryBlock * memoryBlocks)
{
    size_t scanRootBytes = 0;
    ArenaMemoryBlock *blockp = memoryBlocks;
    while (blockp != NULL)
    {
        void** base = (void**)blockp->GetBytes();
        size_t byteCount = blockp->nbytes;
        scanRootBytes += byteCount;
        this->ScanMemory<false>(base, byteCount);
        blockp = blockp->next;
    }
    return scanRootBytes;
}
#if ENABLE_CONCURRENT_GC
#if FALSE
size_t
Recycler::TryMarkBigBlockListWithWriteWatch(BigBlock * memoryBlocks)
{
    DWORD pageSize = AutoSystemInfo::PageSize;
    size_t scanRootBytes = 0;
    BigBlock *blockp = memoryBlocks;
    // Reset the write watch bit if we are scanning this in the background thread
    DWORD const writeWatchFlags = this->IsConcurrentFindRootState() ? WRITE_WATCH_FLAG_RESET : 0;
    while (blockp != NULL)
    {
        char * currentAddress = (char *)blockp->GetBytes();
        char * endAddress = currentAddress + blockp->currentByte;
        char * currentPageStart = (char *)blockp->allocation;
        while (currentAddress < endAddress)
        {
            void * written;
            ULONG_PTR count = 1;
            if (::GetWriteWatch(writeWatchFlags, currentPageStart, AutoSystemInfo::PageSize, &written, &count, &pageSize) != 0 || count == 1)
            {
                char * currentEnd = min(currentPageStart + pageSize, endAddress);
                size_t byteCount = (size_t)(currentEnd - currentAddress);
                scanRootBytes += byteCount;
                this->ScanMemory<false>((void **)currentAddress, byteCount);
            }
            currentPageStart += pageSize;
            currentAddress = currentPageStart;
        }
        blockp = blockp->nextBigBlock;
    }
    return scanRootBytes;
}
#endif
#endif
size_t
Recycler::TryMarkBigBlockList(BigBlock * memoryBlocks)
{
    size_t scanRootBytes = 0;
    BigBlock *blockp = memoryBlocks;
    while (blockp != NULL)
    {
        void** base = (void**)blockp->GetBytes();
        size_t byteCount = blockp->currentByte;
        scanRootBytes += byteCount;
        this->ScanMemory<false>(base, byteCount);
        blockp = blockp->nextBigBlock;
    }
    return scanRootBytes;
}

void
Recycler::ScanInitialImplicitRoots()
{
    autoHeap.ScanInitialImplicitRoots();
}

void
Recycler::ScanNewImplicitRoots()
{
    autoHeap.ScanNewImplicitRoots();
}
/*------------------------------------------------------------------------------------------------
 * Mark
 *------------------------------------------------------------------------------------------------*/
void
Recycler::ResetMarks(ResetMarkFlags flags)
{
    Assert(!this->CollectionInProgress());
    this->SetCollectionState(CollectionStateResetMarks);
    RecyclerVerboseTrace(GetRecyclerFlagsTable(), _u("Reset marks\n"));
    GCETW(GC_RESETMARKS_START, (this));
    RECYCLER_PROFILE_EXEC_BEGIN(this, Js::ResetMarksPhase);
    Assert(IsMarkStackEmpty());
    this->scanPinnedObjectMap = true;
    this->hasScannedInitialImplicitRoots = false;
    heapBlockMap.ResetMarks();
    autoHeap.ResetMarks(flags);
    RECYCLER_PROFILE_EXEC_END(this, Js::ResetMarksPhase);
    GCETW(GC_RESETMARKS_STOP, (this));
#ifdef RECYCLER_MARK_TRACK
    this->ClearMarkMap();
#endif
}

#ifdef RECYCLER_MARK_TRACK
void Recycler::ClearMarkMap()
{
    this->markMap->Clear();
}

void Recycler::PrintMarkMap()
{
    this->markMap->Map([](void* key, void* value)
    {
        Output::Print(_u("0x%P => 0x%P\n"), key, value);
    });
}
#endif
#if DBG
void
Recycler::CheckAllocExternalMark() const
{
    Assert(!disableThreadAccessCheck);
    Assert(GetCurrentThreadContextId() == mainThreadId);
#if ENABLE_CONCURRENT_GC
#ifdef HEAP_ENUMERATION_VALIDATION
    Assert((this->IsMarkState() || this->IsPostEnumHeapValidationInProgress()) && collectionState != CollectionStateConcurrentMark);
#else
    Assert(this->IsMarkState() && collectionState != CollectionStateConcurrentMark);
#endif
#else
    Assert(this->IsMarkState());
#endif
}
#endif

void
Recycler::TryMarkNonInterior(void* candidate, void* parentReference)
{
#ifdef HEAP_ENUMERATION_VALIDATION
    Assert(!isHeapEnumInProgress || this->IsPostEnumHeapValidationInProgress());
#else
    Assert(!isHeapEnumInProgress);
#endif
    Assert(this->collectionState != CollectionStateParallelMark);
    markContext.Mark</* parallel */ false, /* interior */ false, /* doSpecialMark */ false>(candidate, parentReference);
}

void
Recycler::TryMarkInterior(void* candidate, void* parentReference)
{
#ifdef HEAP_ENUMERATION_VALIDATION
    Assert(!isHeapEnumInProgress || this->IsPostEnumHeapValidationInProgress());
#else
    Assert(!isHeapEnumInProgress);
#endif
    Assert(this->collectionState != CollectionStateParallelMark);
    markContext.Mark</* parallel */ false, /* interior */ true, /* doSpecialMark */ false>(candidate, parentReference);
}
template <bool parallel, bool interior>
void
Recycler::ProcessMarkContext(MarkContext * markContext)
{
#if ENABLE_CONCURRENT_GC
    // Copying the markContext onto the stack messes up tracked object handling, because
    // the tracked object will call TryMark[Non]Interior to report its references.
    // These functions implicitly use the main markContext on the Recycler, but this will
    // be overridden if we're processing the main markContext here.
    // So, don't do this if we are going to process tracked objects.
    // (This will be the case if we're not queuing and we're not in partial mode, which ignores tracked objects.)
    // In this case we shouldn't be parallel anyway, so we don't need to worry about cache behavior.
    // We should revisit how we manage markContexts in general in the future, and clean this up
    // by passing the MarkContext through to the tracked object's Mark method.
#if ENABLE_PARTIAL_GC
    if (this->inPartialCollectMode || DoQueueTrackedObject())
#else
    if (DoQueueTrackedObject())
#endif
    {
        // The markContext as passed is one of the markContexts that lives on the Recycler.
        // Copy it locally for processing.
        // This serves two purposes:
        // (1) Allow for better codegen because the markContext is local and we don't need to track the this pointer separately
        //     (because all the key processing is inlined into this function).
        // (2) Ensure we don't have weird cache behavior because we're accidentally writing to the same cache line from
        //     multiple threads during parallel marking.
        MarkContext localMarkContext = *markContext;
        // Do the actual marking.
        localMarkContext.ProcessMark<parallel, interior>();
        // Copy back to the original location.
        *markContext = localMarkContext;
        // Clear the local mark context.
        localMarkContext.Clear();
    }
    else
#endif
    {
        Assert(!parallel);
        markContext->ProcessMark<parallel, interior>();
    }
}
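// Point (2) above is about false sharing: the per-thread markContexts are
// member fields that can sit on one cache line, so parallel markers writing
// through them would ping-pong that line between cores. Working on a stack
// copy and publishing once avoids the contention. A distilled sketch of the
// same pattern (hypothetical Counter type):
#if 0
struct Counter { long value; };

void ProcessShared(Counter* shared, int iterations)
{
    Counter local = *shared;          // copy to this thread's stack/cache
    for (int i = 0; i < iterations; i++)
    {
        local.value++;                // hot loop touches only local memory
    }
    *shared = local;                  // publish once at the end
}
#endif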
void
Recycler::ProcessMark(bool background)
{
#if ENABLE_CONCURRENT_GC
    if (background)
    {
        GCETW(GC_BACKGROUNDMARK_START, (this, backgroundRescanCount));
    }
    else
#endif
    {
        GCETW(GC_MARK_START, (this));
    }
    RECYCLER_PROFILE_EXEC_THREAD_BEGIN(background, this, Js::MarkPhase);
    if (this->enableScanInteriorPointers)
    {
        this->ProcessMarkContext</* parallel */ false, /* interior */ true>(&markContext);
    }
    else
    {
        this->ProcessMarkContext</* parallel */ false, /* interior */ false>(&markContext);
    }
    RECYCLER_PROFILE_EXEC_THREAD_END(background, this, Js::MarkPhase);
#if ENABLE_CONCURRENT_GC
    if (background)
    {
        GCETW(GC_BACKGROUNDMARK_STOP, (this, backgroundRescanCount));
    }
    else
#endif
    {
        GCETW(GC_MARK_STOP, (this));
    }
    DebugOnly(this->markContext.VerifyPostMarkState());
}
void
Recycler::ProcessParallelMark(bool background, MarkContext * markContext)
{
#if ENABLE_CONCURRENT_GC
    if (background)
    {
        GCETW(GC_BACKGROUNDPARALLELMARK_START, (this, backgroundRescanCount));
    }
    else
#endif
    {
        GCETW(GC_PARALLELMARK_START, (this));
    }
    RECYCLER_PROFILE_EXEC_THREAD_BEGIN(background, this, Js::MarkPhase);
    if (this->enableScanInteriorPointers)
    {
        this->ProcessMarkContext</* parallel */ true, /* interior */ true>(markContext);
    }
    else
    {
        this->ProcessMarkContext</* parallel */ true, /* interior */ false>(markContext);
    }
    RECYCLER_PROFILE_EXEC_THREAD_END(background, this, Js::MarkPhase);
#if ENABLE_CONCURRENT_GC
    if (background)
    {
        GCETW(GC_BACKGROUNDPARALLELMARK_STOP, (this, backgroundRescanCount));
    }
    else
#endif
    {
        GCETW(GC_PARALLELMARK_STOP, (this));
    }
}

void
Recycler::Mark()
{
    // Marking in thread, we can just pre-mark them
    ResetMarks(this->enableScanImplicitRoots ? ResetMarkFlags_InThreadImplicitRoots : ResetMarkFlags_InThread);
    this->SetCollectionState(CollectionStateFindRoots);
    RootMark(CollectionStateMark);
}
#if ENABLE_CONCURRENT_GC
void
Recycler::StartQueueTrackedObject()
{
    Assert(!this->queueTrackedObject);
    Assert(!this->HasPendingTrackObjects());
#if ENABLE_PARTIAL_GC
    Assert(this->clientTrackedObjectList.Empty());
    Assert(!this->inPartialCollectMode);
#endif
    this->queueTrackedObject = true;
}

bool
Recycler::DoQueueTrackedObject() const
{
    Assert(this->queueTrackedObject || !this->IsConcurrentMarkState());
    Assert(this->queueTrackedObject || this->isProcessingTrackedObjects || !this->HasPendingTrackObjects());
#if ENABLE_PARTIAL_GC
    Assert(this->queueTrackedObject || this->inPartialCollectMode || !(this->collectionState == CollectionStateParallelMark));
    Assert(!this->queueTrackedObject || (this->clientTrackedObjectList.Empty() && !this->inPartialCollectMode));
#else
    Assert(this->queueTrackedObject || !(this->collectionState == CollectionStateParallelMark));
#endif
    return this->queueTrackedObject;
}
#endif
  2045. void
  2046. Recycler::ResetCollectionState()
  2047. {
  2048. Assert(IsMarkStackEmpty());
  2049. this->SetCollectionState(CollectionStateNotCollecting);
  2050. #if ENABLE_CONCURRENT_GC
  2051. this->backgroundFinishMarkCount = 0;
  2052. #endif
  2053. this->inExhaustiveCollection = false;
  2054. this->inDecommitNowCollection = false;
  2055. #if ENABLE_CONCURRENT_GC
  2056. CleanupPendingUnroot();
  2057. #endif
  2058. #if ENABLE_PARTIAL_GC
  2059. if (inPartialCollectMode)
  2060. {
  2061. FinishPartialCollect();
  2062. }
  2063. #endif
  2064. #if ENABLE_CONCURRENT_GC
  2065. Assert(!this->DoQueueTrackedObject());
  2066. #endif
  2067. #ifdef RECYCLER_FINALIZE_CHECK
  2068. // Reset the collection stats.
  2069. this->collectionStats.finalizeCount = this->autoHeap.GetFinalizeCount();
  2070. #endif
  2071. }
  2072. void
  2073. Recycler::ResetMarkCollectionState()
  2074. {
  2075. // If we aborted after doing a background Rescan, there will be entries in the markContext.
  2076. // Abort these entries and reset the markContext state.
  2077. markContext.Abort();
  2078. // If we aborted after doing a background parallel Mark, we wouldn't have cleaned up the
  2079. // parallel markContexts yet. Clean these up now.
  2080. // Note parallelMarkContext1 is not used in background parallel (see DoBackgroundParallelMark)
  2081. parallelMarkContext2.Cleanup();
  2082. parallelMarkContext3.Cleanup();
  2083. this->ClearNeedOOMRescan();
  2084. DebugOnly(this->isProcessingRescan = false);
  2085. #if ENABLE_CONCURRENT_GC
2086. // If we're resetting the mark collection state, we need to unlock the block list
  2087. DListBase<GuestArenaAllocator>::EditingIterator guestArenaIter(&guestArenaList);
  2088. while (guestArenaIter.Next())
  2089. {
  2090. GuestArenaAllocator& allocator = guestArenaIter.Data();
  2091. allocator.SetLockBlockList(false);
  2092. }
  2093. this->queueTrackedObject = false;
  2094. #endif
  2095. ResetCollectionState();
  2096. }
  2097. void
  2098. Recycler::ResetHeuristicCounters()
  2099. {
  2100. autoHeap.lastUncollectedAllocBytes = autoHeap.uncollectedAllocBytes;
  2101. autoHeap.uncollectedAllocBytes = 0;
  2102. autoHeap.uncollectedExternalBytes = 0;
  2103. ResetPartialHeuristicCounters();
  2104. }
  2105. void Recycler::ResetPartialHeuristicCounters()
  2106. {
  2107. #if ENABLE_PARTIAL_GC
  2108. autoHeap.uncollectedNewPageCount = 0;
  2109. #endif
  2110. }
  2111. void
  2112. Recycler::ScheduleNextCollection()
  2113. {
  2114. this->tickCountNextCollection = ::GetTickCount() + RecyclerHeuristic::TickCountCollection;
  2115. this->tickCountNextFinishCollection = ::GetTickCount() + RecyclerHeuristic::TickCountFinishCollection;
  2116. }
  2117. #if ENABLE_CONCURRENT_GC
  2118. void
  2119. Recycler::PrepareSweep()
  2120. {
  2121. autoHeap.PrepareSweep();
  2122. }
  2123. #endif
  2124. size_t
  2125. Recycler::RescanMark(DWORD waitTime)
  2126. {
  2127. bool const onLowMemory = this->NeedOOMRescan();
  2128. // REVIEW: Why are we asserting for DoQueueTrackedObject here?
  2129. // Should we split this into different asserts depending on whether
  2130. // concurrent or partial is enabled?
  2131. #if ENABLE_CONCURRENT_GC
  2132. #if ENABLE_PARTIAL_GC
  2133. Assert(this->inPartialCollectMode || DoQueueTrackedObject());
  2134. #else
  2135. Assert(DoQueueTrackedObject());
  2136. #endif
  2137. #endif
  2138. {
  2139. // We are about to do a rescan mark, which for consistency requires the runtime to stop any additional mutator threads
  2140. AUTO_NO_EXCEPTION_REGION;
  2141. collectionWrapper->PreRescanMarkCallback();
  2142. }
  2143. // Always called in-thread
  2144. Assert(collectionState == CollectionStateRescanFindRoots);
  2145. #if ENABLE_CONCURRENT_GC
  2146. if (!onLowMemory && // Don't do background finish mark if we are low on memory
  2147. // Only do background finish mark if we have a time limit or it is forced
  2148. (CUSTOM_PHASE_FORCE1(GetRecyclerFlagsTable(), Js::BackgroundFinishMarkPhase) || waitTime != INFINITE) &&
  2149. // Don't do background finish mark if we failed to finish mark too many times
  2150. (this->backgroundFinishMarkCount < RecyclerHeuristic::MaxBackgroundFinishMarkCount(this->GetRecyclerFlagsTable())))
  2151. {
  2152. this->PrepareBackgroundFindRoots();
  2153. if (StartConcurrent(CollectionStateConcurrentFinishMark))
  2154. {
  2155. this->backgroundFinishMarkCount++;
  2156. this->PrepareSweep();
  2157. GCETW(GC_RESCANMARKWAIT_START, (this, waitTime));
  2158. const BOOL waited = WaitForConcurrentThread(waitTime, RecyclerWaitReason::RescanMark);
  2159. GCETW(GC_RESCANMARKWAIT_STOP, (this, !waited));
  2160. if (!waited)
  2161. {
  2162. CUSTOM_PHASE_PRINT_TRACE1(GetRecyclerFlagsTable(), Js::BackgroundFinishMarkPhase, _u("Finish mark timed out\n"));
  2163. {
  2164. // We timed out doing the finish mark, notify the runtime
  2165. AUTO_NO_EXCEPTION_REGION;
  2166. collectionWrapper->RescanMarkTimeoutCallback();
  2167. }
  2168. return Recycler::InvalidScanRootBytes;
  2169. }
  2170. Assert(collectionState == CollectionStateRescanWait);
  2171. this->SetCollectionState(CollectionStateRescanFindRoots);
  2172. #ifdef RECYCLER_WRITE_WATCH
  2173. if (!CONFIG_FLAG(ForceSoftwareWriteBarrier))
  2174. {
  2175. Assert(autoHeap.GetWriteWatchPageCount() == 0);
  2176. }
  2177. #endif
  2178. return this->backgroundRescanRootBytes;
  2179. }
  2180. this->RevertPrepareBackgroundFindRoots();
  2181. }
  2182. #endif
  2183. #if ENABLE_CONCURRENT_GC
  2184. this->backgroundFinishMarkCount = 0;
  2185. #endif
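// FinishMarkRescan returns a page count; multiply by the page size to report bytes.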
  2186. return FinishMarkRescan(false) * AutoSystemInfo::PageSize;
  2187. }
  2188. size_t
  2189. Recycler::FinishMark(DWORD waitTime)
  2190. {
  2191. size_t scannedRootBytes = RescanMark(waitTime);
  2192. Assert(waitTime != INFINITE || scannedRootBytes != Recycler::InvalidScanRootBytes);
  2193. if (scannedRootBytes != Recycler::InvalidScanRootBytes)
  2194. {
  2195. #if DBG && ENABLE_PARTIAL_GC
  2196. RecyclerVerboseTrace(GetRecyclerFlagsTable(), _u("CTO: %d\n"), this->clientTrackedObjectList.Count());
  2197. #endif
  2198. #if ENABLE_PARTIAL_GC
  2199. if (this->inPartialCollectMode)
  2200. {
  2201. RecyclerVerboseTrace(GetRecyclerFlagsTable(), _u("Processing client tracked objects\n"));
  2202. ProcessClientTrackedObjects();
  2203. }
  2204. else
  2205. #endif
  2206. #if ENABLE_CONCURRENT_GC
  2207. if (DoQueueTrackedObject())
  2208. {
  2209. RecyclerVerboseTrace(GetRecyclerFlagsTable(), _u("Processing regular tracked objects\n"));
  2210. ProcessTrackedObjects();
  2211. #ifdef RECYCLER_WRITE_WATCH
  2212. if (!CONFIG_FLAG(ForceSoftwareWriteBarrier))
  2213. {
  2214. Assert(this->backgroundFinishMarkCount == 0 || autoHeap.GetWriteWatchPageCount() == 0);
  2215. }
  2216. #endif
  2217. }
  2218. #endif
  2219. // Continue to mark from root one more time
  2220. scannedRootBytes += RootMark(CollectionStateRescanMark);
  2221. }
  2222. return scannedRootBytes;
  2223. }
  2224. #if ENABLE_CONCURRENT_GC
  2225. void
  2226. Recycler::DoParallelMark()
  2227. {
  2228. Assert(this->enableParallelMark);
  2229. Assert(this->maxParallelism > 1 && this->maxParallelism <= 4);
  2230. // Split the mark stack into [this->maxParallelism] equal pieces.
  2231. // The actual # of splits is returned, in case the stack was too small to split that many ways.
  2232. MarkContext * splitContexts[3] = { &parallelMarkContext1, &parallelMarkContext2, &parallelMarkContext3 };
  2233. uint actualSplitCount = markContext.Split(this->maxParallelism - 1, splitContexts);
  2234. Assert(actualSplitCount <= 3);
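// Resulting work distribution when everything starts successfully (a sketch; the
// fallbacks below handle any thread that fails to start):
//   markContext          -> concurrent thread (StartConcurrent below)
//   parallelMarkContext1 -> this thread (ProcessParallelMark below)
//   parallelMarkContext2 -> parallelThread1
//   parallelMarkContext3 -> parallelThread2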
  2235. // If we failed to split at all, just mark in thread with no parallelism.
  2236. if (actualSplitCount == 0)
  2237. {
  2238. this->ProcessMark(false);
  2239. return;
  2240. }
  2241. // We need to queue tracked objects while we mark in parallel.
  2242. // (Unless it's a partial collect, in which case we don't process tracked objects at all)
  2243. #if ENABLE_PARTIAL_GC
  2244. if (!this->inPartialCollectMode)
  2245. #endif
  2246. {
  2247. StartQueueTrackedObject();
  2248. }
  2249. // Kick off marking on the background thread
  2250. bool concurrentSuccess = StartConcurrent(CollectionStateParallelMark);
  2251. // If there's enough work to split, then kick off marking on parallel threads too.
  2252. // If the threads haven't been created yet, this will create them (or fail).
  2253. bool parallelSuccess1 = false;
  2254. bool parallelSuccess2 = false;
  2255. if (concurrentSuccess && actualSplitCount >= 2)
  2256. {
  2257. parallelSuccess1 = parallelThread1.StartConcurrent();
  2258. if (parallelSuccess1 && actualSplitCount == 3)
  2259. {
  2260. parallelSuccess2 = parallelThread2.StartConcurrent();
  2261. }
  2262. }
  2263. // Process our portion of the split.
  2264. this->ProcessParallelMark(false, &parallelMarkContext1);
  2265. // If we successfully launched parallel work, wait for it to complete.
  2266. // If we failed, then process the work in-thread now.
  2267. if (concurrentSuccess)
  2268. {
  2269. WaitForConcurrentThread(INFINITE, RecyclerWaitReason::DoParallelMark);
  2270. }
  2271. else
  2272. {
  2273. this->ProcessParallelMark(false, &markContext);
  2274. }
  2275. if (actualSplitCount >= 2)
  2276. {
  2277. if (parallelSuccess1)
  2278. {
  2279. parallelThread1.WaitForConcurrent();
  2280. }
  2281. else
  2282. {
  2283. this->ProcessParallelMark(false, &parallelMarkContext2);
  2284. }
  2285. if (actualSplitCount == 3)
  2286. {
  2287. if (parallelSuccess2)
  2288. {
  2289. parallelThread2.WaitForConcurrent();
  2290. }
  2291. else
  2292. {
  2293. this->ProcessParallelMark(false, &parallelMarkContext3);
  2294. }
  2295. }
  2296. }
  2297. this->SetCollectionState(CollectionStateMark);
  2298. // Process tracked objects, if any, then do one final mark phase in case they marked any new objects.
  2299. // (Unless it's a partial collect, in which case we don't process tracked objects at all)
  2300. #if ENABLE_PARTIAL_GC
  2301. if (!this->inPartialCollectMode)
  2302. #endif
  2303. {
  2304. this->ProcessTrackedObjects();
  2305. this->ProcessMark(false);
  2306. }
  2307. #if ENABLE_PARTIAL_GC
  2308. else
  2309. {
  2310. Assert(!this->HasPendingTrackObjects());
  2311. }
  2312. #endif
  2313. }
  2314. void
  2315. Recycler::DoBackgroundParallelMark()
  2316. {
  2317. // Split the mark stack into [this->maxParallelism - 1] equal pieces (thus, "- 2" below).
  2318. // The actual # of splits is returned, in case the stack was too small to split that many ways.
  2319. // The parallel threads are hardwired to use parallelMarkContext2/3, so we split using those.
  2320. uint actualSplitCount = 0;
  2321. MarkContext * splitContexts[2] = { &parallelMarkContext2, &parallelMarkContext3 };
  2322. if (this->enableParallelMark)
  2323. {
  2324. Assert(this->maxParallelism > 1 && this->maxParallelism <= 4);
  2325. if (this->maxParallelism > 2)
  2326. {
  2327. actualSplitCount = markContext.Split(this->maxParallelism - 2, splitContexts);
  2328. }
  2329. }
  2330. Assert(actualSplitCount <= 2);
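// Work distribution here (the current, background thread keeps markContext; see the
// ProcessParallelMark call below):
//   markContext          -> this (background) thread
//   parallelMarkContext2 -> parallelThread1
//   parallelMarkContext3 -> parallelThread2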
  2331. // If we failed to split at all, just mark in thread with no parallelism.
  2332. if (actualSplitCount == 0)
  2333. {
  2334. this->ProcessMark(true);
  2335. return;
  2336. }
  2337. #if ENABLE_PARTIAL_GC
  2338. // We should already be set up to queue tracked objects, unless this is a partial collect
  2339. Assert(this->DoQueueTrackedObject() || this->inPartialCollectMode);
  2340. #else
  2341. Assert(this->DoQueueTrackedObject());
  2342. #endif
  2343. this->SetCollectionState(CollectionStateBackgroundParallelMark);
  2344. // Kick off marking on parallel threads too, if there is work for them
  2345. // If the threads haven't been created yet, this will create them (or fail).
  2346. bool parallelSuccess1 = false;
  2347. bool parallelSuccess2 = false;
  2348. parallelSuccess1 = parallelThread1.StartConcurrent();
  2349. if (parallelSuccess1 && actualSplitCount == 2)
  2350. {
  2351. parallelSuccess2 = parallelThread2.StartConcurrent();
  2352. }
  2353. // Process our portion of the split.
  2354. this->ProcessParallelMark(true, &markContext);
  2355. // If we successfully launched parallel work, wait for it to complete.
  2356. // If we failed, then process the work in-thread now.
  2357. if (parallelSuccess1)
  2358. {
  2359. parallelThread1.WaitForConcurrent();
  2360. }
  2361. else
  2362. {
  2363. this->ProcessParallelMark(true, &parallelMarkContext2);
  2364. }
  2365. if (actualSplitCount == 2)
  2366. {
  2367. if (parallelSuccess2)
  2368. {
  2369. parallelThread2.WaitForConcurrent();
  2370. }
  2371. else
  2372. {
  2373. this->ProcessParallelMark(true, &parallelMarkContext3);
  2374. }
  2375. }
  2376. this->SetCollectionState(CollectionStateConcurrentMark);
  2377. }
  2378. #endif
  2379. size_t
  2380. Recycler::RootMark(CollectionState markState)
  2381. {
  2382. size_t scannedRootBytes = 0;
  2383. Assert(!this->NeedOOMRescan() || markState == CollectionStateRescanMark);
  2384. #if ENABLE_PARTIAL_GC
  2385. RecyclerVerboseTrace(GetRecyclerFlagsTable(), _u("PreMark done, partial collect: %d\n"), this->inPartialCollectMode);
  2386. #else
  2387. RecyclerVerboseTrace(GetRecyclerFlagsTable(), _u("PreMark done, partial collect not available\n"));
  2388. #endif
  2389. Assert(collectionState == (markState == CollectionStateMark? CollectionStateFindRoots : CollectionStateRescanFindRoots));
  2390. BOOL stacksScannedByRuntime = FALSE;
  2391. {
  2392. // We are about to scan roots in thread, notify the runtime first so it can stop threads if necessary and also provide additional roots
  2393. AUTO_NO_EXCEPTION_REGION;
  2394. RecyclerScanMemoryCallback scanMemory(this);
  2395. scannedRootBytes += collectionWrapper->RootMarkCallback(scanMemory, &stacksScannedByRuntime);
  2396. }
  2397. scannedRootBytes += FindRoots();
  2398. if (!stacksScannedByRuntime)
  2399. {
  2400. // The runtime did not scan the stack(s) for us, so we use the normal Recycler code.
  2401. scannedRootBytes += ScanStack();
  2402. }
  2403. this->SetCollectionState(markState);
  2404. #if ENABLE_CONCURRENT_GC
  2405. if (this->enableParallelMark)
  2406. {
  2407. this->DoParallelMark();
  2408. }
  2409. else
  2410. #endif
  2411. {
  2412. this->ProcessMark(false);
  2413. }
  2414. if (this->EndMark())
  2415. {
  2416. // REVIEW: This heuristic doesn't apply when partial is off so there's no need
  2417. // to modify scannedRootBytes here, correct?
  2418. #if ENABLE_PARTIAL_GC
2419. // Return a large scanned-root byte count so we don't get into partial mode if we are low on memory
  2420. scannedRootBytes = RecyclerSweepManager::MaxPartialCollectRescanRootBytes + 1;
  2421. #endif
  2422. }
  2423. return scannedRootBytes;
  2424. }
  2425. bool
  2426. Recycler::EndMarkCheckOOMRescan()
  2427. {
  2428. bool oomRescan = false;
  2429. if (this->NeedOOMRescan())
  2430. {
  2431. #ifdef RECYCLER_DUMP_OBJECT_GRAPH
  2432. if (this->objectGraphDumper)
  2433. {
  2434. // Do not complete the mark if we are just dumping the object graph
  2435. // Just report out of memory
  2436. this->objectGraphDumper->isOutOfMemory = true;
  2437. this->ClearNeedOOMRescan();
  2438. }
  2439. else
  2440. #endif
  2441. {
  2442. EndMarkOnLowMemory();
  2443. oomRescan = true;
  2444. }
  2445. }
  2446. // Done with the mark stack, it should be empty.
  2447. // Release pages it is holding.
  2448. Assert(!HasPendingMarkObjects());
  2449. Assert(!HasPendingTrackObjects());
  2450. return oomRescan;
  2451. }
  2452. bool
  2453. Recycler::EndMark()
  2454. {
  2455. #if ENABLE_CONCURRENT_GC
  2456. Assert(!this->DoQueueTrackedObject());
  2457. #endif
  2458. #if ENABLE_PARTIAL_GC
  2459. Assert(this->clientTrackedObjectList.Empty());
  2460. #endif
  2461. {
  2462. // We have finished marking
  2463. AUTO_NO_EXCEPTION_REGION;
  2464. collectionWrapper->EndMarkCallback();
  2465. }
  2466. bool oomRescan = EndMarkCheckOOMRescan();
  2467. if (ProcessObjectBeforeCollectCallbacks())
  2468. {
  2469. // callbacks may trigger additional marking, need to check OOMRescan again
  2470. oomRescan |= EndMarkCheckOOMRescan();
  2471. }
  2472. // GC-CONSIDER: Consider keeping some page around
  2473. GCETW(GC_DECOMMIT_CONCURRENT_COLLECT_PAGE_ALLOCATOR_START, (this));
  2474. // Clean up mark contexts, which will release held free pages
  2475. // Do this for all contexts before we decommit, to make sure all pages are freed
  2476. markContext.Cleanup();
  2477. parallelMarkContext1.Cleanup();
  2478. parallelMarkContext2.Cleanup();
  2479. parallelMarkContext3.Cleanup();
  2480. // Decommit all pages
  2481. markContext.DecommitPages();
  2482. parallelMarkContext1.DecommitPages();
  2483. parallelMarkContext2.DecommitPages();
  2484. parallelMarkContext3.DecommitPages();
  2485. GCETW(GC_DECOMMIT_CONCURRENT_COLLECT_PAGE_ALLOCATOR_STOP, (this));
  2486. return oomRescan;
  2487. }
  2488. void
  2489. Recycler::EndMarkOnLowMemory()
  2490. {
  2491. GCETW(GC_ENDMARKONLOWMEMORY_START, (this));
  2492. Assert(this->NeedOOMRescan());
  2493. this->inEndMarkOnLowMemory = true;
  2494. // Treat this as a concurrent mark reset so that we don't invalidate the allocators
  2495. RecyclerVerboseTrace(GetRecyclerFlagsTable(), _u("OOM during mark- rerunning mark\n"));
  2496. // Try to release as much memory as possible
  2497. autoHeap.DecommitNow();
  2498. #ifdef ENABLE_DEBUG_CONFIG_OPTIONS
  2499. uint iterations = 0;
  2500. #endif
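// Each pass below clears the OOM-rescan flag, rescans the heap block map for objects
// whose children could not be pushed when the mark stack ran out of memory, and then
// drains the mark stack again; repeat until no block still needs an OOM rescan.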
  2501. do
  2502. {
  2503. #if ENABLE_PARTIAL_GC
  2504. Assert(this->clientTrackedObjectList.Empty());
  2505. #endif
  2506. #if ENABLE_CONCURRENT_GC
  2507. // Always queue tracked objects during rescan, to avoid changes to mark state.
  2508. // (Unless we're in a partial, in which case we ignore tracked objects)
  2509. Assert(!this->DoQueueTrackedObject());
  2510. #if ENABLE_PARTIAL_GC
  2511. if (!this->inPartialCollectMode)
  2512. #endif
  2513. {
  2514. this->StartQueueTrackedObject();
  2515. }
  2516. #endif
  2517. this->SetCollectionState(CollectionStateRescanFindRoots);
  2518. this->ClearNeedOOMRescan();
  2519. #if DBG
  2520. Assert(!this->isProcessingRescan);
  2521. this->isProcessingRescan = true;
  2522. #endif
  2523. if (!heapBlockMap.OOMRescan(this))
  2524. {
2525. // Kill the process - we couldn't even rescan a single block,
2526. // so we are in a pretty low memory state at this point
  2527. // The fail-fast is present for two reasons:
  2528. // 1) Defense-in-depth for cases we hadn't thought about
  2529. // 2) Deal with cases like -MaxMarkStackPageCount:1 which can still hang without the fail-fast
  2530. MarkStack_OOM_unrecoverable_error();
  2531. }
  2532. autoHeap.Rescan(RescanFlags_None);
  2533. DebugOnly(this->isProcessingRescan = false);
  2534. this->ProcessMark(false);
  2535. #if ENABLE_CONCURRENT_GC
  2536. // Process any tracked objects we found
  2537. #if ENABLE_PARTIAL_GC
  2538. if (!this->inPartialCollectMode)
  2539. #endif
  2540. {
  2541. ProcessTrackedObjects();
  2542. }
  2543. #endif
  2544. // Drain the mark stack
  2545. ProcessMark(false);
  2546. #ifdef ENABLE_DEBUG_CONFIG_OPTIONS
  2547. iterations++;
  2548. #endif
  2549. }
  2550. while (this->NeedOOMRescan());
  2551. Assert(!markContext.GetPageAllocator()->DisableAllocationOutOfMemory());
  2552. Assert(!parallelMarkContext1.GetPageAllocator()->DisableAllocationOutOfMemory());
  2553. Assert(!parallelMarkContext2.GetPageAllocator()->DisableAllocationOutOfMemory());
  2554. Assert(!parallelMarkContext3.GetPageAllocator()->DisableAllocationOutOfMemory());
  2555. CUSTOM_PHASE_PRINT_TRACE1(GetRecyclerFlagsTable(), Js::RecyclerPhase, _u("EndMarkOnLowMemory iterations: %d\n"), iterations);
  2556. #if ENABLE_PARTIAL_GC
  2557. Assert(this->clientTrackedObjectList.Empty());
  2558. #endif
  2559. #if ENABLE_CONCURRENT_GC
  2560. Assert(!this->DoQueueTrackedObject());
  2561. #endif
  2562. this->inEndMarkOnLowMemory = false;
  2563. #if ENABLE_PARTIAL_GC
  2564. if (this->inPartialCollectMode)
  2565. {
  2566. this->FinishPartialCollect();
  2567. }
  2568. #endif
  2569. GCETW(GC_ENDMARKONLOWMEMORY_STOP, (this));
  2570. }
  2571. #if DBG
  2572. bool
  2573. Recycler::IsMarkStackEmpty()
  2574. {
  2575. return (markContext.IsEmpty() && parallelMarkContext1.IsEmpty() && parallelMarkContext2.IsEmpty() && parallelMarkContext3.IsEmpty());
  2576. }
  2577. #endif
  2578. #ifdef HEAP_ENUMERATION_VALIDATION
  2579. void
  2580. Recycler::PostHeapEnumScan(PostHeapEnumScanCallback callback, void *data)
  2581. {
  2582. this->pfPostHeapEnumScanCallback = callback;
  2583. this->postHeapEnunScanData = data;
  2584. FindRoots();
  2585. ProcessMark(false);
  2586. this->pfPostHeapEnumScanCallback = NULL;
  2587. this->postHeapEnunScanData = NULL;
  2588. }
  2589. #endif
  2590. #if ENABLE_CONCURRENT_GC
  2591. bool
  2592. Recycler::QueueTrackedObject(FinalizableObject * trackableObject)
  2593. {
  2594. return markContext.AddTrackedObject(trackableObject);
  2595. }
  2596. #endif
  2597. bool
  2598. Recycler::FindImplicitRootObject(void* candidate, RecyclerHeapObjectInfo& heapObject)
  2599. {
  2600. HeapBlock* heapBlock = FindHeapBlock(candidate);
  2601. if (heapBlock == nullptr)
  2602. {
  2603. return false;
  2604. }
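// Dispatch on block type: values below SmallAllocBlockTypeCount are small blocks;
// of the remainder, anything that is not a large block is a medium block.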
  2605. if (heapBlock->GetHeapBlockType() < HeapBlock::HeapBlockType::SmallAllocBlockTypeCount)
  2606. {
  2607. return ((SmallHeapBlock*)heapBlock)->FindImplicitRootObject(candidate, this, heapObject);
  2608. }
  2609. else if (!heapBlock->IsLargeHeapBlock())
  2610. {
  2611. return ((MediumHeapBlock*)heapBlock)->FindImplicitRootObject(candidate, this, heapObject);
  2612. }
  2613. else
  2614. {
  2615. return ((LargeHeapBlock*)heapBlock)->FindImplicitRootObject(candidate, this, heapObject);
  2616. }
  2617. }
  2618. bool
  2619. Recycler::FindHeapObject(void* candidate, FindHeapObjectFlags flags, RecyclerHeapObjectInfo& heapObject)
  2620. {
  2621. HeapBlock* heapBlock = FindHeapBlock(candidate);
  2622. return heapBlock && heapBlock->FindHeapObject(candidate, this, flags, heapObject);
  2623. }
  2624. bool
  2625. Recycler::FindHeapObjectWithClearedAllocators(void* candidate, RecyclerHeapObjectInfo& heapObject)
  2626. {
2627. // Heap enum has some cases where it allocates, so we can't assert while it is in progress
  2628. Assert(autoHeap.AllocatorsAreEmpty() || this->isHeapEnumInProgress);
  2629. return FindHeapObject(candidate, FindHeapObjectFlags_ClearedAllocators, heapObject);
  2630. }
  2631. void*
  2632. Recycler::GetRealAddressFromInterior(void* candidate)
  2633. {
  2634. HeapBlock * heapBlock = heapBlockMap.GetHeapBlock(candidate);
  2635. if (heapBlock == NULL)
  2636. {
  2637. return NULL;
  2638. }
  2639. return heapBlock->GetRealAddressFromInterior(candidate);
  2640. }
  2641. /*------------------------------------------------------------------------------------------------
  2642. * Sweep
  2643. *------------------------------------------------------------------------------------------------*/
  2644. #if ENABLE_PARTIAL_GC
  2645. bool
  2646. Recycler::Sweep(size_t rescanRootBytes, bool concurrent, bool adjustPartialHeuristics)
  2647. #else
  2648. bool
  2649. Recycler::Sweep(bool concurrent)
  2650. #endif
  2651. {
  2652. #if ENABLE_PARTIAL_GC && ENABLE_CONCURRENT_GC
  2653. Assert(!this->hasBackgroundFinishPartial);
  2654. #endif
  2655. #if ENABLE_CONCURRENT_GC
  2656. if (!this->enableConcurrentSweep)
  2657. #endif
  2658. {
  2659. concurrent = false;
  2660. }
  2661. RECYCLER_PROFILE_EXEC_BEGIN(this, concurrent? Js::ConcurrentSweepPhase : Js::SweepPhase);
  2662. #if ENABLE_PARTIAL_GC
  2663. recyclerSweepManagerInstance.BeginSweep(this, rescanRootBytes, adjustPartialHeuristics);
  2664. #else
  2665. recyclerSweepManagerInstance.BeginSweep(this);
  2666. #endif
  2667. this->SweepHeap(concurrent, *recyclerSweepManager);
  2668. #if ENABLE_CONCURRENT_GC
  2669. if (concurrent)
  2670. {
  2671. // If we finished mark in the background, all the relevant write watches should already be reset
  2672. // Only reset write watch if we didn't finish mark in the background
  2673. if (this->backgroundFinishMarkCount == 0)
  2674. {
  2675. #if ENABLE_PARTIAL_GC
  2676. if (this->inPartialCollectMode)
  2677. {
  2678. #ifdef RECYCLER_WRITE_WATCH
  2679. if (!CONFIG_FLAG(ForceSoftwareWriteBarrier))
  2680. {
  2681. RECYCLER_PROFILE_EXEC_BEGIN(this, Js::ResetWriteWatchPhase);
  2682. if (!autoHeap.ResetWriteWatch())
  2683. {
  2684. // Shouldn't happen
  2685. Assert(false);
  2686. // Disable partial collect
  2687. this->enablePartialCollect = false;
  2688. // We haven't done any partial collection yet, just get out of partial collect mode
  2689. this->inPartialCollectMode = false;
  2690. }
  2691. RECYCLER_PROFILE_EXEC_END(this, Js::ResetWriteWatchPhase);
  2692. }
  2693. #endif
  2694. }
  2695. #endif
  2696. }
  2697. }
  2698. else
  2699. #endif
  2700. {
  2701. recyclerSweepManager->FinishSweep();
  2702. recyclerSweepManager->EndSweep();
  2703. }
  2704. RECYCLER_PROFILE_EXEC_END(this, concurrent? Js::ConcurrentSweepPhase : Js::SweepPhase);
  2705. this->SetCollectionState(CollectionStatePostSweepRedeferralCallback);
  2706. // Note that PostSweepRedeferralCallback can't have exception escape.
  2707. collectionWrapper->PostSweepRedeferralCallBack();
  2708. #if ENABLE_CONCURRENT_GC
  2709. if (concurrent)
  2710. {
2711. bool needForceForeground = !StartConcurrent(CollectionStateConcurrentSweep);
2712. if (needForceForeground)
  2713. {
  2714. // Failed to spawn the concurrent sweep.
  2715. // Instead, force the concurrent sweep to happen right here in thread.
  2716. #if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
  2717. if (CONFIG_FLAG_RELEASE(EnableConcurrentSweepAlloc))
  2718. {
  2719. this->allowAllocationsDuringConcurrentSweepForCollection = false;
  2720. }
  2721. #endif
  2722. this->SetCollectionState(CollectionStateConcurrentSweep);
  2723. DoBackgroundWork(true);
  2724. // Continue as if the concurrent sweep were executing
  2725. // Next time we check for completion, we will finish the sweep just as if it had happened out of thread.
  2726. }
  2727. #ifdef ENABLE_JS_ETW
  2728. collectionFinishReason = ETWEventGCActivationTrigger::ETWEvent_GC_Trigger_Status_StartedConcurrent;
  2729. #endif
  2730. return true;
  2731. }
  2732. #endif
  2733. #ifdef ENABLE_JS_ETW
  2734. // The false below just means we don't need a concurrent sweep as we have completed a sweep above.
  2735. collectionFinishReason = ETWEventGCActivationTrigger::ETWEvent_GC_Trigger_Status_Completed;
  2736. #endif
  2737. return false;
  2738. }
  2739. #ifdef ENABLE_DEBUG_CONFIG_OPTIONS
  2740. void Recycler::DisplayMemStats()
  2741. {
  2742. #ifdef PERF_COUNTERS
  2743. #if DBG_DUMP
  2744. Output::Print(_u("Recycler Live Object Count %u\n"), PerfCounter::RecyclerCounterSet::GetLiveObjectCounter().GetValue());
  2745. Output::Print(_u("Recycler Live Object Size %u\n"), PerfCounter::RecyclerCounterSet::GetLiveObjectSizeCounter().GetValue());
  2746. #endif
  2747. Output::Print(_u("Recycler Used Page Size %u\n"), PerfCounter::PageAllocatorCounterSet::GetUsedSizeCounter(PageAllocatorType::PageAllocatorType_Recycler).GetValue());
  2748. #endif
  2749. }
  2750. #endif
  2751. CollectedRecyclerWeakRefHeapBlock CollectedRecyclerWeakRefHeapBlock::Instance;
  2752. void
  2753. Recycler::SweepWeakReference()
  2754. {
  2755. RECYCLER_PROFILE_EXEC_BEGIN(this, Js::SweepWeakPhase);
  2756. GCETW(GC_SWEEP_WEAKREF_START, (this));
  2757. // REVIEW: Clean up the weak reference map concurrently?
  2758. bool hasCleanup = false;
  2759. #if defined(GCETW) && defined(ENABLE_JS_ETW)
  2760. uint scannedCount = weakReferenceMap.Count();
  2761. #endif
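// An entry survives the sweep only if both the weak reference object itself and its
// target are still marked; if the target died, strongRef is cleared before the entry
// is removed from the map.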
  2762. weakReferenceMap.Map([&hasCleanup](RecyclerWeakReferenceBase * weakRef) -> bool
  2763. {
  2764. if (!weakRef->weakRefHeapBlock->TestObjectMarkedBit(weakRef))
  2765. {
  2766. hasCleanup = true;
  2767. // Remove
  2768. return false;
  2769. }
  2770. if (!weakRef->strongRefHeapBlock->TestObjectMarkedBit(weakRef->strongRef))
  2771. {
  2772. hasCleanup = true;
  2773. weakRef->strongRef = nullptr;
  2774. // Put in a dummy heap block so that we can still do the isPendingConcurrentSweep check first.
  2775. weakRef->strongRefHeapBlock = &CollectedRecyclerWeakRefHeapBlock::Instance;
  2776. // Remove
  2777. return false;
  2778. }
  2779. return true;
  2780. });
  2781. #if defined(GCETW) && defined(ENABLE_JS_ETW)
  2782. uint regionScannedCount = 0;
  2783. uint regionClearedCount = 0;
  2784. #endif
  2785. #if ENABLE_WEAK_REFERENCE_REGIONS
  2786. auto edIt = this->weakReferenceRegionList.GetEditingIterator();
  2787. while (edIt.Next())
  2788. {
  2789. RecyclerWeakReferenceRegion region = edIt.Data();
2790. // We want to see if user code has any reference to the region; if not, we can free the whole thing
  2791. if (!region.GetHeapBlock()->TestObjectMarkedBit(region.GetPtr()))
  2792. {
  2793. edIt.RemoveCurrent();
  2794. hasCleanup = true;
  2795. #if defined(GCETW) && defined(ENABLE_JS_ETW)
  2796. regionClearedCount += (uint)region.GetCount();
  2797. #endif
  2798. continue;
  2799. }
2800. // The region is referenced; clean up any stale weak references
  2801. RecyclerWeakReferenceRegionItem<void*>* refs = region.GetPtr();
  2802. #if defined(GCETW) && defined(ENABLE_JS_ETW)
  2803. regionScannedCount += (uint)region.GetCount();
  2804. #endif
  2805. for (size_t i = 0; i < region.GetCount(); ++i)
  2806. {
  2807. RecyclerWeakReferenceRegionItem<void*> &ref = refs[i];
  2808. if (ref.ptr == nullptr)
  2809. {
  2810. continue;
  2811. }
  2812. if (((uintptr_t)ref.heapBlock & 0x1) == 0x1)
  2813. {
  2814. // Background thread marked this ref. Unmark it, and keep it
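// (HeapBlock pointers are aligned, so the low bit is free to carry this transient mark.)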
  2815. ref.heapBlock = (HeapBlock*)((uintptr_t)ref.heapBlock & ~0x1);
  2816. continue;
  2817. }
  2818. if (ref.heapBlock == nullptr)
  2819. {
  2820. HeapBlock* block = this->FindHeapBlock(ref.ptr);
  2821. if (block == nullptr)
  2822. {
  2823. // This is not a real reference
  2824. AssertMsg(false, "WeakReferenceRegionItems should only contain recycler references");
  2825. continue;
  2826. }
  2827. else
  2828. {
  2829. ref.heapBlock = block;
  2830. }
  2831. }
  2832. if (!ref.heapBlock->TestObjectMarkedBit(ref))
  2833. {
  2834. ref.ptr = nullptr;
  2835. ref.heapBlock = nullptr;
  2836. hasCleanup = true;
  2837. #if defined(GCETW) && defined(ENABLE_JS_ETW)
  2838. regionClearedCount++;
  2839. #endif
  2840. }
  2841. }
  2842. }
  2843. #endif
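// Bump the cleanup id only if something was actually cleared (hasCleanup is 0 or 1),
// presumably so consumers can detect that weak references were swept.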
  2844. this->weakReferenceCleanupId += hasCleanup;
  2845. #if defined(GCETW) && defined(ENABLE_JS_ETW)
  2846. const uint keptCount = weakReferenceMap.Count();
  2847. GCETW(GC_SWEEP_WEAKREF_STOP_EX, (this, scannedCount, (scannedCount - keptCount), regionScannedCount, regionClearedCount));
  2848. #endif
  2849. RECYCLER_PROFILE_EXEC_END(this, Js::SweepWeakPhase);
  2850. }
  2851. void
  2852. Recycler::SweepHeap(bool concurrent, RecyclerSweepManager& recyclerSweepManager)
  2853. {
  2854. Assert(!this->hasPendingDeleteGuestArena);
  2855. Assert(!this->isHeapEnumInProgress);
  2856. #if ENABLE_CONCURRENT_GC
  2857. Assert(!this->DoQueueTrackedObject());
  2858. if (concurrent)
  2859. {
  2860. SetCollectionState(CollectionStateSetupConcurrentSweep);
  2861. #if ENABLE_BACKGROUND_PAGE_ZEROING
  2862. if (CONFIG_FLAG(EnableBGFreeZero))
  2863. {
  2864. autoHeap.StartQueueZeroPage();
  2865. }
  2866. #endif
  2867. }
  2868. else
  2869. #endif
  2870. {
  2871. Assert(!concurrent);
  2872. SetCollectionState(CollectionStateSweep);
  2873. }
  2874. this->SweepWeakReference();
  2875. #if ENABLE_CONCURRENT_GC
  2876. if (concurrent)
  2877. {
  2878. GCETW(GC_SETUPBACKGROUNDSWEEP_START, (this));
  2879. }
  2880. else
  2881. #endif
  2882. {
  2883. GCETW(GC_SWEEP_START, (this));
  2884. }
  2885. autoHeap.FinalizeAndSweep(recyclerSweepManager, concurrent);
  2886. #if ENABLE_CONCURRENT_GC
  2887. if (concurrent)
  2888. {
  2889. #if ENABLE_BACKGROUND_PAGE_ZEROING
  2890. if (CONFIG_FLAG(EnableBGFreeZero))
  2891. {
  2892. autoHeap.StopQueueZeroPage();
  2893. }
  2894. #endif
  2895. GCETW(GC_SETUPBACKGROUNDSWEEP_STOP, (this));
  2896. }
  2897. else
  2898. {
  2899. #if ENABLE_BACKGROUND_PAGE_ZEROING
  2900. if (CONFIG_FLAG(EnableBGFreeZero))
  2901. {
  2902. Assert(!autoHeap.HasZeroQueuedPages());
  2903. }
  2904. #endif
  2905. uint sweptBytes = 0;
  2906. #ifdef RECYCLER_STATS
  2907. sweptBytes = (uint)collectionStats.objectSweptBytes;
  2908. #endif
  2909. GCETW(GC_SWEEP_STOP, (this, sweptBytes));
  2910. }
  2911. #endif
  2912. }
  2913. #if ENABLE_PARTIAL_GC && ENABLE_CONCURRENT_GC
  2914. void
  2915. Recycler::BackgroundFinishPartialCollect(RecyclerSweepManager * recyclerSweepManager)
  2916. {
  2917. Assert(this->inPartialCollectMode);
  2918. Assert(recyclerSweepManager != nullptr && recyclerSweepManager->IsBackground());
  2919. this->hasBackgroundFinishPartial = true;
  2920. this->autoHeap.FinishPartialCollect(recyclerSweepManager);
  2921. this->inPartialCollectMode = false;
  2922. }
  2923. #endif
  2924. void
  2925. Recycler::DisposeObjects()
  2926. {
  2927. Assert(this->allowDispose && this->hasDisposableObject && !this->inDispose);
  2928. Assert(!isHeapEnumInProgress);
  2929. GCETW(GC_DISPOSE_START, (this));
  2930. ASYNC_HOST_OPERATION_START(collectionWrapper);
  2931. this->inDispose = true;
  2932. #ifdef PROFILE_RECYCLER_ALLOC
2933. // Finalizers may allocate memory, and object disposal can happen in the middle of an allocation,
2934. // so save and restore the tracked allocation info
  2935. TrackAllocData oldAllocData = { 0 };
  2936. if (trackerDictionary != nullptr)
  2937. {
  2938. oldAllocData = nextAllocData;
  2939. nextAllocData.Clear();
  2940. }
  2941. #endif
  2942. #ifdef ENABLE_DEBUG_CONFIG_OPTIONS
  2943. if (GetRecyclerFlagsTable().Trace.IsEnabled(Js::RecyclerPhase))
  2944. {
  2945. Output::Print(_u("Disposing objects\n"));
  2946. }
  2947. #endif
  2948. // Disable dispose within this method, restore it when we're done
  2949. AutoRestoreValue<bool> disableDispose(&this->allowDispose, false);
  2950. #ifdef FAULT_INJECTION
  2951. this->collectionWrapper->DisposeScriptContextByFaultInjectionCallBack();
  2952. #endif
  2953. this->collectionWrapper->PreDisposeObjectsCallBack();
  2954. // Scope timestamp to just dispose
  2955. {
  2956. AUTO_TIMESTAMP(dispose);
  2957. autoHeap.DisposeObjects();
  2958. }
  2959. #ifdef PROFILE_RECYCLER_ALLOC
  2960. if (trackerDictionary != nullptr)
  2961. {
  2962. Assert(nextAllocData.IsEmpty());
  2963. nextAllocData = oldAllocData;
  2964. }
  2965. #endif
  2966. #ifdef ENABLE_PROJECTION
  2967. {
  2968. Assert(!this->inResolveExternalWeakReferences);
  2969. Assert(!this->allowDispose);
  2970. #if DBG || defined RECYCLER_TRACE
  2971. AutoRestoreValue<bool> inResolveExternalWeakReferencedObjects(&this->inResolveExternalWeakReferences, true);
  2972. #endif
  2973. AUTO_TIMESTAMP(externalWeakReferenceObjectResolve);
  2974. // This is where it is safe to resolve external weak references as they can lead to new script entry
  2975. collectionWrapper->ResolveExternalWeakReferencedObjects();
  2976. }
  2977. #endif
  2978. Assert(!this->inResolveExternalWeakReferences);
  2979. Assert(this->inDispose);
  2980. this->inDispose = false;
  2981. ASYNC_HOST_OPERATION_END(collectionWrapper);
  2982. uint sweptBytes = 0;
  2983. #ifdef RECYCLER_STATS
  2984. sweptBytes = (uint)collectionStats.objectSweptBytes;
  2985. #endif
  2986. GCETW(GC_DISPOSE_STOP, (this, sweptBytes));
  2987. }
  2988. bool
  2989. Recycler::FinishDisposeObjects()
  2990. {
  2991. CUSTOM_PHASE_PRINT_TRACE1(GetRecyclerFlagsTable(), Js::DisposePhase, _u("[Dispose] AllowDispose in FinishDisposeObject: %d\n"), this->allowDispose);
  2992. if (this->hasDisposableObject && this->allowDispose)
  2993. {
  2994. CUSTOM_PHASE_PRINT_TRACE1(GetRecyclerFlagsTable(), Js::DisposePhase, _u("[Dispose] FinishDisposeObject, calling Dispose: %d\n"), this->allowDispose);
  2995. #ifdef RECYCLER_TRACE
  2996. CollectionParam savedCollectionParam = collectionParam;
  2997. #endif
  2998. DisposeObjects();
  2999. #ifdef RECYCLER_TRACE
  3000. collectionParam = savedCollectionParam;
  3001. #endif
3002. // FinishDisposeObjects is always called either during a collection,
3003. // or from a path that will check NeedExhaustiveRepeatCollect(), so no need to check it here
  3004. return true;
  3005. }
  3006. #ifdef RECYCLER_TRACE
  3007. if (!this->inDispose && this->hasDisposableObject
  3008. && GetRecyclerFlagsTable().Trace.IsEnabled(Js::RecyclerPhase))
  3009. {
  3010. Output::Print(_u("%04X> RC(%p): %s %d\n"), this->mainThreadId, this, _u("Dispose object delayed"), static_cast<CollectionState>(this->collectionState));
  3011. }
  3012. #endif
  3013. return false;
  3014. }
  3015. template bool Recycler::FinishDisposeObjectsNow<FinishDispose>();
  3016. template bool Recycler::FinishDisposeObjectsNow<FinishDisposeTimed>();
  3017. template <CollectionFlags flags>
  3018. bool
  3019. Recycler::FinishDisposeObjectsNow()
  3020. {
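// Reentrancy guard: if we are already inside the dispose wrapper, don't start another dispose pass.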
  3021. if (inDisposeWrapper)
  3022. {
  3023. return false;
  3024. }
  3025. return FinishDisposeObjectsWrapped<flags>();
  3026. }
  3027. template <CollectionFlags flags>
  3028. inline
  3029. bool
  3030. Recycler::FinishDisposeObjectsWrapped()
  3031. {
  3032. const BOOL allowDisposeFlag = flags & CollectOverride_AllowDispose;
  3033. if (allowDisposeFlag)
  3034. {
3035. // Disposing objects can be reentrant; make sure no reentrancy lock is held when calling Dispose
  3036. DebugOnly(collectionWrapper->CheckJsReentrancyOnDispose());
  3037. if (this->NeedDispose())
  3038. {
  3039. if ((flags & CollectHeuristic_TimeIfScriptActive) == CollectHeuristic_TimeIfScriptActive)
  3040. {
  3041. if (!this->NeedDisposeTimed())
  3042. {
  3043. return false;
  3044. }
  3045. }
  3046. this->allowDispose = true;
  3047. this->inDisposeWrapper = true;
  3048. #ifdef RECYCLER_TRACE
  3049. if (GetRecyclerFlagsTable().Trace.IsEnabled(Js::RecyclerPhase))
  3050. {
  3051. Output::Print(_u("%04X> RC(%p): %s\n"), this->mainThreadId, this, _u("Process delayed dispose object"));
  3052. }
  3053. #endif
  3054. collectionWrapper->DisposeObjects(this);
3055. // Dispose may enter a message loop and cause a reentrant GC. If reentrant GCs are not allowed,
3056. // the request will get added as a pending collect request.
3057. // FinishDisposeObjectsWrapped/DisposeObjectsWrapped may be called at a place that is not during a collection
3058. // and won't check NeedExhaustiveRepeatCollect(), so check it here to honor those pending requests
  3059. if (!this->CollectionInProgress() && NeedExhaustiveRepeatCollect() && ((flags & CollectOverride_NoExhaustiveCollect) != CollectOverride_NoExhaustiveCollect))
  3060. {
  3061. #ifdef RECYCLER_TRACE
  3062. CaptureCollectionParam((CollectionFlags)(flags & ~CollectMode_Partial), true);
  3063. #endif
  3064. DoCollectWrapped((CollectionFlags)(flags & ~CollectMode_Partial));
  3065. }
  3066. this->inDisposeWrapper = false;
  3067. return true;
  3068. }
  3069. }
  3070. return false;
  3071. }
  3072. /*------------------------------------------------------------------------------------------------
  3073. * Collect
  3074. *------------------------------------------------------------------------------------------------*/
  3075. BOOL
  3076. Recycler::CollectOnAllocatorThread()
  3077. {
  3078. #if ENABLE_PARTIAL_GC
  3079. Assert(!inPartialCollectMode);
  3080. #endif
  3081. #ifdef RECYCLER_TRACE
  3082. PrintCollectTrace(Js::GarbageCollectPhase);
  3083. #endif
  3084. this->CollectionBegin<Js::GarbageCollectPhase>();
  3085. this->Mark();
  3086. // Partial collect mode is not re-enabled after a non-partial in-thread GC because partial GC heuristics are not adjusted
  3087. // after a full in-thread GC. Enabling partial collect mode causes partial GC heuristics to be reset before the next full
  3088. // in-thread GC, thereby allowing partial GC to kick in more easily without being able to adjust heuristics after the full
  3089. // GCs. Until we have a way of adjusting partial GC heuristics after a full in-thread GC, once partial collect mode is
  3090. // turned off, it will remain off until a concurrent GC happens
  3091. this->Sweep();
  3092. this->CollectionEnd<Js::GarbageCollectPhase>();
  3093. FinishCollection();
  3094. return true;
  3095. }
  3096. // Explicitly instantiate all possible modes
  3097. template BOOL Recycler::CollectNow<CollectOnScriptIdle>();
  3098. template BOOL Recycler::CollectNow<CollectOnScriptExit>();
  3099. template BOOL Recycler::CollectNow<CollectOnAllocation>();
  3100. template BOOL Recycler::CollectNow<CollectOnTypedArrayAllocation>();
  3101. template BOOL Recycler::CollectNow<CollectOnScriptCloseNonPrimary>();
  3102. template BOOL Recycler::CollectNow<CollectExhaustiveCandidate>();
  3103. template BOOL Recycler::CollectNow<CollectNowConcurrent>();
  3104. template BOOL Recycler::CollectNow<CollectNowExhaustive>();
  3105. template BOOL Recycler::CollectNow<CollectNowDecommitNowExplicit>();
  3106. template BOOL Recycler::CollectNow<CollectNowPartial>();
  3107. template BOOL Recycler::CollectNow<CollectNowConcurrentPartial>();
  3108. template BOOL Recycler::CollectNow<CollectNowForceInThread>();
  3109. template BOOL Recycler::CollectNow<CollectNowForceInThreadExternal>();
  3110. template BOOL Recycler::CollectNow<CollectNowForceInThreadExternalNoStack>();
  3111. template BOOL Recycler::CollectNow<CollectNowForceInThreadExternalExhaustive>();
  3112. template BOOL Recycler::CollectNow<CollectNowForceInThreadExternalExhaustiveNoStack>();
  3113. template BOOL Recycler::CollectNow<CollectOnRecoverFromOutOfMemory>();
  3114. template BOOL Recycler::CollectNow<CollectNowDefault>();
  3115. template BOOL Recycler::CollectNow<CollectOnSuspendCleanup>();
  3116. template BOOL Recycler::CollectNow<CollectNowDefaultLSCleanup>();
  3117. #if defined(CHECK_MEMORY_LEAK) || defined(LEAK_REPORT)
  3118. template BOOL Recycler::CollectNow<CollectNowFinalGC>();
  3119. #endif
  3120. #ifdef ENABLE_DEBUG_CONFIG_OPTIONS
  3121. template BOOL Recycler::CollectNow<CollectNowExhaustiveSkipStack>();
  3122. #endif
  3123. template <CollectionFlags flags>
  3124. BOOL
  3125. Recycler::CollectNow()
  3126. {
  3127. // Force-in-thread cannot be concurrent or partial
  3128. CompileAssert((flags & CollectOverride_ForceInThread) == 0 || (flags & (CollectMode_Concurrent | CollectMode_Partial)) == 0);
  3129. // Collections not allowed when the recycler is currently executing the PostCollectionCallback
  3130. if (this->IsAllocatableCallbackState())
  3131. {
  3132. return false;
  3133. }
  3134. #if ENABLE_DEBUG_CONFIG_OPTIONS
  3135. if ((disableCollection && (flags & CollectOverride_Explicit) == 0) || isShuttingDown)
  3136. #else
  3137. if (isShuttingDown)
  3138. #endif
  3139. {
  3140. Assert(collectionState == CollectionStateNotCollecting
  3141. || collectionState == CollectionStateExit
  3142. || this->isShuttingDown);
  3143. return false;
  3144. }
  3145. if (flags & CollectOverride_ExhaustiveCandidate)
  3146. {
  3147. return CollectWithExhaustiveCandidate<flags>();
  3148. }
  3149. return CollectInternal<flags>();
  3150. }
  3151. template <CollectionFlags flags>
  3152. BOOL
  3153. Recycler::GetPartialFlag()
  3154. {
  3155. #if ENABLE_PARTIAL_GC
  3156. #pragma prefast(suppress:6313, "flags is a template parameter and can be 0")
  3157. return(flags & CollectMode_Partial) && inPartialCollectMode;
  3158. #else
  3159. return false;
  3160. #endif
  3161. }
  3162. template <CollectionFlags flags>
  3163. BOOL
  3164. Recycler::CollectWithExhaustiveCandidate()
  3165. {
  3166. Assert(flags & CollectOverride_ExhaustiveCandidate);
3167. // Currently we don't have any exhaustive candidate that has a heuristic.
  3168. Assert((flags & CollectHeuristic_Mask & ~CollectHeuristic_Never) == 0);
  3169. this->hasExhaustiveCandidate = true;
  3170. if (flags & CollectHeuristic_Never)
  3171. {
  3172. // This is just an exhaustive candidate notification. Don't trigger a GC.
  3173. return false;
  3174. }
  3175. // Continue with the GC heuristic
  3176. return CollectInternal<flags>();
  3177. }
  3178. template <CollectionFlags flags>
  3179. BOOL
  3180. Recycler::CollectInternal()
  3181. {
  3182. // CollectHeuristic_Never flag should only be used with exhaustive candidate
  3183. Assert((flags & CollectHeuristic_Never) == 0);
  3184. // If we're in a re-entrant state, we want to allow GC to be triggered only
  3185. // from allocation (or trigger points with AllowReentrant). This is to minimize
  3186. // the number of reentrant GCs
  3187. if ((flags & CollectOverride_AllowReentrant) == 0 && this->inDispose)
  3188. {
  3189. return false;
  3190. }
  3191. #ifdef RECYCLER_TRACE
  3192. CaptureCollectionParam(flags);
  3193. #endif
  3194. #if ENABLE_CONCURRENT_GC
  3195. const BOOL concurrent = flags & CollectMode_Concurrent;
  3196. const BOOL finishConcurrent = flags & CollectOverride_FinishConcurrent;
  3197. // If we priority boosted, we should try to finish it every chance we get
3198. // Otherwise, we should finish it if we are not doing a concurrent GC,
  3199. // or the flags tell us to always try to finish a concurrent GC (CollectOverride_FinishConcurrent)
  3200. if ((!concurrent || finishConcurrent || priorityBoost) && this->CollectionInProgress())
  3201. {
  3202. return TryFinishConcurrentCollect<flags>();
  3203. }
  3204. #endif
  3205. if (flags & CollectHeuristic_Mask)
  3206. {
  3207. // Check some heuristics first before starting a collection
  3208. return CollectWithHeuristic<flags>();
  3209. }
  3210. // Start a collection now.
  3211. return Collect<flags>();
  3212. }
  3213. template <CollectionFlags flags>
  3214. BOOL
  3215. Recycler::CollectWithHeuristic()
  3216. {
  3217. // CollectHeuristic_Never flag should only be used with exhaustive candidate
  3218. Assert((flags & CollectHeuristic_Never) == 0);
  3219. BOOL isScriptContextCloseGCPending = FALSE;
  3220. const BOOL allocSize = flags & CollectHeuristic_AllocSize;
  3221. const BOOL timedIfScriptActive = flags & CollectHeuristic_TimeIfScriptActive;
  3222. const BOOL timedIfInScript = flags & CollectHeuristic_TimeIfInScript;
  3223. const BOOL timed = (timedIfScriptActive && isScriptActive) || (timedIfInScript && isInScript) || (flags & CollectHeuristic_Time);
  3224. if ((flags & CollectOverride_CheckScriptContextClose) != 0)
  3225. {
  3226. isScriptContextCloseGCPending = this->collectionWrapper->GetIsScriptContextCloseGCPending();
  3227. }
  3228. // If there is a script context close GC pending, we need to do a GC regardless
  3229. // Otherwise, we should check the heuristics to see if a GC is necessary
  3230. if (!isScriptContextCloseGCPending)
  3231. {
  3232. #if ENABLE_PARTIAL_GC
  3233. if (GetPartialFlag<flags>())
  3234. {
  3235. Assert(enablePartialCollect);
  3236. Assert(allocSize);
  3237. Assert(this->uncollectedNewPageCountPartialCollect >= RecyclerSweepManager::MinPartialUncollectedNewPageCount
  3238. && this->uncollectedNewPageCountPartialCollect <= RecyclerHeuristic::Instance.MaxPartialUncollectedNewPageCount);
3239. // PARTIAL-GC-REVIEW: For now, we only have the alloc-size heuristic
  3240. // Maybe improve this heuristic by looking at how many free pages are in the page allocator.
  3241. if (autoHeap.uncollectedNewPageCount > this->uncollectedNewPageCountPartialCollect)
  3242. {
  3243. #ifdef ENABLE_JS_ETW
  3244. if (IS_UNKNOWN_GC_TRIGGER(collectionStartReason))
  3245. {
  3246. collectionStartReason = ETWEventGCActivationTrigger::ETWEvent_GC_Trigger_Partial_GC_AllocSize_Heuristic;
  3247. }
  3248. #endif
  3249. return Collect<flags>();
  3250. }
  3251. }
  3252. #endif
  3253. // allocation byte count heuristic, collect every 1 MB allocated
  3254. if (allocSize && (autoHeap.uncollectedAllocBytes < RecyclerHeuristic::UncollectedAllocBytesCollection()))
  3255. {
  3256. return FinishDisposeObjectsWrapped<flags>();
  3257. }
3258. // time heuristic: collect every 1000 clock ticks, or when 64 MB has been allocated in a short time
  3259. if (timed && (autoHeap.uncollectedAllocBytes < RecyclerHeuristic::Instance.MaxUncollectedAllocBytes))
  3260. {
  3261. uint currentTickCount = GetTickCount();
  3262. #ifdef RECYCLER_TRACE
  3263. collectionParam.timeDiff = currentTickCount - tickCountNextCollection;
  3264. #endif
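// The signed cast makes this deadline check robust to GetTickCount() wraparound.
// Illustrative values: tickCountNextCollection == 0x00000010 just after the 32-bit wrap
// and currentTickCount == 0xFFFFFFF0 just before it give (int)0x00000020 == 32 >= 0,
// so the deadline is still correctly treated as pending.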
  3265. if ((int)(tickCountNextCollection - currentTickCount) >= 0)
  3266. {
  3267. return FinishDisposeObjectsWrapped<flags>();
  3268. }
  3269. }
  3270. #ifdef RECYCLER_TRACE
  3271. else
  3272. {
  3273. uint currentTickCount = GetTickCount();
  3274. collectionParam.timeDiff = currentTickCount - tickCountNextCollection;
  3275. }
  3276. #endif
  3277. }
3278. // Passed all the heuristics; do some GC work, maybe. (The partial case was already handled above, so strip the partial flag.)
  3279. return Collect<(CollectionFlags)(flags & ~CollectMode_Partial)>();
  3280. }
  3281. template <CollectionFlags flags>
  3282. BOOL
  3283. Recycler::Collect()
  3284. {
  3285. #if ENABLE_CONCURRENT_GC
  3286. if (this->CollectionInProgress())
  3287. {
  3288. // If we are forced in thread, we can't be concurrent
  3289. // If we are not concurrent we should have been handled before in CollectInternal and we shouldn't be here
  3290. Assert((flags & CollectOverride_ForceInThread) == 0);
  3291. Assert((flags & CollectMode_Concurrent) != 0);
  3292. return TryFinishConcurrentCollect<flags>();
  3293. }
  3294. #endif
3295. // Clear the flag indicating that a GC is pending because of a
3296. // script context close: we're about to do a GC anyway, and the
3297. // current GC will suffice.
  3298. this->collectionWrapper->ClearIsScriptContextCloseGCPending();
  3299. SetupPostCollectionFlags<flags>();
  3300. const BOOL partial = GetPartialFlag<flags>();
  3301. CollectionFlags finalFlags = flags;
  3302. if (!partial)
  3303. {
  3304. finalFlags = (CollectionFlags)(flags & ~CollectMode_Partial);
  3305. }
3306. // ExecuteRecyclerCollectionFunction may throw an exception, in which case we may trigger the assert
3307. // in SetupPostCollectionFlags because we didn't reset the inExhaustiveCollection variable when
3308. // the exception occurred. Use this flag to disable the assertion if an exception occurs.
  3309. DebugOnly(this->hasIncompleteDoCollect = true);
  3310. {
  3311. RECORD_TIMESTAMP(initialCollectionStartTime);
  3312. #ifdef NTBUILD
  3313. this->telemetryBlock->initialCollectionStartProcessUsedBytes = PageAllocator::GetProcessUsedBytes();
  3314. this->telemetryBlock->exhaustiveRepeatedCount = 0;
  3315. #endif
  3316. return DoCollectWrapped(finalFlags);
  3317. }
  3318. }
  3319. template <CollectionFlags flags>
  3320. void Recycler::SetupPostCollectionFlags()
  3321. {
  3322. // If we are not in a collection (collection in progress or in dispose), inExhaustiveCollection should not be set
  3323. // Otherwise, we have missed an exhaustive collection.
  3324. Assert(this->hasIncompleteDoCollect ||
  3325. this->CollectionInProgress() || this->inDispose || (!this->inExhaustiveCollection && !this->inDecommitNowCollection));
  3326. // Record whether we want to start exhaustive detection or do decommit now after GC
  3327. const BOOL exhaustive = flags & CollectMode_Exhaustive;
  3328. const BOOL decommitNow = flags & CollectMode_DecommitNow;
  3329. const BOOL cacheCleanup = flags & CollectMode_CacheCleanup;
  3330. if (decommitNow)
  3331. {
  3332. this->inDecommitNowCollection = true;
  3333. }
  3334. if (exhaustive)
  3335. {
  3336. this->inExhaustiveCollection = true;
  3337. }
  3338. if (cacheCleanup)
  3339. {
  3340. this->inCacheCleanupCollection = true;
  3341. }
  3342. }
  3343. BOOL
  3344. Recycler::DoCollectWrapped(CollectionFlags flags)
  3345. {
  3346. #if ENABLE_CONCURRENT_GC
  3347. this->skipStack = ((flags & CollectOverride_SkipStack) != 0);
  3348. DebugOnly(this->isConcurrentGCOnIdle = (flags == CollectOnScriptIdle));
  3349. #endif
  3350. this->allowDispose = (flags & CollectOverride_AllowDispose) == CollectOverride_AllowDispose;
  3351. BOOL collected = collectionWrapper->ExecuteRecyclerCollectionFunction(this, &Recycler::DoCollect, flags);
  3352. #if ENABLE_CONCURRENT_GC
  3353. Assert(IsConcurrentExecutingState() || IsConcurrentSweepState() || IsConcurrentFinishedState() || !CollectionInProgress());
  3354. #else
  3355. Assert(!CollectionInProgress());
  3356. #endif
  3357. return collected;
  3358. }
  3359. bool
  3360. Recycler::NeedExhaustiveRepeatCollect() const
  3361. {
  3362. return this->inExhaustiveCollection && this->hasExhaustiveCandidate;
  3363. }
  3364. BOOL
  3365. Recycler::DoCollect(CollectionFlags flags)
  3366. {
3367. // ExecuteRecyclerCollectionFunction may throw an exception, in which case we may trigger the assert
3368. // in SetupPostCollectionFlags because we didn't reset the inExhaustiveCollection variable when
3369. // the exception occurred. We are now in DoCollect and no more exceptions should occur, so reset the flag
  3370. DebugOnly(this->hasIncompleteDoCollect = false);
  3371. #ifdef RECYCLER_MEMORY_VERIFY
  3372. this->Verify(Js::RecyclerPhase);
  3373. #endif
  3374. #ifdef RECYCLER_FINALIZE_CHECK
  3375. this->VerifyFinalize();
  3376. #endif
  3377. #if ENABLE_PARTIAL_GC
  3378. BOOL partial = flags & CollectMode_Partial;
  3379. #if DBG && defined(RECYCLER_DUMP_OBJECT_GRAPH)
  3380. // Can't pass in RecyclerPartialStress and DumpObjectGraphOnCollect or call CollectGarbage with DumpObjectGraph
  3381. if (GetRecyclerFlagsTable().RecyclerPartialStress) {
  3382. Assert(!GetRecyclerFlagsTable().DumpObjectGraphOnCollect && !this->dumpObjectOnceOnCollect);
  3383. } else if (GetRecyclerFlagsTable().DumpObjectGraphOnCollect || this->dumpObjectOnceOnCollect) {
  3384. Assert(!GetRecyclerFlagsTable().RecyclerPartialStress);
  3385. }
  3386. #endif
  3387. #ifdef RECYCLER_STRESS
  3388. if (partial && GetRecyclerFlagsTable().RecyclerPartialStress)
  3389. {
  3390. this->inPartialCollectMode = true;
  3391. this->forcePartialScanStack = true;
  3392. }
  3393. #endif
  3394. #endif
  3395. #ifdef RECYCLER_DUMP_OBJECT_GRAPH
  3396. if (dumpObjectOnceOnCollect || GetRecyclerFlagsTable().DumpObjectGraphOnCollect)
  3397. {
  3398. DumpObjectGraph();
  3399. dumpObjectOnceOnCollect = false;
  3400. #if ENABLE_PARTIAL_GC
  3401. // Can't do a partial collect if DumpObjectGraph is set since it'll call FinishPartial
  3402. // which will set inPartialCollectMode to false.
  3403. partial = false;
  3404. #endif
  3405. }
  3406. #endif
  3407. #if ENABLE_CONCURRENT_GC
  3408. const bool concurrent = (flags & CollectMode_Concurrent) != 0;
  3409. const BOOL forceInThread = flags & CollectOverride_ForceInThread;
  3410. #else
  3411. const bool concurrent = false;
  3412. #endif
  3413. // Flush the pending dispose objects first if dispose is allowed
  3414. Assert(!this->CollectionInProgress());
  3415. #if ENABLE_CONCURRENT_GC
  3416. Assert(this->backgroundFinishMarkCount == 0);
  3417. #endif
  3418. bool collected = FinishDisposeObjects();
  3419. do
  3420. {
  3421. INC_TIMESTAMP_FIELD(exhaustiveRepeatedCount);
  3422. RECORD_TIMESTAMP(currentCollectionStartTime);
  3423. #ifdef NTBUILD
  3424. this->telemetryBlock->currentCollectionStartProcessUsedBytes = PageAllocator::GetProcessUsedBytes();
  3425. #endif
  3426. #if ENABLE_CONCURRENT_GC
3427. // DisposeObjects may call script again and start another GC, so we may still be in a concurrent GC state
  3428. if (this->CollectionInProgress())
  3429. {
  3430. Assert(this->IsConcurrentState());
  3431. Assert(collected);
  3432. if (forceInThread)
  3433. {
  3434. return this->FinishConcurrentCollect(flags);
  3435. }
  3436. return true;
  3437. }
  3438. Assert(this->backgroundFinishMarkCount == 0);
  3439. #endif
  3440. #ifdef ENABLE_JS_ETW
  3441. this->collectionStartFlags = flags;
  3442. if (flags == CollectOnScriptIdle)
  3443. {
  3444. collectionStartReason = ETWEventGCActivationTrigger::ETWEvent_GC_Trigger_IdleCollect;
  3445. }
  3446. const BOOL timedIfScriptActive = flags & CollectHeuristic_TimeIfScriptActive;
  3447. const BOOL timedIfInScript = flags & CollectHeuristic_TimeIfInScript;
  3448. if (IS_UNKNOWN_GC_TRIGGER(collectionStartReason) && (flags & CollectHeuristic_Mask))
  3449. {
  3450. if (timedIfScriptActive)
  3451. {
  3452. collectionStartReason = ETWEventGCActivationTrigger::ETWEvent_GC_Trigger_TimeAndAllocSizeIfScriptActive_Heuristic;
  3453. }
  3454. else if (timedIfInScript)
  3455. {
  3456. collectionStartReason = ETWEventGCActivationTrigger::ETWEvent_GC_Trigger_TimeAndAllocSizeIfInScript_Heuristic;
  3457. }
  3458. else
  3459. {
  3460. collectionStartReason = ETWEventGCActivationTrigger::ETWEvent_GC_Trigger_TimeAndAllocSize_Heuristic;
  3461. }
  3462. }
  3463. if (IS_UNKNOWN_GC_TRIGGER(collectionStartReason))
  3464. {
  3465. this->collectionStartReason = ETWEventGCActivationTrigger::ETWEvent_GC_Trigger_NoHeuristic;
  3466. }
  3467. #endif
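// Trigger bookkeeping above, summarized: CollectOnScriptIdle maps to IdleCollect;
// otherwise, if any CollectHeuristic bit is set, TimeIfScriptActive selects
// TimeAndAllocSizeIfScriptActive, TimeIfInScript selects TimeAndAllocSizeIfInScript,
// and any other heuristic selects TimeAndAllocSize; with no heuristic bits set at
// all, the start reason is recorded as NoHeuristic.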
#if DBG || defined RECYCLER_TRACE
collectionCount++;
#endif
this->SetCollectionState(Collection_PreCollection);
collectionWrapper->PreCollectionCallBack(flags);
this->SetCollectionState(CollectionStateNotCollecting);
hasExhaustiveCandidate = false; // reset the candidate detection
#ifdef RECYCLER_STATS
#if ENABLE_PARTIAL_GC
RecyclerCollectionStats oldCollectionStats = collectionStats;
#endif
memset(&collectionStats, 0, sizeof(RecyclerCollectionStats));
this->collectionStats.startCollectAllocBytes = autoHeap.uncollectedAllocBytes;
#if ENABLE_PARTIAL_GC
this->collectionStats.startCollectNewPageCount = autoHeap.uncollectedNewPageCount;
this->collectionStats.uncollectedNewPageCountPartialCollect = this->uncollectedNewPageCountPartialCollect;
#endif
#endif
#if ENABLE_PARTIAL_GC
if (partial)
{
#if ENABLE_CONCURRENT_GC
Assert(!forceInThread);
#endif
#ifdef RECYCLER_STATS
// We are only doing a partial GC, copy some old stats
collectionStats.finalizeCount = oldCollectionStats.finalizeCount;
memcpy(collectionStats.heapBlockCount, oldCollectionStats.smallNonLeafHeapBlockPartialUnusedCount,
sizeof(oldCollectionStats.smallNonLeafHeapBlockPartialUnusedCount));
memcpy(collectionStats.heapBlockFreeByteCount, oldCollectionStats.smallNonLeafHeapBlockPartialUnusedBytes,
sizeof(oldCollectionStats.smallNonLeafHeapBlockPartialUnusedBytes));
memcpy(collectionStats.smallNonLeafHeapBlockPartialUnusedCount, oldCollectionStats.smallNonLeafHeapBlockPartialUnusedCount,
sizeof(oldCollectionStats.smallNonLeafHeapBlockPartialUnusedCount));
memcpy(collectionStats.smallNonLeafHeapBlockPartialUnusedBytes, oldCollectionStats.smallNonLeafHeapBlockPartialUnusedBytes,
sizeof(oldCollectionStats.smallNonLeafHeapBlockPartialUnusedBytes));
#endif
Assert(enablePartialCollect && inPartialCollectMode);
if (!this->PartialCollect(concurrent))
{
return collected;
}
// This disables partial collect if we do a repeated exhaustive GC
partial = false;
collected = true;
continue;
}
// Not doing partial collect, we should decommit on finish collect
decommitOnFinish = true;
if (inPartialCollectMode)
{
// finish the partial collect first
FinishPartialCollect();
// Old heap blocks with free objects are made available; count that as being collected
collected = true;
// PARTIAL-GC-CONSIDER: should we just pretend we did a GC, since we have made the free-listed objects
// available to be used, instead of starting off another GC?
}
#endif
#if ENABLE_CONCURRENT_GC
bool skipConcurrent = false;
#ifdef ENABLE_DEBUG_CONFIG_OPTIONS
// If the below flag is passed in, skip doing a non-blocking concurrent collect. Instead,
// we will do a blocking concurrent collect, which is basically an in-thread GC
skipConcurrent = GetRecyclerFlagsTable().ForceBlockingConcurrentCollect;
#endif
// We are about to start a collection. Reset our heuristic counters now, so that
// any allocations that occur during concurrent collection count toward the next collection's threshold.
ResetHeuristicCounters();
if (concurrent && !skipConcurrent)
{
Assert(!forceInThread);
if (enableConcurrentMark)
{
if (StartBackgroundMarkCollect())
{
// Tell the caller whether we have finished a collection and there may be free objects to reuse
return collected;
}
// Either ResetWriteWatch failed or the thread service failed,
// so concurrent mark is disabled, at least for now
}
if (enableConcurrentSweep)
{
if (StartConcurrentSweepCollect())
{
collected = true;
continue;
}
// out of memory during collection
return collected;
}
// concurrent collection failed, fall back to non-concurrent collection
}
if (!forceInThread && enableConcurrentMark)
{
if (!CollectOnConcurrentThread())
{
// timed out or ran out of memory during collection
return collected;
}
}
else
#endif
{
if (!CollectOnAllocatorThread())
{
// out of memory during collection
return collected;
}
}
collected = true;
#ifdef RECYCLER_TRACE
collectionParam.repeat = true;
#endif
}
while (this->NeedExhaustiveRepeatCollect());
#if ENABLE_CONCURRENT_GC
// DisposeObject may call script again and start another GC, so we may still be in concurrent GC state
if (this->CollectionInProgress())
{
Assert(this->IsConcurrentState());
Assert(collected);
return true;
}
#endif
EndCollection();
// Tell the caller whether we have finished a collection and there may be free objects to reuse
return collected;
}
void
Recycler::EndCollection()
{
#if ENABLE_CONCURRENT_GC
Assert(this->backgroundFinishMarkCount == 0);
#endif
Assert(!this->CollectionInProgress());
// no more collection is requested, we can turn exhaustive back off
this->inExhaustiveCollection = false;
if (this->inDecommitNowCollection || CUSTOM_CONFIG_FLAG(GetRecyclerFlagsTable(), ForceDecommitOnCollect))
{
#ifdef RECYCLER_TRACE
if (GetRecyclerFlagsTable().Trace.IsEnabled(Js::RecyclerPhase))
{
Output::Print(_u("%04X> RC(%p): %s\n"), this->mainThreadId, this, _u("Decommit now"));
}
#endif
autoHeap.DecommitNow();
this->inDecommitNowCollection = false;
}
RECORD_TIMESTAMP(lastCollectionEndTime);
}
#if ENABLE_PARTIAL_GC
bool
Recycler::PartialCollect(bool concurrent)
{
Assert(IsMarkStackEmpty());
Assert(this->inPartialCollectMode);
Assert(collectionState == CollectionStateNotCollecting);
// Rescan again
this->SetCollectionState(CollectionStateRescanFindRoots);
#if ENABLE_CONCURRENT_GC
if (concurrent && enableConcurrentMark && this->partialConcurrentNextCollection)
{
this->PrepareBackgroundFindRoots();
if (StartConcurrent(CollectionStateConcurrentFinishMark))
{
#ifdef RECYCLER_TRACE
PrintCollectTrace(Js::ConcurrentPartialCollectPhase);
#endif
return false;
}
this->RevertPrepareBackgroundFindRoots();
}
#endif
#ifdef RECYCLER_STRESS
if (forcePartialScanStack)
{
// Mark the roots, since they may not have been marked
// in RecyclerPartialStress mode
this->RootMark(collectionState);
}
#endif
#ifdef RECYCLER_TRACE
PrintCollectTrace(Js::PartialCollectPhase);
#endif
bool needConcurrentSweep = false;
this->CollectionBegin<Js::PartialCollectPhase>();
size_t rescanRootBytes = FinishMark(INFINITE);
Assert(rescanRootBytes != Recycler::InvalidScanRootBytes);
needConcurrentSweep = this->Sweep(rescanRootBytes, concurrent, true);
this->CollectionEnd<Js::PartialCollectPhase>();
// Only reset the new page counter
autoHeap.uncollectedNewPageCount = 0;
// Finish collection
FinishCollection(needConcurrentSweep);
return true;
}
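// Return-value contract, for reference: PartialCollect returns false only when the
// finish-mark work was handed off to the background thread via
// StartConcurrent(CollectionStateConcurrentFinishMark); in that case DoCollect
// returns to its caller without repeating. When the partial collect completes
// in-thread it returns true, allowing DoCollect's exhaustive-repeat loop to continue.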
void
Recycler::ProcessClientTrackedObjects()
{
GCETW(GC_PROCESS_CLIENT_TRACKED_OBJECT_START, (this));
Assert(this->inPartialCollectMode);
#if ENABLE_CONCURRENT_GC
Assert(!this->DoQueueTrackedObject());
#endif
if (!this->clientTrackedObjectList.Empty())
{
SListBase<void *>::Iterator iter(&this->clientTrackedObjectList);
while (iter.Next())
{
auto& reference = iter.Data();
this->TryMarkNonInterior(reference, &reference /* parentReference */); // Reference to inside the node
RECYCLER_STATS_INC(this, clientTrackedObjectCount);
}
this->clientTrackedObjectList.Clear(&this->clientTrackedObjectAllocator);
}
GCETW(GC_PROCESS_CLIENT_TRACKED_OBJECT_STOP, (this));
}
void
Recycler::ClearPartialCollect()
{
#if ENABLE_CONCURRENT_GC
Assert(!this->DoQueueTrackedObject());
#endif
this->autoHeap.unusedPartialCollectFreeBytes = 0;
this->partialUncollectedAllocBytes = 0;
this->clientTrackedObjectList.Clear(&this->clientTrackedObjectAllocator);
this->uncollectedNewPageCountPartialCollect = (size_t)-1;
}
void
Recycler::FinishPartialCollect(RecyclerSweepManager * recyclerSweepManager)
{
Assert(recyclerSweepManager == nullptr || !recyclerSweepManager->IsBackground());
RECYCLER_PROFILE_EXEC_BEGIN(this, Js::FinishPartialPhase);
Assert(inPartialCollectMode);
#if ENABLE_CONCURRENT_GC
Assert(!this->DoQueueTrackedObject());
#endif
autoHeap.FinishPartialCollect(recyclerSweepManager);
this->inPartialCollectMode = false;
ClearPartialCollect();
RECYCLER_PROFILE_EXEC_END(this, Js::FinishPartialPhase);
}
#endif
void
Recycler::EnsureNotCollecting()
{
#if ENABLE_CONCURRENT_GC
FinishConcurrent<ForceFinishCollection>();
#endif
Assert(!this->CollectionInProgress());
}
void Recycler::EnumerateObjects(ObjectInfoBits infoBits, void (*CallBackFunction)(void * address, size_t size))
{
// Make sure we are not collecting
EnsureNotCollecting();
#if ENABLE_PARTIAL_GC
// We are updating the free bit vector, messing up the partial collection state.
// Just get out of partial collect mode
// GC-CONSIDER: consider adding an option in FinishConcurrent to not get into partial collect mode during sweep.
if (inPartialCollectMode)
{
FinishPartialCollect();
}
#endif
autoHeap.EnumerateObjects(infoBits, CallBackFunction);
// GC-TODO: Explicit heap?
}
BOOL
Recycler::IsMarkState() const
{
return (collectionState & Collection_Mark);
}
BOOL
Recycler::IsFindRootsState() const
{
return (collectionState & Collection_FindRoots);
}
#if DBG
BOOL
Recycler::IsReentrantState() const
{
#if ENABLE_CONCURRENT_GC
return !this->CollectionInProgress() || this->IsConcurrentState();
#else
return !this->CollectionInProgress();
#endif
}
#endif
#if defined(ENABLE_JS_ETW) && defined(NTBUILD)
template <Js::Phase phase> static ETWEventGCActivationKind GetETWEventGCActivationKind();
template <> ETWEventGCActivationKind GetETWEventGCActivationKind<Js::GarbageCollectPhase>() { return ETWEvent_GarbageCollect; }
template <> ETWEventGCActivationKind GetETWEventGCActivationKind<Js::ThreadCollectPhase>() { return ETWEvent_ThreadCollect; }
template <> ETWEventGCActivationKind GetETWEventGCActivationKind<Js::ConcurrentCollectPhase>() { return ETWEvent_ConcurrentCollect; }
template <> ETWEventGCActivationKind GetETWEventGCActivationKind<Js::PartialCollectPhase>() { return ETWEvent_PartialCollect; }
#endif
template <Js::Phase phase>
void
Recycler::CollectionBegin()
{
RECYCLER_PROFILE_EXEC_BEGIN2(this, Js::RecyclerPhase, phase);
GCETW_INTERNAL(GC_START, (this, GetETWEventGCActivationKind<phase>()));
GCETW_INTERNAL(GC_START2, (this, GetETWEventGCActivationKind<phase>(), this->collectionStartReason, this->collectionStartFlags));
}
template <Js::Phase phase>
void
Recycler::CollectionEnd()
{
GCETW_INTERNAL(GC_STOP, (this, GetETWEventGCActivationKind<phase>()));
GCETW_INTERNAL(GC_STOP2, (this, GetETWEventGCActivationKind<phase>(), this->collectionFinishReason, this->collectionStartFlags));
RECYCLER_PROFILE_EXEC_END2(this, phase, Js::RecyclerPhase);
}
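// Usage sketch (mirroring the call sites in this file): each collection flavor
// brackets its work with the matching phase, e.g.
//     this->CollectionBegin<Js::ThreadCollectPhase>();
//     ... mark and sweep ...
//     this->CollectionEnd<Js::ThreadCollectPhase>();
// The phase template argument selects the ETW activation kind through the
// GetETWEventGCActivationKind specializations above, so only the four specialized
// phases can be used here.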
#if ENABLE_CONCURRENT_GC
size_t
Recycler::BackgroundRescan(RescanFlags rescanFlags)
{
Assert(!this->isProcessingRescan);
DebugOnly(this->isProcessingRescan = true);
GCETW(GC_BACKGROUNDRESCAN_START, (this, backgroundRescanCount));
RECYCLER_PROFILE_EXEC_BACKGROUND_BEGIN(this, Js::BackgroundRescanPhase);
#if GLOBAL_ENABLE_WRITE_BARRIER
if (CONFIG_FLAG(ForceSoftwareWriteBarrier))
{
pendingWriteBarrierBlockMap.LockResize();
pendingWriteBarrierBlockMap.Map([](void* address, size_t size)
{
RecyclerWriteBarrierManager::WriteBarrier(address, size);
});
pendingWriteBarrierBlockMap.UnlockResize();
}
#endif
size_t rescannedPageCount = heapBlockMap.Rescan(this, ((rescanFlags & RescanFlags_ResetWriteWatch) != 0));
rescannedPageCount += autoHeap.Rescan(rescanFlags);
RECYCLER_PROFILE_EXEC_BACKGROUND_END(this, Js::BackgroundRescanPhase);
GCETW(GC_BACKGROUNDRESCAN_STOP, (this, backgroundRescanCount));
this->backgroundRescanCount++;
if (!this->NeedOOMRescan())
{
if ((rescanFlags & RescanFlags_ResetWriteWatch) != 0)
{
DebugOnly(this->isProcessingRescan = false);
}
return rescannedPageCount;
}
DebugOnly(this->isProcessingRescan = false);
return Recycler::InvalidScanRootBytes;
}
void
Recycler::BackgroundResetWriteWatchAll()
{
GCETW(GC_BACKGROUNDRESETWRITEWATCH_START, (this, -1));
heapBlockMap.ResetDirtyPages(this);
GCETW(GC_BACKGROUNDRESETWRITEWATCH_STOP, (this, -1));
}
#endif
size_t
Recycler::FinishMarkRescan(bool background)
{
#if !ENABLE_CONCURRENT_GC
Assert(!background);
#endif
if (background)
{
GCETW(GC_BACKGROUNDRESCAN_START, (this, 0));
}
else
{
GCETW(GC_RESCAN_START, (this));
}
RECYCLER_PROFILE_EXEC_THREAD_BEGIN(background, this, Js::RescanPhase);
#if ENABLE_CONCURRENT_GC
RescanFlags const flags = (background ? RescanFlags_ResetWriteWatch : RescanFlags_None);
#else
Assert(!background);
RescanFlags const flags = RescanFlags_None;
#endif
#if DBG
Assert(!this->isProcessingRescan);
this->isProcessingRescan = true;
#endif
#if ENABLE_CONCURRENT_GC
size_t scannedPageCount = heapBlockMap.Rescan(this, ((flags & RescanFlags_ResetWriteWatch) != 0));
scannedPageCount += autoHeap.Rescan(flags);
#else
size_t scannedPageCount = 0;
#endif
DebugOnly(this->isProcessingRescan = false);
RECYCLER_PROFILE_EXEC_THREAD_END(background, this, Js::RescanPhase);
if (background)
{
GCETW(GC_BACKGROUNDRESCAN_STOP, (this, 0));
}
else
{
GCETW(GC_RESCAN_STOP, (this));
}
return scannedPageCount;
}
#if ENABLE_CONCURRENT_GC
void
Recycler::ProcessTrackedObjects()
{
GCETW(GC_PROCESS_TRACKED_OBJECT_START, (this));
#if ENABLE_PARTIAL_GC
Assert(this->clientTrackedObjectList.Empty());
Assert(!this->inPartialCollectMode);
#endif
Assert(this->DoQueueTrackedObject());
this->queueTrackedObject = false;
DebugOnly(this->isProcessingTrackedObjects = true);
markContext.ProcessTracked();
// If we did a parallel mark, we need to process any queued tracked objects from the parallel mark stack as well.
// If we didn't, this will do nothing.
parallelMarkContext1.ProcessTracked();
parallelMarkContext2.ProcessTracked();
parallelMarkContext3.ProcessTracked();
DebugOnly(this->isProcessingTrackedObjects = false);
GCETW(GC_PROCESS_TRACKED_OBJECT_STOP, (this));
}
#endif
BOOL
Recycler::RequestConcurrentWrapperCallback()
{
#if ENABLE_CONCURRENT_GC
Assert(!IsConcurrentExecutingState() && !IsConcurrentSweepState());
// Save the original collection state
CollectionState oldState = this->collectionState;
// Get the background thread to start the callback
if (StartConcurrent(CollectionStateConcurrentWrapperCallback))
{
// Wait for the callback to complete
WaitForConcurrentThread(INFINITE, RecyclerWaitReason::RequestConcurrentCallbackWrapper);
// The state must not change back until we restore the original state
Assert(collectionState == CollectionStateConcurrentWrapperCallback);
this->SetCollectionState(oldState);
return true;
}
#endif
return false;
}
#if ENABLE_CONCURRENT_GC
/*------------------------------------------------------------------------------------------------
 * Concurrent
 *------------------------------------------------------------------------------------------------*/
BOOL
Recycler::CollectOnConcurrentThread()
{
#if ENABLE_PARTIAL_GC
Assert(!inPartialCollectMode);
#endif
#ifdef RECYCLER_TRACE
PrintCollectTrace(Js::ThreadCollectPhase);
#endif
this->CollectionBegin<Js::ThreadCollectPhase>();
// Synchronous concurrent mark
if (!StartSynchronousBackgroundMark())
{
this->CollectionEnd<Js::ThreadCollectPhase>();
return false;
}
const DWORD waitTime = RecyclerHeuristic::FinishConcurrentCollectWaitTime(this->GetRecyclerFlagsTable());
GCETW(GC_SYNCHRONOUSMARKWAIT_START, (this, waitTime));
const BOOL waited = WaitForConcurrentThread(waitTime, RecyclerWaitReason::CollectOnConcurrentThread);
GCETW(GC_SYNCHRONOUSMARKWAIT_STOP, (this, !waited));
if (!waited)
{
#ifdef RECYCLER_TRACE
if (GetRecyclerFlagsTable().Trace.IsEnabled(Js::RecyclerPhase)
|| GetRecyclerFlagsTable().Trace.IsEnabled(Js::ThreadCollectPhase))
{
Output::Print(_u("%04X> RC(%p): %s: %s\n"), this->mainThreadId, this, Js::PhaseNames[Js::ThreadCollectPhase], _u("Timeout"));
}
#endif
#ifdef ENABLE_JS_ETW
collectionFinishReason = ETWEventGCActivationTrigger::ETWEvent_GC_Trigger_Status_FailedTimeout;
#endif
this->CollectionEnd<Js::ThreadCollectPhase>();
return false;
}
// If the concurrent thread was done within the time limit, there shouldn't be
// any objects that need to be rescanned
// CONCURRENT-TODO: Optimize it so we don't rescan in the background if we are still waiting
// GC-TODO: Unfortunately we can't assert this, as the background code gen thread may still
// touch GC memory (e.g. FunctionBody), causing write watch and rescan
// in the background.
// Assert(markContext.Empty());
DebugOnly(this->isProcessingRescan = false);
this->SetCollectionState(CollectionStateMark);
this->ProcessTrackedObjects();
this->ProcessMark(false);
this->EndMark();
// Partial collect mode is not re-enabled after a non-partial in-thread GC because partial GC heuristics are not adjusted
// after a full in-thread GC. Enabling partial collect mode causes partial GC heuristics to be reset before the next full
// in-thread GC, thereby allowing partial GC to kick in more easily without being able to adjust heuristics after the full
// GCs. Until we have a way of adjusting partial GC heuristics after a full in-thread GC, once partial collect mode is
// turned off, it will remain off until a concurrent GC happens
this->Sweep();
this->CollectionEnd<Js::ThreadCollectPhase>();
FinishCollection();
return true;
}
// explicit instantiation
template BOOL Recycler::FinishConcurrent<FinishConcurrentOnIdle>();
template BOOL Recycler::FinishConcurrent<FinishConcurrentOnIdleAtRoot>();
template BOOL Recycler::FinishConcurrent<FinishConcurrentDefault>();
template BOOL Recycler::FinishConcurrent<ForceFinishCollection>();
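// Usage sketch: callers choose one of the four instantiations above as a
// compile-time policy, e.g. an idle-time caller would use
//     recycler->FinishConcurrent<FinishConcurrentOnIdle>();
// while EnsureNotCollecting (earlier in this file) uses
// FinishConcurrent<ForceFinishCollection>() to block until any in-progress
// collection is done. The CompileAssert in the definition below rejects any flag
// outside the supported override set.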
template <CollectionFlags flags>
BOOL
Recycler::FinishConcurrent()
{
CompileAssert((flags & ~(CollectOverride_AllowDispose | CollectOverride_ForceFinish | CollectOverride_ForceInThread
| CollectMode_Concurrent | CollectOverride_DisableIdleFinish | CollectOverride_BackgroundFinishMark
| CollectOverride_SkipStack | CollectOverride_FinishConcurrentTimeout)) == 0);
if (this->CollectionInProgress())
{
Assert(this->IsConcurrentEnabled());
Assert(IsConcurrentState());
const BOOL forceFinish = flags & CollectOverride_ForceFinish;
if (forceFinish || !IsConcurrentExecutingState())
{
#if ENABLE_BACKGROUND_PAGE_FREEING
if (CONFIG_FLAG(EnableBGFreeZero))
{
if (this->IsConcurrentSweepState())
{
// Help the background thread zero queued pages and flush background pages
// if we are going to wait anyway.
autoHeap.ZeroQueuedPages();
autoHeap.FlushBackgroundPages();
}
}
#endif
#ifdef RECYCLER_TRACE
collectionParam.finishOnly = true;
collectionParam.flags = flags;
#endif
#if ENABLE_CONCURRENT_GC
// If SkipStack is provided, and we're not forcing the finish (i.e., we're not in the concurrent
// executing state), then it's fine to set the skipStack flag to true, so that during the
// in-thread find-roots we'll skip the stack scan
this->skipStack = ((flags & CollectOverride_SkipStack) != 0) && !forceFinish;
#if DBG
this->isFinishGCOnIdle = (flags == FinishConcurrentOnIdleAtRoot);
#endif
#endif
return FinishConcurrentCollectWrapped(flags);
}
}
return false;
}
template <CollectionFlags flags>
BOOL
Recycler::TryFinishConcurrentCollect()
{
Assert(this->CollectionInProgress());
RECYCLER_STATS_INC(this, finishCollectTryCount);
SetupPostCollectionFlags<flags>();
const BOOL concurrent = flags & CollectMode_Concurrent;
const BOOL forceInThread = flags & CollectOverride_ForceInThread;
Assert(this->IsConcurrentEnabled());
Assert(IsConcurrentState() || IsCollectionDisabled());
Assert(!concurrent || !forceInThread);
if (concurrent && concurrentThread != NULL)
{
if (IsConcurrentExecutingState())
{
if (!this->priorityBoost)
{
uint tickCount = GetTickCount();
if ((autoHeap.uncollectedAllocBytes > RecyclerHeuristic::Instance.UncollectedAllocBytesConcurrentPriorityBoost)
|| (tickCount - this->tickCountStartConcurrent > RecyclerHeuristic::PriorityBoostTimeout(this->GetRecyclerFlagsTable())))
{
#ifdef RECYCLER_TRACE
if (GetRecyclerFlagsTable().Trace.IsEnabled(Js::RecyclerPhase))
{
Output::Print(_u("%04X> RC(%p): %s: "), this->mainThreadId, this, _u("Set priority normal"));
if (autoHeap.uncollectedAllocBytes > RecyclerHeuristic::Instance.UncollectedAllocBytesConcurrentPriorityBoost)
{
Output::Print(_u("AllocBytes=%d (Time=%d)\n"), autoHeap.uncollectedAllocBytes, tickCount - this->tickCountStartConcurrent);
}
else
{
Output::Print(_u("Time=%d (AllocBytes=%d)\n"), tickCount - this->tickCountStartConcurrent, autoHeap.uncollectedAllocBytes);
}
}
#endif
// Set the flag so we don't reset the thread priority again
this->priorityBoost = true;
// The recycler thread hasn't come back in 5 seconds.
// It either has a large object graph, or it is starving.
// Set the priority back to normal
SetThreadPriority(this->concurrentThread, THREAD_PRIORITY_NORMAL);
}
}
return FinishDisposeObjectsWrapped<flags>();
}
}
else if ((flags & CollectOverride_FinishConcurrentTimeout) != 0)
{
uint tickCount = GetTickCount();
// If we haven't gone past the time to call finish collection,
// simply call FinishDisposeObjects and return
// Otherwise, actually go ahead and call FinishConcurrentCollectWrapped
// We do this only if this is a collection that allows finish concurrent to timeout
// If not, by default, we finish the collection
if (tickCount <= this->tickCountNextFinishCollection)
{
return FinishDisposeObjectsWrapped<flags>();
}
}
}
return FinishConcurrentCollectWrapped(flags);
}
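// Worked example of the boost heuristic above: the concurrent thread ordinarily runs
// at background priority, and tickCountStartConcurrent is recorded when the pass
// starts (see StartConcurrent). The "5 seconds" in the comment corresponds to
// PriorityBoostTimeout: on each poll while the background pass is still executing,
// the unsigned difference GetTickCount() - tickCountStartConcurrent (wrap-safe
// modulo 2^32) is compared against that timeout, and uncollectedAllocBytes against
// UncollectedAllocBytesConcurrentPriorityBoost. Once either threshold trips, the
// thread is raised to THREAD_PRIORITY_NORMAL and priorityBoost latches so the
// adjustment is applied at most once per concurrent pass.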
BOOL
Recycler::IsConcurrentMarkState() const
{
return (collectionState & Collection_ConcurrentMark) == Collection_ConcurrentMark;
}
BOOL
Recycler::IsConcurrentMarkExecutingState() const
{
return (collectionState & (Collection_ConcurrentMark | Collection_ExecutingConcurrent)) == (Collection_ConcurrentMark | Collection_ExecutingConcurrent);
}
BOOL
Recycler::IsConcurrentResetMarksState() const
{
return collectionState == CollectionStateConcurrentResetMarks;
}
BOOL
Recycler::IsInThreadFindRootsState() const
{
CollectionState currentCollectionState = collectionState;
return (currentCollectionState & Collection_FindRoots) && (currentCollectionState != CollectionStateConcurrentFindRoots);
}
BOOL
Recycler::IsConcurrentFindRootState() const
{
return collectionState == CollectionStateConcurrentFindRoots;
}
BOOL
Recycler::IsConcurrentExecutingState() const
{
return (collectionState & Collection_ExecutingConcurrent);
}
BOOL
Recycler::IsConcurrentSweepExecutingState() const
{
return (collectionState & (Collection_ConcurrentSweep | Collection_ExecutingConcurrent)) == (Collection_ConcurrentSweep | Collection_ExecutingConcurrent);
}
BOOL
Recycler::IsConcurrentSweepSetupState() const
{
return (collectionState & CollectionStateSetupConcurrentSweep) == CollectionStateSetupConcurrentSweep;
}
BOOL
Recycler::IsConcurrentSweepState() const
{
#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
if (CONFIG_FLAG_RELEASE(EnableConcurrentSweepAlloc))
{
return this->collectionState == CollectionStateConcurrentSweepPass1 ||
this->collectionState == CollectionStateConcurrentSweepPass1Wait ||
this->collectionState == CollectionStateConcurrentSweepPass2 ||
this->collectionState == CollectionStateConcurrentSweepPass2Wait;
}
else
#endif
{
return this->collectionState == CollectionStateConcurrentSweep;
}
}
BOOL
Recycler::IsConcurrentState() const
{
return (collectionState & Collection_Concurrent);
}
#if DBG
BOOL
Recycler::IsConcurrentFinishedState() const
{
return (collectionState & Collection_FinishConcurrent);
}
#endif
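// How these predicates compose (reference note): collectionState is a set of
// Collection_* bits, so a compound state such as "executing concurrent mark" is
// tested by requiring both bits at once:
//     (collectionState & (Collection_ConcurrentMark | Collection_ExecutingConcurrent))
//         == (Collection_ConcurrentMark | Collection_ExecutingConcurrent)
// Single-bit predicates (IsConcurrentExecutingState, IsConcurrentState) only need a
// non-zero intersection, while exact-state predicates such as
// IsConcurrentResetMarksState compare against one specific CollectionState value.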
bool
Recycler::InitializeConcurrent(JsUtil::ThreadService *threadService)
{
try
{
AUTO_NESTED_HANDLED_EXCEPTION_TYPE(ExceptionType_OutOfMemory);
concurrentWorkDoneEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
if (concurrentWorkDoneEvent == nullptr)
{
throw Js::OutOfMemoryException();
}
#if DBG_DUMP
markContext.GetPageAllocator()->debugName = _u("ConcurrentCollect");
#endif
if (!threadService->HasCallback())
{
#ifdef IDLE_DECOMMIT_ENABLED
concurrentIdleDecommitEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
if (concurrentIdleDecommitEvent == nullptr)
{
throw Js::OutOfMemoryException();
}
#endif
concurrentWorkReadyEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
if (concurrentWorkReadyEvent == nullptr)
{
throw Js::OutOfMemoryException();
}
}
}
catch (Js::OutOfMemoryException)
{
Assert(concurrentWorkReadyEvent == nullptr);
if (concurrentWorkDoneEvent)
{
CloseHandle(concurrentWorkDoneEvent);
concurrentWorkDoneEvent = nullptr;
}
#ifdef IDLE_DECOMMIT_ENABLED
if (concurrentIdleDecommitEvent)
{
CloseHandle(concurrentIdleDecommitEvent);
concurrentIdleDecommitEvent = nullptr;
}
#endif
return false;
}
return true;
}
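// Handshake sketch (assuming the dedicated-thread configuration, i.e. no thread
// service): the main thread and the background thread rendezvous on the two
// auto-reset events created above, roughly:
//     // main thread                          // background thread
//     SetEvent(concurrentWorkReadyEvent);     WaitForSingleObject(concurrentWorkReadyEvent, ...);
//     WaitForSingleObject(                    ... do background GC work ...
//         concurrentWorkDoneEvent, ...);      SetEvent(concurrentWorkDoneEvent);
// StartConcurrent and WaitForConcurrentThread below implement the main-thread half;
// the thread-service path never creates concurrentWorkReadyEvent (it is only
// created when threadService->HasCallback() is false).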
#pragma prefast(suppress:6262, "Callers of this function should have ample stack space")
bool Recycler::AbortConcurrent(bool restoreState)
{
Assert(!this->CollectionInProgress() || this->IsConcurrentState());
// In case the thread already died, wait for that too
HANDLE handle[2] = { concurrentWorkDoneEvent, concurrentThread };
// Note, concurrentThread will be null if we have a threadService.
Assert(concurrentThread != NULL || threadService->HasCallback());
DWORD handleCount = (concurrentThread == NULL ? 1 : 2);
DWORD ret = WAIT_OBJECT_0;
if (this->IsConcurrentState())
{
this->isAborting = true;
if (this->concurrentThread != NULL)
{
SetThreadPriority(this->concurrentThread, THREAD_PRIORITY_NORMAL);
}
ret = WaitForMultipleObjectsEx(handleCount, handle, FALSE, INFINITE, FALSE);
this->isAborting = false;
Assert(this->IsConcurrentFinishedState() || ret == WAIT_OBJECT_0 + 1);
if (ret == WAIT_OBJECT_0 && restoreState)
{
if (collectionState == CollectionStateRescanWait)
{
this->ResetMarkCollectionState();
}
#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
else if (collectionState == CollectionStateConcurrentSweepPass1Wait)
{
// Make sure we don't do another GC after finishing this one.
this->inExhaustiveCollection = false;
this->FinishSweepPrep();
this->FinishConcurrentSweepPass1();
this->SetCollectionState(CollectionStateConcurrentSweepPass2);
this->recyclerSweepManager->FinishSweep();
this->FinishConcurrentSweep();
this->recyclerSweepManager->EndBackground();
uint sweptBytes = 0;
#ifdef RECYCLER_STATS
sweptBytes = (uint)collectionStats.objectSweptBytes;
#endif
GCETW(GC_BACKGROUNDSWEEP_STOP, (this, sweptBytes));
this->SetCollectionState(CollectionStateTransferSweptWait);
RECYCLER_PROFILE_EXEC_BACKGROUND_END(this, Js::ConcurrentSweepPhase);
// AbortConcurrent already consumed the event from the concurrent thread, just signal it so
// FinishConcurrentCollect can wait for it again.
SetEvent(this->concurrentWorkDoneEvent);
EnsureNotCollecting();
}
#endif
else if (collectionState == CollectionStateTransferSweptWait)
{
// Make sure we don't do another GC after finishing this one.
this->inExhaustiveCollection = false;
// Let's just finish the sweep so that GC is in a consistent state, but don't run dispose
// AbortConcurrent already consumed the event from the concurrent thread, just signal it so
// FinishConcurrentCollect can wait for it again.
SetEvent(this->concurrentWorkDoneEvent);
EnsureNotCollecting();
}
else
{
Assert(UNREACHED);
}
Assert(collectionState == CollectionStateNotCollecting);
Assert(this->isProcessingRescan == false);
}
else
{
// If we are shutting down and the wait for concurrent thread failed we fail fast
// to avoid any use-after-free of the objects in the HeapAllocator's private heap.
if (!restoreState)
{
AssertOrFailFastMsg(ret != WAIT_FAILED, "Wait for concurrent thread failed in AbortConcurrent.");
}
// Even if we weren't asked to restore state, we need to clean up the pending guest arenas
CleanupPendingUnroot();
// Also need to release any pages held by the mark stack, if we abandoned it
markContext.Abort();
}
}
Assert(!this->hasPendingDeleteGuestArena);
return ret == WAIT_OBJECT_0;
}
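// Note on the wait above: waiting on { concurrentWorkDoneEvent, concurrentThread }
// covers both a normally completing background pass (the event is signaled,
// WAIT_OBJECT_0) and a background thread that died (the thread handle is signaled,
// WAIT_OBJECT_0 + 1); the assert after the wait relies on exactly that distinction.
// With a thread service there is no thread handle, so only the event is waited on
// (handleCount == 1).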
void
Recycler::CleanupPendingUnroot()
{
Assert(!this->hasPendingConcurrentFindRoot);
if (hasPendingUnpinnedObject)
{
pinnedObjectMap.MapAndRemoveIf([](void * obj, PinRecord const &refCount)
{
#if defined(CHECK_MEMORY_LEAK) || defined(LEAK_REPORT)
#ifdef STACK_BACK_TRACE
Assert(refCount != 0 || refCount.stackBackTraces == nullptr);
#endif
#endif
return refCount == 0;
});
hasPendingUnpinnedObject = false;
}
if (hasPendingDeleteGuestArena)
{
DebugOnly(bool foundPendingDelete = false);
DListBase<GuestArenaAllocator>::EditingIterator guestArenaIter(&guestArenaList);
while (guestArenaIter.Next())
{
GuestArenaAllocator& allocator = guestArenaIter.Data();
if (allocator.pendingDelete)
{
allocator.SetLockBlockList(false);
guestArenaIter.RemoveCurrent(&HeapAllocator::Instance);
DebugOnly(foundPendingDelete = true);
}
}
hasPendingDeleteGuestArena = false;
Assert(foundPendingDelete);
}
#if DBG
else
{
DListBase<GuestArenaAllocator>::Iterator guestArenaIter(&guestArenaList);
while (guestArenaIter.Next())
{
GuestArenaAllocator& allocator = guestArenaIter.Data();
Assert(!allocator.pendingDelete);
}
}
#endif
}
void
Recycler::FinalizeConcurrent(bool restoreState)
{
bool needCleanExitState = restoreState;
#if defined(RECYCLER_DUMP_OBJECT_GRAPH)
needCleanExitState = needCleanExitState || GetRecyclerFlagsTable().DumpObjectGraphOnExit;
#endif
#ifdef LEAK_REPORT
needCleanExitState = needCleanExitState || GetRecyclerFlagsTable().IsEnabled(Js::LeakReportFlag);
#endif
#ifdef CHECK_MEMORY_LEAK
needCleanExitState = needCleanExitState || GetRecyclerFlagsTable().CheckMemoryLeak;
#endif
bool aborted = AbortConcurrent(needCleanExitState);
SetCollectionState(CollectionStateExit);
if (aborted && this->concurrentThread != NULL)
{
HANDLE handle[2] = { concurrentWorkDoneEvent, concurrentThread };
SetEvent(concurrentWorkReadyEvent);
SetThreadPriority(this->concurrentThread, THREAD_PRIORITY_NORMAL);
// In case the thread already died, wait for that too
DWORD fRet = WaitForMultipleObjectsEx(2, handle, FALSE, INFINITE, FALSE);
AssertOrFailFastMsg(fRet != WAIT_FAILED, "Wait for concurrent thread failed. Check handles passed to WaitForMultipleObjectsEx.");
}
// Shut down the parallel mark threads.
parallelThread1.Shutdown();
parallelThread2.Shutdown();
#ifdef IDLE_DECOMMIT_ENABLED
if (concurrentIdleDecommitEvent != nullptr)
{
CloseHandle(concurrentIdleDecommitEvent);
concurrentIdleDecommitEvent = nullptr;
}
#endif
CloseHandle(concurrentWorkDoneEvent);
concurrentWorkDoneEvent = nullptr;
if (concurrentWorkReadyEvent != NULL)
{
CloseHandle(concurrentWorkReadyEvent);
concurrentWorkReadyEvent = nullptr;
}
if (needCleanExitState)
{
// We may do another marking pass to look for memory leaks;
// since we have shut down the concurrent thread, don't do a parallel mark.
this->enableConcurrentMark = false;
this->enableParallelMark = false;
this->enableConcurrentSweep = false;
}
this->threadService = nullptr;
if (concurrentThread != NULL)
{
CloseHandle(concurrentThread);
this->concurrentThread = nullptr;
}
}
bool
Recycler::EnableConcurrent(JsUtil::ThreadService *threadService, bool startAllThreads)
{
if (this->disableConcurrent)
{
return false;
}
if (!this->InitializeConcurrent(threadService))
{
return false;
}
#if ENABLE_DEBUG_CONFIG_OPTIONS
this->enableConcurrentMark = !CUSTOM_PHASE_OFF1(GetRecyclerFlagsTable(), Js::ConcurrentMarkPhase);
this->enableParallelMark = !CUSTOM_PHASE_OFF1(GetRecyclerFlagsTable(), Js::ParallelMarkPhase);
this->enableConcurrentSweep = !CUSTOM_PHASE_OFF1(GetRecyclerFlagsTable(), Js::ConcurrentSweepPhase);
#else
this->enableConcurrentMark = true;
this->enableParallelMark = true;
this->enableConcurrentSweep = true;
#endif
if (this->enableParallelMark && this->maxParallelism == 1)
{
// Disable parallel mark if only 1 CPU
this->enableParallelMark = false;
}
if (threadService->HasCallback())
{
this->threadService = threadService;
return true;
}
else
{
bool startConcurrentThread = true;
bool startedParallelThread1 = false;
bool startedParallelThread2 = false;
if (startAllThreads)
{
if (this->enableParallelMark && this->maxParallelism > 2)
{
if (!parallelThread1.EnableConcurrent(true))
{
startConcurrentThread = false;
}
else
{
startedParallelThread1 = true;
if (this->maxParallelism > 3)
{
if (!parallelThread2.EnableConcurrent(true))
{
startConcurrentThread = false;
}
else
{
startedParallelThread2 = true;
}
}
}
}
}
if (startConcurrentThread)
{
auto concurrentThread = PlatformAgnostic::Thread::Create(Recycler::ConcurrentThreadStackSize,
&Recycler::StaticThreadProc, this,
PlatformAgnostic::Thread::ThreadInitStackSizeParamIsAReservation,
_u("Chakra Background Recycler"));
if (concurrentThread != PlatformAgnostic::Thread::InvalidHandle)
{
HANDLE concurrentThreadWin32Handle = reinterpret_cast<HANDLE>(concurrentThread);
// Wait for recycler thread to initialize
HANDLE handle[2] = { this->concurrentWorkDoneEvent, concurrentThreadWin32Handle };
DWORD ret = WaitForMultipleObjectsEx(2, handle, FALSE, INFINITE, FALSE);
if (ret == WAIT_OBJECT_0)
{
this->threadService = threadService;
this->concurrentThread = concurrentThreadWin32Handle;
return true;
}
CloseHandle(concurrentThreadWin32Handle);
}
}
if (startedParallelThread1)
{
parallelThread1.Shutdown();
if (startedParallelThread2)
{
parallelThread2.Shutdown();
}
}
}
// We failed to start a concurrent thread so we set these back to false and clean up
this->enableConcurrentMark = false;
this->enableParallelMark = false;
this->enableConcurrentSweep = false;
if (concurrentWorkReadyEvent)
{
CloseHandle(concurrentWorkReadyEvent);
concurrentWorkReadyEvent = nullptr;
}
if (concurrentWorkDoneEvent)
{
CloseHandle(concurrentWorkDoneEvent);
concurrentWorkDoneEvent = nullptr;
}
#ifdef IDLE_DECOMMIT_ENABLED
if (concurrentIdleDecommitEvent)
{
CloseHandle(concurrentIdleDecommitEvent);
concurrentIdleDecommitEvent = nullptr;
}
#endif
return false;
}
void
Recycler::ShutdownThread()
{
if (this->IsConcurrentEnabled())
{
Assert(concurrentThread != NULL || threadService->HasCallback());
FinalizeConcurrent(false);
}
}
void
Recycler::DisableConcurrent()
{
if (this->IsConcurrentEnabled())
{
Assert(concurrentThread != NULL || threadService->HasCallback());
FinalizeConcurrent(true);
this->SetCollectionState(CollectionStateNotCollecting);
}
}
bool
Recycler::StartConcurrent(CollectionState const state)
{
// Reset the tick count to detect if the concurrent thread is taking too long
tickCountStartConcurrent = GetTickCount();
CollectionState oldState = this->collectionState;
this->SetCollectionState(state);
if (threadService->HasCallback())
{
Assert(concurrentThread == NULL);
Assert(concurrentWorkReadyEvent == NULL);
if (!threadService->Invoke(Recycler::StaticBackgroundWorkCallback, this))
{
this->SetCollectionState(oldState);
return false;
}
return true;
}
else
{
Assert(concurrentThread != NULL);
Assert(concurrentWorkReadyEvent != NULL);
SetEvent(concurrentWorkReadyEvent);
return true;
}
}
BOOL
Recycler::StartBackgroundMarkCollect()
{
#ifdef RECYCLER_TRACE
PrintCollectTrace(Js::ConcurrentMarkPhase);
#endif
this->CollectionBegin<Js::ConcurrentCollectPhase>();
// Asynchronous concurrent mark
BOOL success = StartAsynchronousBackgroundMark();
this->CollectionEnd<Js::ConcurrentCollectPhase>();
return success;
}
BOOL
Recycler::StartBackgroundMark(bool foregroundResetMark, bool foregroundFindRoots)
{
Assert(!this->CollectionInProgress());
CollectionState backgroundState = CollectionStateConcurrentResetMarks;
bool doBackgroundFindRoots = true;
if (foregroundResetMark || foregroundFindRoots)
{
// REVIEW: SWB, if there's only write barrier page change, we don't scan and mark?
#ifdef RECYCLER_WRITE_WATCH
if (!CONFIG_FLAG(ForceSoftwareWriteBarrier))
{
RECYCLER_PROFILE_EXEC_BEGIN(this, Js::ResetWriteWatchPhase);
bool hasWriteWatch = autoHeap.ResetWriteWatch();
RECYCLER_PROFILE_EXEC_END(this, Js::ResetWriteWatchPhase);
if (!hasWriteWatch)
{
// Disable concurrent mark
this->enableConcurrentMark = false;
#ifdef ENABLE_JS_ETW
collectionFinishReason = ETWEventGCActivationTrigger::ETWEvent_GC_Trigger_Status_Failed;
#endif
return false;
}
}
#endif
// In-thread synchronized GC on the concurrent thread
ResetMarks(this->enableScanImplicitRoots ? ResetMarkFlags_SynchronizedImplicitRoots : ResetMarkFlags_Synchronized);
if (foregroundFindRoots)
{
this->SetCollectionState(CollectionStateFindRoots);
FindRoots();
ScanStack();
Assert(collectionState == CollectionStateFindRoots);
backgroundState = CollectionStateConcurrentMark;
doBackgroundFindRoots = false;
}
else
{
// Do find roots in the background
backgroundState = CollectionStateConcurrentFindRoots;
}
}
if (doBackgroundFindRoots)
{
this->PrepareBackgroundFindRoots();
}
if (!StartConcurrent(backgroundState))
{
if (doBackgroundFindRoots)
{
this->RevertPrepareBackgroundFindRoots();
}
this->collectionState = CollectionStateNotCollecting;
#ifdef ENABLE_JS_ETW
collectionFinishReason = ETWEventGCActivationTrigger::ETWEvent_GC_Trigger_Status_Failed;
#endif
return false;
}
#ifdef ENABLE_JS_ETW
collectionFinishReason = ETWEventGCActivationTrigger::ETWEvent_GC_Trigger_Status_StartedConcurrent;
#endif
return true;
}
BOOL
Recycler::StartAsynchronousBackgroundMark()
{
// Debug flags to turn off background reset mark or background find roots; default to doing everything concurrently
return StartBackgroundMark(CUSTOM_PHASE_OFF1(GetRecyclerFlagsTable(), Js::BackgroundResetMarksPhase), CUSTOM_PHASE_OFF1(GetRecyclerFlagsTable(), Js::BackgroundFindRootsPhase));
}
BOOL
Recycler::StartSynchronousBackgroundMark()
{
return StartBackgroundMark(true, true);
}
BOOL
Recycler::StartConcurrentSweepCollect()
{
Assert(collectionState == CollectionStateNotCollecting);
#ifdef RECYCLER_TRACE
PrintCollectTrace(Js::ConcurrentSweepPhase);
#endif
this->CollectionBegin<Js::ConcurrentCollectPhase>();
this->Mark();
// We don't have rescan data if we disabled concurrent mark, assume the worst
// (which means it is harder to get into partial collect mode)
#if ENABLE_PARTIAL_GC
bool needConcurrentSweep = this->Sweep(RecyclerSweepManager::MaxPartialCollectRescanRootBytes, true, true);
#else
bool needConcurrentSweep = this->Sweep(true);
#endif
this->CollectionEnd<Js::ConcurrentCollectPhase>();
FinishCollection(needConcurrentSweep);
return true;
}
size_t
Recycler::BackgroundRepeatMark()
{
RECYCLER_PROFILE_EXEC_BACKGROUND_BEGIN(this, Js::BackgroundRepeatMarkPhase);
Assert(this->backgroundRescanCount <= RecyclerHeuristic::MaxBackgroundRepeatMarkCount - 1);
size_t rescannedPageCount = this->BackgroundRescan(RescanFlags_ResetWriteWatch);
if (this->NeedOOMRescan() || this->isAborting)
{
// OOM'ed. Let's not continue
RECYCLER_PROFILE_EXEC_BACKGROUND_END(this, Js::BackgroundRepeatMarkPhase);
return Recycler::InvalidScanRootBytes;
}
// Rescan the stack
this->BackgroundScanStack();
// Process mark stack
this->DoBackgroundParallelMark();
if (this->NeedOOMRescan())
{
RECYCLER_PROFILE_EXEC_BACKGROUND_END(this, Js::BackgroundRepeatMarkPhase);
return Recycler::InvalidScanRootBytes;
}
#ifdef RECYCLER_STATS
Assert(this->backgroundRescanCount >= 1 && this->backgroundRescanCount <= RecyclerHeuristic::MaxBackgroundRepeatMarkCount);
this->collectionStats.backgroundMarkData[this->backgroundRescanCount - 1] = this->collectionStats.markData;
#endif
RECYCLER_PROFILE_EXEC_BACKGROUND_END(this, Js::BackgroundRepeatMarkPhase);
return rescannedPageCount;
}
char* Recycler::GetScriptThreadStackTop()
{
// We should have already checked if the recycler is thread bound or not
Assert(mainThreadHandle != NULL);
return (char*) savedThreadContext.GetStackTop();
}
size_t
Recycler::BackgroundScanStack()
{
if (this->skipStack)
{
#ifdef RECYCLER_TRACE
CUSTOM_PHASE_PRINT_VERBOSE_TRACE1(GetRecyclerFlagsTable(), Js::ScanStackPhase, _u("[%04X] Skipping the stack scan\n"), ::GetCurrentThreadId());
#endif
return 0;
}
if (!this->isInScript || mainThreadHandle == nullptr)
{
// No point in scanning the main thread's stack if we are not in script.
// We also can't scan the main thread's stack if we are not thread bound and didn't create the main thread's handle.
return 0;
}
char* stackTop = this->GetScriptThreadStackTop();
if (stackTop != nullptr)
{
size_t size = (char *)stackBase - stackTop;
ScanMemoryInline<false>((void **)stackTop, size
ADDRESS_SANITIZER_APPEND(RecyclerScanMemoryType::Stack));
return size;
}
return 0;
}
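// Note on the size computation above: the stack is assumed to grow downward (true
// on the platforms this code targets), so stackBase is the high end of the range,
// the saved stack pointer is the low end, and the live portion scanned for roots is
//     [stackTop, stackBase), size = (char*)stackBase - stackTop
// ScanMemoryInline then treats each pointer-sized slot in that range as a potential
// root, which is what makes this scan conservative.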
void
Recycler::BackgroundMark()
{
Assert(this->DoQueueTrackedObject());
this->backgroundRescanCount = 0;
this->DoBackgroundParallelMark();
if (this->NeedOOMRescan() || this->isAborting)
{
return;
}
#ifdef RECYCLER_STATS
this->collectionStats.backgroundMarkData[0] = this->collectionStats.markData;
#endif
if (PHASE_OFF1(Js::BackgroundRepeatMarkPhase))
{
return;
}
// We always do one repeat mark pass.
size_t rescannedPageCount = this->BackgroundRepeatMark();
if (this->NeedOOMRescan() || this->isAborting)
{
// OOM'ed. Let's not continue
return;
}
Assert(rescannedPageCount != Recycler::InvalidScanRootBytes);
// If we rescanned enough pages in the previous repeat mark pass, then do one more
// to try to reduce the amount of work we need to do in-thread
if (rescannedPageCount >= RecyclerHeuristic::BackgroundSecondRepeatMarkThreshold)
{
this->BackgroundRepeatMark();
if (this->NeedOOMRescan() || this->isAborting)
{
// OOM'ed. Let's not continue
return;
}
}
}
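// Repeat-mark policy, summarized: after the initial background/parallel mark, one
// BackgroundRepeatMark pass always runs (unless the BackgroundRepeatMarkPhase flag
// turns it off); a second pass runs only when the first rescanned at least
// RecyclerHeuristic::BackgroundSecondRepeatMarkThreshold pages. Together with the
// MaxBackgroundRepeatMarkCount bound asserted in BackgroundRepeatMark, this keeps
// the number of background rescans strictly bounded before the in-thread finish.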
void
Recycler::BackgroundMarkWeakRefs()
{
#if ENABLE_WEAK_REFERENCE_REGIONS
auto iterator = this->weakReferenceRegionList.GetIterator();
while (iterator.Next())
{
RecyclerWeakReferenceRegion region = iterator.Data();
RecyclerWeakReferenceRegionItem<void*> *items = region.GetPtr();
size_t count = region.GetCount();
for (size_t index = 0; index < count; ++index)
{
RecyclerWeakReferenceRegionItem<void*> &item = items[index];
if (item.ptr == nullptr)
{
continue;
}
if (((uintptr_t)item.heapBlock & 0x1) == 0x1)
{
// This weak reference is already marked
continue;
}
if (item.heapBlock == nullptr)
{
item.heapBlock = this->FindHeapBlock(item.ptr);
if (item.heapBlock == nullptr)
{
// This isn't a real weak reference, ignore it
continue;
}
}
if (item.heapBlock->TestObjectMarkedBit(item.ptr))
{
item.heapBlock = (HeapBlock*) ((uintptr_t)item.heapBlock | 0x1);
}
}
}
#endif
}
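// Tag-bit scheme used above (illustrative): HeapBlock pointers are aligned, so the
// low bit is free to record "referent is marked" without a separate flag array:
//     item.heapBlock = (HeapBlock*)((uintptr_t)item.heapBlock | 0x1);              // set
//     bool marked = ((uintptr_t)item.heapBlock & 0x1) != 0;                        // test
//     HeapBlock* block = (HeapBlock*)((uintptr_t)item.heapBlock & ~(uintptr_t)0x1); // strip
// The consumer that later sweeps these weak reference regions is expected to strip
// the bit before dereferencing; that code is outside this function.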
void
Recycler::BackgroundResetMarks()
{
RECYCLER_PROFILE_EXEC_BACKGROUND_BEGIN(this, Js::BackgroundResetMarksPhase);
GCETW(GC_BACKGROUNDRESETMARKS_START, (this));
Assert(IsMarkStackEmpty());
this->scanPinnedObjectMap = true;
this->hasScannedInitialImplicitRoots = false;
heapBlockMap.ResetMarks();
autoHeap.ResetMarks(this->enableScanImplicitRoots ? ResetMarkFlags_InBackgroundThreadImplicitRoots : ResetMarkFlags_InBackgroundThread);
GCETW(GC_BACKGROUNDRESETMARKS_STOP, (this));
RECYCLER_PROFILE_EXEC_BACKGROUND_END(this, Js::BackgroundResetMarksPhase);
}
void
Recycler::PrepareBackgroundFindRoots()
{
Assert(!this->hasPendingConcurrentFindRoot);
this->hasPendingConcurrentFindRoot = true;
// Save the thread context here. The background thread
// will use this saved context for the marking instead of
// trying to get the live thread context of the thread
SAVE_THREAD_CONTEXT();
// Temporarily disable resize so the background can scan without
// the memory being freed from under it
pinnedObjectMap.DisableResize();
// Update the cached info for big blocks in the guest arena
DListBase<GuestArenaAllocator>::EditingIterator guestArenaIter(&guestArenaList);
while (guestArenaIter.Next())
{
GuestArenaAllocator& allocator = guestArenaIter.Data();
allocator.SetLockBlockList(true);
if (allocator.pendingDelete)
{
Assert(this->hasPendingDeleteGuestArena);
allocator.SetLockBlockList(false);
guestArenaIter.RemoveCurrent(&HeapAllocator::Instance);
}
else if (this->backgroundFinishMarkCount == 0)
{
// Update the cached info for big block
allocator.GetBigBlocks(false);
}
}
this->hasPendingDeleteGuestArena = false;
}
void
Recycler::RevertPrepareBackgroundFindRoots()
{
Assert(this->hasPendingConcurrentFindRoot);
this->hasPendingConcurrentFindRoot = false;
pinnedObjectMap.EnableResize();
}
size_t
Recycler::BackgroundFindRoots()
{
#ifdef RECYCLER_STATS
size_t lastMarkCount = this->collectionStats.markData.markCount;
#endif
size_t scanRootBytes = 0;
Assert(this->IsConcurrentFindRootState());
Assert(this->hasPendingConcurrentFindRoot);
#if ENABLE_PARTIAL_GC
Assert(this->inPartialCollectMode || this->DoQueueTrackedObject());
#else
Assert(this->DoQueueTrackedObject());
#endif
// Only mark pinned objects and guest arenas, which is where most of the roots are.
// When we go back to the main thread to rescan, we will scan the rest of the roots.
// NOTE: purposefully not marking the transientPinnedObject here, as it is transient :)
// Background mark the pinned objects. Since we are in concurrent find root state,
// the main thread won't delete any entries from the map, so concurrent reads
// of the map are safe.
GCETW(GC_BACKGROUNDSCANROOTS_START, (this));
RECYCLER_PROFILE_EXEC_BACKGROUND_BEGIN(this, Js::BackgroundFindRootsPhase);
scanRootBytes += this->ScanPinnedObjects</*background = */true>();
RECYCLER_PROFILE_EXEC_BACKGROUND_BEGIN(this, Js::FindRootArenaPhase);
// Background mark the guest arenas. Since we are in concurrent find root state,
// the main thread won't delete any arenas, so concurrent reads of them are ok.
DListBase<GuestArenaAllocator>::EditingIterator guestArenaIter(&guestArenaList);
while (guestArenaIter.Next())
{
GuestArenaAllocator& allocator = guestArenaIter.Data();
if (allocator.pendingDelete)
{
// Skip guest arenas that are already marked for deletion
Assert(this->hasPendingDeleteGuestArena);
continue;
}
scanRootBytes += ScanArena(&allocator, true);
}
RECYCLER_PROFILE_EXEC_BACKGROUND_END(this, Js::FindRootArenaPhase);
this->ScanImplicitRoots();
RECYCLER_PROFILE_EXEC_BACKGROUND_END(this, Js::BackgroundFindRootsPhase);
this->hasPendingConcurrentFindRoot = false;
this->SetCollectionState(CollectionStateConcurrentMark);
GCETW(GC_BACKGROUNDSCANROOTS_STOP, (this));
RECYCLER_STATS_ADD(this, rootCount, this->collectionStats.markData.markCount - lastMarkCount);
return scanRootBytes;
}

size_t
Recycler::BackgroundFinishMark()
{
#if ENABLE_PARTIAL_GC
    Assert(this->inPartialCollectMode || this->DoQueueTrackedObject());
#else
    Assert(this->DoQueueTrackedObject());
#endif
    Assert(collectionState == CollectionStateConcurrentFinishMark);
    size_t rescannedRootBytes = FinishMarkRescan(true) * AutoSystemInfo::PageSize;
    this->SetCollectionState(CollectionStateConcurrentFindRoots);
    rescannedRootBytes += this->BackgroundFindRoots();
    this->SetCollectionState(CollectionStateConcurrentFinishMark);
    RECYCLER_PROFILE_EXEC_BACKGROUND_BEGIN(this, Js::MarkPhase);
    ProcessMark(true);
    RECYCLER_PROFILE_EXEC_BACKGROUND_END(this, Js::MarkPhase);
    return rescannedRootBytes;
}

void
Recycler::SweepPendingObjects(RecyclerSweepManager& recyclerSweepManager)
{
    autoHeap.SweepPendingObjects(recyclerSweepManager);
}

void
Recycler::ConcurrentTransferSweptObjects(RecyclerSweepManager& recyclerSweepManager)
{
    Assert(!recyclerSweepManager.IsBackground());
    Assert((this->collectionState & Collection_TransferSwept) == Collection_TransferSwept);
#if ENABLE_PARTIAL_GC
    if (this->hasBackgroundFinishPartial)
    {
        this->hasBackgroundFinishPartial = false;
        this->ClearPartialCollect();
    }
#endif
    autoHeap.ConcurrentTransferSweptObjects(recyclerSweepManager);
}

#if ENABLE_PARTIAL_GC
void
Recycler::ConcurrentPartialTransferSweptObjects(RecyclerSweepManager& recyclerSweepManager)
{
    Assert(!recyclerSweepManager.IsBackground());
    Assert(!this->hasBackgroundFinishPartial);
    autoHeap.ConcurrentPartialTransferSweptObjects(recyclerSweepManager);
}
#endif

BOOL
Recycler::FinishConcurrentCollectWrapped(CollectionFlags flags)
{
    this->allowDispose = (flags & CollectOverride_AllowDispose) == CollectOverride_AllowDispose;
#if ENABLE_CONCURRENT_GC
    this->skipStack = ((flags & CollectOverride_SkipStack) != 0);
    DebugOnly(this->isConcurrentGCOnIdle = (flags == CollectOnScriptIdle));
#endif
    BOOL collected = collectionWrapper->ExecuteRecyclerCollectionFunction(this, &Recycler::FinishConcurrentCollect, flags);
    return collected;
}

/**
 * Compute ft1 - ft2, return the result as a uint64
 */
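// Note: FILETIME values are counts of 100-nanosecond intervals. The high/low
// parts are copied into a ULARGE_INTEGER rather than casting the FILETIME*
// directly, since the Windows documentation warns that such casts can cause
// alignment faults; copying the parts is the documented-safe way to do 64-bit
// arithmetic on FILETIMEs.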
uint64 DiffFileTimes(LPFILETIME ft1, LPFILETIME ft2)
{
    ULARGE_INTEGER ul1;
    ULARGE_INTEGER ul2;
    ul1.HighPart = ft1->dwHighDateTime;
    ul1.LowPart = ft1->dwLowDateTime;
    ul2.HighPart = ft2->dwHighDateTime;
    ul2.LowPart = ft2->dwLowDateTime;
    ULONGLONG result = ul1.QuadPart - ul2.QuadPart;
    return result;
}

BOOL
Recycler::WaitForConcurrentThread(DWORD waitTime, RecyclerWaitReason caller)
{
    Assert(this->IsConcurrentState() || this->collectionState == CollectionStateParallelMark);
    RECYCLER_PROFILE_EXEC_BEGIN(this, Js::ConcurrentWaitPhase);
    if (concurrentThread != NULL)
    {
        // Set the priority back to normal before we wait to ensure it doesn't starve
        SetThreadPriority(this->concurrentThread, THREAD_PRIORITY_NORMAL);
    }

#ifdef ENABLE_BASIC_TELEMETRY
    bool isBlockingMainThread = false;
    Js::Tick start;
    FILETIME kernelTime1;
    FILETIME userTime1;
    HANDLE hProcess = GetCurrentProcess();
    if (this->telemetryStats.ShouldStartTelemetryCapture())
    {
        isBlockingMainThread = this->telemetryStats.IsOnScriptThread();
        if (isBlockingMainThread)
        {
            start = Js::Tick::Now();
            FILETIME creationTime;
            FILETIME exitTime;
            GetProcessTimes(hProcess, &creationTime, &exitTime, &kernelTime1, &userTime1);
        }
    }
#endif

    DWORD ret = WaitForSingleObject(concurrentWorkDoneEvent, waitTime);

#ifdef ENABLE_BASIC_TELEMETRY
    if (isBlockingMainThread)
    {
        Js::Tick end = Js::Tick::Now();
        Js::TickDelta elapsed = end - start;
        FILETIME creationTime;
        FILETIME exitTime;
        FILETIME kernelTime2;
        FILETIME userTime2;
        GetProcessTimes(hProcess, &creationTime, &exitTime, &kernelTime2, &userTime2);
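        // Note: GetProcessTimes reports process-wide CPU totals, so the deltas
        // computed below also include CPU consumed by other threads (e.g. the
        // concurrent GC thread itself) while the main thread was waiting.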
        uint64 kernelTime = DiffFileTimes(&kernelTime2, &kernelTime1);
        uint64 userTime = DiffFileTimes(&userTime2, &userTime1);
        // userTime and kernelTime reported by GetProcessTimes are counts of
        // 100-nanosecond ticks; convert to microseconds for consistency
        // (1 microsecond == ten 100ns ticks, hence the divide by 10).
        kernelTime = kernelTime / 10;
        userTime = userTime / 10;
        this->telemetryStats.IncrementUserThreadBlockedCount(elapsed.ToMicroseconds(), caller);
        this->telemetryStats.IncrementUserThreadBlockedCpuTimeUser(userTime, caller);
        this->telemetryStats.IncrementUserThreadBlockedCpuTimeKernel(kernelTime, caller);
    }
#endif

    if (concurrentThread != NULL)
    {
        if (ret == WAIT_TIMEOUT)
        {
            // Keep the priority boost.
            priorityBoost = true;
        }
        else
        {
            Assert(ret == WAIT_OBJECT_0);
            // Back to below normal
            SetThreadPriority(this->concurrentThread, THREAD_PRIORITY_BELOW_NORMAL);
            priorityBoost = false;
        }
    }
    RECYCLER_PROFILE_EXEC_END(this, Js::ConcurrentWaitPhase);
    return (ret == WAIT_OBJECT_0);
}

#ifdef ENABLE_DEBUG_CONFIG_OPTIONS
AutoProtectPages::AutoProtectPages(Recycler* recycler, bool protectEnabled) :
    isReadOnly(false),
    recycler(recycler)
{
    if (protectEnabled)
    {
        recycler->heapBlockMap.MakeAllPagesReadOnly(recycler);
        isReadOnly = true;
    }
}

AutoProtectPages::~AutoProtectPages()
{
    Unprotect();
}

void AutoProtectPages::Unprotect()
{
    if (isReadOnly)
    {
        recycler->heapBlockMap.MakeAllPagesReadWrite(recycler);
        isReadOnly = false;
    }
}
#endif

BOOL
Recycler::FinishConcurrentCollect(CollectionFlags flags)
{
    if (!this->IsConcurrentState())
    {
        Assert(false);
        return false;
    }

#ifdef PROFILE_EXEC
    Js::Phase concurrentPhase = Js::ConcurrentCollectPhase;
    // TODO: Remove this workaround for the unreferenced local once -profile is enabled for GC
    static_cast<Js::Phase>(concurrentPhase);
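    // (Casting the variable to its own type is a no-op used only to silence
    // "unreferenced local variable" warnings in configurations where the
    // profiling macros below do not consume the variable.)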
#endif

#if ENABLE_PARTIAL_GC
    RECYCLER_PROFILE_EXEC_BEGIN2(this, Js::RecyclerPhase,
        (concurrentPhase = ((this->inPartialCollectMode && this->IsConcurrentMarkState()) ?
            Js::ConcurrentPartialCollectPhase : Js::ConcurrentCollectPhase)));
#else
    RECYCLER_PROFILE_EXEC_BEGIN2(this, Js::RecyclerPhase,
        (concurrentPhase = Js::ConcurrentCollectPhase));
#endif

    // Don't do a concurrent sweep if we have boosted the concurrent thread's priority.
    const BOOL forceInThread = flags & CollectOverride_ForceInThread;
    bool concurrent = (flags & CollectMode_Concurrent) != 0;
    concurrent = concurrent && (!priorityBoost || this->backgroundRescanCount != 1);
#ifdef RECYCLER_TRACE
    collectionParam.priorityBoostConcurrentSweepOverride = priorityBoost;
#endif

    const DWORD waitTime = forceInThread ? INFINITE : RecyclerHeuristic::FinishConcurrentCollectWaitTime(this->GetRecyclerFlagsTable());
    GCETW(GC_FINISHCONCURRENTWAIT_START, (this, waitTime));
    const BOOL waited = WaitForConcurrentThread(waitTime, RecyclerWaitReason::FinishConcurrentCollect);
    GCETW(GC_FINISHCONCURRENTWAIT_STOP, (this, !waited));
    if (!waited)
    {
        RECYCLER_PROFILE_EXEC_END2(this, concurrentPhase, Js::RecyclerPhase);
        return false;
    }

    bool needConcurrentSweep = false;
    if (collectionState == CollectionStateRescanWait)
    {
        GCETW_INTERNAL(GC_START, (this, ETWEvent_ConcurrentRescan));
        GCETW_INTERNAL(GC_START2, (this, ETWEvent_ConcurrentRescan, this->collectionStartReason, this->collectionStartFlags));
#ifdef RECYCLER_TRACE
#if ENABLE_PARTIAL_GC
        PrintCollectTrace(this->inPartialCollectMode ? Js::ConcurrentPartialCollectPhase : Js::ConcurrentMarkPhase, true);
#else
        PrintCollectTrace(Js::ConcurrentMarkPhase, true);
#endif
#endif
        SetCollectionState(CollectionStateRescanFindRoots);
#ifdef ENABLE_DEBUG_CONFIG_OPTIONS
        // TODO: Change this behavior
        // ProtectPagesOnRescan is not supported in PageHeap mode because the page protection is changed
        // outside the PageAllocator in PageHeap mode, so the pages are not in the state that the
        // PageAllocator expects when it goes to change the page protection.
        // One viable fix is to move the guard page protection logic out of the heap blocks
        // and into the page allocator.
        AssertMsg(!(IsPageHeapEnabled() && GetRecyclerFlagsTable().RecyclerProtectPagesOnRescan), "ProtectPagesOnRescan not supported in page heap mode");
        AutoProtectPages protectPages(this, GetRecyclerFlagsTable().RecyclerProtectPagesOnRescan);
#endif
        const bool backgroundFinishMark = !forceInThread && concurrent && ((flags & CollectOverride_BackgroundFinishMark) != 0);
        const DWORD finishMarkWaitTime = RecyclerHeuristic::BackgroundFinishMarkWaitTime(backgroundFinishMark, GetRecyclerFlagsTable());
        size_t rescanRootBytes = FinishMark(finishMarkWaitTime);

        if (rescanRootBytes == Recycler::InvalidScanRootBytes)
        {
            Assert(this->IsMarkState());
            RECYCLER_PROFILE_EXEC_END2(this, concurrentPhase, Js::RecyclerPhase);
            GCETW_INTERNAL(GC_STOP, (this, ETWEvent_ConcurrentRescan));
            GCETW_INTERNAL(GC_STOP2, (this, ETWEvent_ConcurrentRescan, this->collectionStartReason, this->collectionStartFlags));
            // We timed out trying to finish the mark.
            return false;
        }
#ifdef RECYCLER_STATS
        collectionStats.continueCollectAllocBytes = autoHeap.uncollectedAllocBytes;
#endif
#ifdef RECYCLER_VERIFY_MARK
        if (GetRecyclerFlagsTable().RecyclerVerifyMark)
        {
            this->VerifyMark();
        }
#endif
#ifdef ENABLE_DEBUG_CONFIG_OPTIONS
        protectPages.Unprotect();
#endif
#if ENABLE_PARTIAL_GC
        needConcurrentSweep = this->Sweep(rescanRootBytes, concurrent, true);
#else
        needConcurrentSweep = this->Sweep(concurrent);
#endif
        GCETW_INTERNAL(GC_STOP, (this, ETWEvent_ConcurrentRescan));
        GCETW_INTERNAL(GC_STOP2, (this, ETWEvent_ConcurrentRescan, this->collectionStartReason, this->collectionStartFlags));
    }
#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
    else if (collectionState == CollectionStateConcurrentSweepPass1Wait)
    {
        this->FinishSweepPrep();

        if (forceInThread)
        {
            this->FinishConcurrentSweepPass1();
            this->SetCollectionState(CollectionStateConcurrentSweepPass2);
#ifdef RECYCLER_TRACE
            if (this->GetRecyclerFlagsTable().Trace.IsEnabled(Js::ConcurrentSweepPhase) && CONFIG_FLAG_RELEASE(Verbose))
            {
                Output::Print(_u("[GC #%d] Finishing Sweep Pass2 in-thread. \n"), this->collectionCount);
            }
#endif
            this->recyclerSweepManager->FinishSweep();
            this->FinishConcurrentSweep();
            this->recyclerSweepManager->EndBackground();

            uint sweptBytes = 0;
#ifdef RECYCLER_STATS
            sweptBytes = (uint)collectionStats.objectSweptBytes;
#endif
            GCETW(GC_BACKGROUNDSWEEP_STOP, (this, sweptBytes));
            this->SetCollectionState(CollectionStateTransferSweptWait);
            RECYCLER_PROFILE_EXEC_BACKGROUND_END(this, Js::ConcurrentSweepPhase);
            FinishTransferSwept(flags);
        }
        else
        {
            needConcurrentSweep = true;
            // Signal the background thread to finish concurrent sweep Pass2 for all the buckets.
            StartConcurrent(CollectionStateConcurrentSweepPass2);
        }
    }
#endif
    else
    {
        AssertMsg(this->collectionState == CollectionStateTransferSweptWait, "Do we need to handle this state?");
        FinishTransferSwept(flags);
    }

    RECYCLER_PROFILE_EXEC_END2(this, concurrentPhase, Js::RecyclerPhase);
    FinishCollection(needConcurrentSweep);
    if (!this->CollectionInProgress())
    {
        if (NeedExhaustiveRepeatCollect())
        {
            DoCollect((CollectionFlags)(flags & ~CollectMode_Partial));
        }
        else
        {
            EndCollection();
        }
    }
    return true;
}

void
Recycler::FinishTransferSwept(CollectionFlags flags)
{
    GCETW_INTERNAL(GC_START, (this, ETWEvent_ConcurrentTransferSwept));
    GCETW_INTERNAL(GC_START2, (this, ETWEvent_ConcurrentTransferSwept, this->collectionStartReason, this->collectionStartFlags));
    GCETW(GC_FLUSHZEROPAGE_START, (this));
    Assert(collectionState == CollectionStateTransferSweptWait);
#ifdef RECYCLER_TRACE
    PrintCollectTrace(Js::ConcurrentSweepPhase, true);
#endif
    SetCollectionState(CollectionStateTransferSwept);

#if ENABLE_BACKGROUND_PAGE_FREEING
    if (CONFIG_FLAG(EnableBGFreeZero))
    {
        // We should have zeroed all the pages in the background thread
        Assert(!autoHeap.HasZeroQueuedPages());
        autoHeap.FlushBackgroundPages();
    }
#endif
    GCETW(GC_FLUSHZEROPAGE_STOP, (this));

    GCETW(GC_TRANSFERSWEPTOBJECTS_START, (this));
    Assert(this->recyclerSweepManager != nullptr);
    Assert(!this->recyclerSweepManager->IsBackground());
#if ENABLE_PARTIAL_GC
    if (this->inPartialCollectMode)
    {
        ConcurrentPartialTransferSweptObjects(*this->recyclerSweepManager);
    }
    else
#endif
    {
        ConcurrentTransferSweptObjects(*this->recyclerSweepManager);
    }
    recyclerSweepManager->EndSweep();
    GCETW(GC_TRANSFERSWEPTOBJECTS_STOP, (this));
    GCETW_INTERNAL(GC_STOP, (this, ETWEvent_ConcurrentTransferSwept));
    GCETW_INTERNAL(GC_STOP2, (this, ETWEvent_ConcurrentTransferSwept, this->collectionStartReason, this->collectionStartFlags));
}

#if !DISABLE_SEH
int
Recycler::ExceptFilter(LPEXCEPTION_POINTERS pEP)
{
#if DBG
    // Don't handle assertion failures here; let them propagate
    if (pEP->ExceptionRecord->ExceptionCode == STATUS_ASSERTION_FAILURE)
    {
        return EXCEPTION_CONTINUE_SEARCH;
    }
#endif

#ifdef GENERATE_DUMP
    if (Js::Configuration::Global.flags.IsEnabled(Js::DumpOnCrashFlag))
    {
        Js::Throw::GenerateDump(pEP, Js::Configuration::Global.flags.DumpOnCrash);
    }
#endif

#if DBG && _M_IX86
    int callerEBP = *((int*)pEP->ContextRecord->Ebp);
    Output::Print(_u("Recycler Concurrent Thread: Uncaught exception: EIP: 0x%X ExceptionCode: 0x%X EBP: 0x%X ReturnAddress: 0x%X ReturnAddress2: 0x%X\n"),
        pEP->ExceptionRecord->ExceptionAddress, pEP->ExceptionRecord->ExceptionCode, pEP->ContextRecord->Eip,
        pEP->ContextRecord->Ebp, *((int*)pEP->ContextRecord->Ebp + 1), *((int*)callerEBP + 1));
#endif

    Output::Flush();
    return EXCEPTION_CONTINUE_SEARCH;
}
#endif

unsigned int
Recycler::StaticThreadProc(LPVOID lpParameter)
{
    DWORD ret = (DWORD)-1;
#if !DISABLE_SEH
    __try
    {
#endif
        Recycler * recycler = (Recycler *)lpParameter;
#if DBG
        recycler->concurrentThreadExited = false;
#endif
        ret = recycler->ThreadProc();
#if !DISABLE_SEH
    }
    __except (Recycler::ExceptFilter(GetExceptionInformation()))
    {
        Assert(false);
    }
#endif
    return ret;
}

void
Recycler::StaticBackgroundWorkCallback(void * callbackData)
{
    Recycler * recycler = (Recycler *) callbackData;
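    // Note: when work runs via the external thread service, forceForeground is
    // passed as true, which makes DoBackgroundWork skip the two-pass
    // (allocations-during-concurrent-sweep) path that is otherwise taken on
    // the recycler's own dedicated concurrent thread.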
    recycler->DoBackgroundWork(true);
}

#if defined(ENABLE_JS_ETW) && defined(NTBUILD)
static ETWEventGCActivationKind
BackgroundMarkETWEventGCActivationKind(CollectionState collectionState)
{
    return collectionState == CollectionStateConcurrentFinishMark ?
        ETWEvent_ConcurrentFinishMark : ETWEvent_ConcurrentMark;
}
#endif

void
Recycler::DoBackgroundWork(bool forceForeground)
{
    if (this->collectionState == CollectionStateConcurrentWrapperCallback)
    {
        this->collectionWrapper->ConcurrentCallback();
    }
    else if (this->collectionState == CollectionStateParallelMark)
    {
        this->ProcessParallelMark(false, &this->markContext);
    }
    else if (this->IsConcurrentMarkState())
    {
        RECYCLER_PROFILE_EXEC_BACKGROUND_BEGIN(this, this->collectionState == CollectionStateConcurrentFinishMark ?
            Js::BackgroundFinishMarkPhase : Js::ConcurrentMarkPhase);
        GCETW_INTERNAL(GC_START, (this, BackgroundMarkETWEventGCActivationKind(this->collectionState)));
        GCETW_INTERNAL(GC_START2, (this, BackgroundMarkETWEventGCActivationKind(this->collectionState), this->collectionStartReason, this->collectionStartFlags));
        DebugOnly(this->markContext.GetPageAllocator()->SetConcurrentThreadId(::GetCurrentThreadId()));
        Assert(this->enableConcurrentMark);
        if (this->collectionState != CollectionStateConcurrentFinishMark)
        {
            this->StartQueueTrackedObject();
        }
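        // The switch below intentionally falls through from case to case: a
        // background pass resumes from whatever mark stage the collection is
        // currently in and carries it forward through weak-ref marking.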
        switch (this->collectionState)
        {
        case CollectionStateConcurrentResetMarks:
            this->BackgroundResetMarks();
            this->BackgroundResetWriteWatchAll();
            this->SetCollectionState(CollectionStateConcurrentFindRoots);
            // fall-through
        case CollectionStateConcurrentFindRoots:
            this->BackgroundFindRoots();
            this->BackgroundScanStack();
            this->SetCollectionState(CollectionStateConcurrentMark);
            // fall-through
        case CollectionStateConcurrentMark:
            this->BackgroundMark();
            this->collectionState = CollectionStateConcurrentMarkWeakRef;
            // fall-through
        case CollectionStateConcurrentMarkWeakRef:
            this->BackgroundMarkWeakRefs();
            Assert(this->collectionState == CollectionStateConcurrentMarkWeakRef);
            RECORD_TIMESTAMP(concurrentMarkFinishTime);
            break;
        case CollectionStateConcurrentFinishMark:
            this->backgroundRescanRootBytes = this->BackgroundFinishMark();
            Assert(!HasPendingMarkObjects());
            break;
        default:
            Assert(false);
            break;
        }

        GCETW_INTERNAL(GC_STOP, (this, BackgroundMarkETWEventGCActivationKind(this->collectionState)));
        GCETW_INTERNAL(GC_STOP2, (this, BackgroundMarkETWEventGCActivationKind(this->collectionState), this->collectionStartReason, this->collectionStartFlags));
        RECYCLER_PROFILE_EXEC_BACKGROUND_END(this, this->collectionState == CollectionStateConcurrentFinishMark ?
            Js::BackgroundFinishMarkPhase : Js::ConcurrentMarkPhase);
        this->SetCollectionState(CollectionStateRescanWait);
        DebugOnly(this->markContext.GetPageAllocator()->ClearConcurrentThreadId());
    }
    else
    {
        Assert(this->enableConcurrentSweep);
#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
        if (CONFIG_FLAG_RELEASE(EnableConcurrentSweepAlloc) && !forceForeground)
        {
            if (this->collectionState == CollectionStateConcurrentSweep)
            {
                this->DoTwoPassConcurrentSweepPreCheck();
                if (this->AllowAllocationsDuringConcurrentSweep())
                {
                    this->SetCollectionState(CollectionStateConcurrentSweepPass1);
                }
            }
            Assert((!this->AllowAllocationsDuringConcurrentSweep() && this->collectionState == CollectionStateConcurrentSweep) || this->collectionState == CollectionStateConcurrentSweepPass1 || this->collectionState == CollectionStateConcurrentSweepPass2);
        }
        else
#endif
        {
            Assert(this->collectionState == CollectionStateConcurrentSweep);
        }

#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
        if (this->collectionState == CollectionStateConcurrentSweepPass1 ||
            ((!CONFIG_FLAG_RELEASE(EnableConcurrentSweepAlloc) || !this->AllowAllocationsDuringConcurrentSweep()) && this->collectionState == CollectionStateConcurrentSweep))
#endif
        {
            RECYCLER_PROFILE_EXEC_BACKGROUND_BEGIN(this, Js::ConcurrentSweepPhase);
#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
            if (this->collectionState == CollectionStateConcurrentSweepPass1)
            {
                GCETW_INTERNAL(GC_START, (this, ETWEvent_ConcurrentSweep_Pass1));
                GCETW_INTERNAL(GC_START2, (this, ETWEvent_ConcurrentSweep_Pass1, this->collectionStartReason, this->collectionStartFlags));
            }
            else
#endif
            {
                GCETW_INTERNAL(GC_START, (this, ETWEvent_ConcurrentSweep));
                GCETW_INTERNAL(GC_START2, (this, ETWEvent_ConcurrentSweep, this->collectionStartReason, this->collectionStartFlags));
            }

            GCETW(GC_BACKGROUNDZEROPAGE_START, (this));
#if ENABLE_BACKGROUND_PAGE_ZEROING
            if (CONFIG_FLAG(EnableBGFreeZero))
            {
                // Zero the queued pages first so they are available to be allocated
                autoHeap.BackgroundZeroQueuedPages();
            }
#endif
            GCETW(GC_BACKGROUNDZEROPAGE_STOP, (this));

            GCETW(GC_BACKGROUNDSWEEP_START, (this));
            Assert(this->recyclerSweepManager != nullptr);
            this->recyclerSweepManager->BackgroundSweep();
#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
            if (this->collectionState == CollectionStateConcurrentSweepPass1)
            {
                GCETW_INTERNAL(GC_STOP, (this, ETWEvent_ConcurrentSweep_Pass1));
                GCETW_INTERNAL(GC_STOP2, (this, ETWEvent_ConcurrentSweep_Pass1, this->collectionStartReason, this->collectionStartFlags));
            }
#endif

            // If allocations were allowed during the concurrent sweep, the allocable heap block lists
            // still need to be swept, so we remain in the CollectionStateConcurrentSweepPass1Wait state.
#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
            if (CONFIG_FLAG_RELEASE(EnableConcurrentSweepAlloc) && this->AllowAllocationsDuringConcurrentSweep())
            {
                this->SetCollectionState(CollectionStateConcurrentSweepPass1Wait);
            }
#endif
        }

#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
        if (CONFIG_FLAG_RELEASE(EnableConcurrentSweepAlloc))
        {
            if (this->collectionState == CollectionStateConcurrentSweepPass2)
            {
#ifdef RECYCLER_TRACE
                if (this->GetRecyclerFlagsTable().Trace.IsEnabled(Js::ConcurrentSweepPhase) && CONFIG_FLAG_RELEASE(Verbose))
                {
                    Output::Print(_u("[GC #%d] Finishing Sweep Pass2 on background thread. \n"), this->collectionCount);
                }
#endif
#if ENABLE_BACKGROUND_PAGE_ZEROING
                if (CONFIG_FLAG(EnableBGFreeZero))
                {
                    // Drain the zero queue again, as we might have freed more pages
                    // during the background sweep
                    GCETW(GC_BACKGROUNDZEROPAGE_START, (this));
                    autoHeap.BackgroundZeroQueuedPages();
                    GCETW(GC_BACKGROUNDZEROPAGE_STOP, (this));
                }
#endif
                this->FinishConcurrentSweepPass1();
                this->recyclerSweepManager->FinishSweep();
                this->FinishConcurrentSweep();
                this->recyclerSweepManager->EndBackground();
                this->SetCollectionState(CollectionStateConcurrentSweepPass2Wait);
            }
        }
#endif

#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
        if (this->collectionState == CollectionStateConcurrentSweepPass2Wait ||
            (!CONFIG_FLAG_RELEASE(EnableConcurrentSweepAlloc) || !this->AllowAllocationsDuringConcurrentSweep()))
#endif
        {
            uint sweptBytes = 0;
#ifdef RECYCLER_STATS
            sweptBytes = (uint)collectionStats.objectSweptBytes;
#endif
            GCETW(GC_BACKGROUNDSWEEP_STOP, (this, sweptBytes));

#if ENABLE_BACKGROUND_PAGE_ZEROING
            if (CONFIG_FLAG(EnableBGFreeZero))
            {
                // Drain the zero queue again, as we might have freed more pages
                // during the background sweep
                GCETW(GC_BACKGROUNDZEROPAGE_START, (this));
                autoHeap.BackgroundZeroQueuedPages();
                GCETW(GC_BACKGROUNDZEROPAGE_STOP, (this));
            }
#endif

#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
            if (CONFIG_FLAG_RELEASE(EnableConcurrentSweepAlloc) && this->AllowAllocationsDuringConcurrentSweep())
            {
                Assert(this->collectionState == CollectionStateConcurrentSweepPass2Wait);
            }
            else
#endif
            {
                Assert(this->collectionState == CollectionStateConcurrentSweep);
                GCETW_INTERNAL(GC_STOP, (this, ETWEvent_ConcurrentSweep));
                GCETW_INTERNAL(GC_STOP2, (this, ETWEvent_ConcurrentSweep, this->collectionStartReason, this->collectionStartFlags));
            }
            this->SetCollectionState(CollectionStateTransferSweptWait);
        }
        RECYCLER_PROFILE_EXEC_BACKGROUND_END(this, Js::ConcurrentSweepPhase);
    }

    SetEvent(this->concurrentWorkDoneEvent);
    collectionWrapper->WaitCollectionCallBack();
}

DWORD
Recycler::ThreadProc()
{
    Assert(this->IsConcurrentEnabled());

#if !defined(_UCRT)
    // We do this before we set the concurrentWorkDoneEvent because GetModuleHandleEx requires
    // taking the loader lock. Otherwise we could have the following case:
    //      Thread A initializes the concurrent thread (C)
    //      C signals done
    //      C yields, since it's lower priority
    //      Thread A starts running and is told to shut down
    //      Thread A grabs the loader lock as part of the shutdown sequence
    //      Thread A waits for C to be done
    //      C wakes up now and tries to grab the loader lock -> deadlock
    // To prevent this deadlock, we call GetModuleHandleEx first and then set the concurrentWorkDoneEvent.
    HMODULE dllHandle = NULL;
    if (!GetModuleHandleEx(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS, (LPCTSTR)&Recycler::StaticThreadProc, &dllHandle))
    {
        dllHandle = NULL;
    }
#endif

#if defined(ENABLE_JS_ETW) && !defined(ENABLE_JS_LTTNG)
    // LTTng has no concept of EventActivityIdControl.
    // Create an ETW ActivityId for this thread, to help tools correlate ETW events we generate.
    GUID activityId = { 0 };
    auto eventActivityIdControlResult = EventActivityIdControl(EVENT_ACTIVITY_CTRL_CREATE_SET_ID, &activityId);
    Assert(eventActivityIdControlResult == ERROR_SUCCESS);
#endif

    // Signal that the thread has started
    SetEvent(this->concurrentWorkDoneEvent);
    SetThreadPriority(::GetCurrentThread(), THREAD_PRIORITY_BELOW_NORMAL);

#if defined(DBG) && defined(PROFILE_EXEC)
    this->backgroundProfilerPageAllocator.SetConcurrentThreadId(::GetCurrentThreadId());
#endif

#ifdef IDLE_DECOMMIT_ENABLED
    DWORD handleCount = this->concurrentIdleDecommitEvent ? 2 : 1;
    HANDLE handles[2] = { this->concurrentWorkReadyEvent, this->concurrentIdleDecommitEvent };
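    // (When there is no idle-decommit event, handleCount is 1, so the null
    // second entry is never handed to WaitForMultipleObjectsEx below.)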
#endif

    do
    {
#ifdef IDLE_DECOMMIT_ENABLED
        needIdleDecommitSignal = IdleDecommitSignal_None;
        DWORD waitTime = autoHeap.IdleDecommit();
        if (waitTime == INFINITE)
        {
            DWORD ret = ::InterlockedCompareExchange(&needIdleDecommitSignal, IdleDecommitSignal_NeedSignal, IdleDecommitSignal_None);
            if (ret == IdleDecommitSignal_NeedTimer)
            {
#if DBG
                if (GetRecyclerFlagsTable().Trace.IsEnabled(Js::IdleDecommitPhase))
                {
                    Output::Print(_u("Recycler Thread IdleDecommit Need Timer\n"));
                    Output::Flush();
                }
#endif
                continue;
            }
        }
#if DBG
        else
        {
            if (GetRecyclerFlagsTable().Trace.IsEnabled(Js::IdleDecommitPhase))
            {
                Output::Print(_u("Recycler Thread IdleDecommit Wait %d\n"), waitTime);
                Output::Flush();
            }
        }
#endif
        DWORD result = WaitForMultipleObjectsEx(handleCount, handles, FALSE, waitTime, FALSE);
        if (result != WAIT_OBJECT_0)
        {
            Assert((handleCount == 2 && result == WAIT_OBJECT_0 + 1) || (waitTime != INFINITE && result == WAIT_TIMEOUT));
#if DBG
            if (GetRecyclerFlagsTable().Trace.IsEnabled(Js::IdleDecommitPhase))
            {
                if (result == WAIT_TIMEOUT)
                {
                    Output::Print(_u("Recycler Thread IdleDecommit Timeout: %d\n"), waitTime);
                }
                else
                {
                    Output::Print(_u("Recycler Thread IdleDecommit Signaled\n"));
                }
                Output::Flush();
            }
#endif
            continue;
        }
#else
        DWORD result = WaitForSingleObject(this->concurrentWorkReadyEvent, INFINITE);
        Assert(result == WAIT_OBJECT_0);
#endif
        if (this->collectionState == CollectionStateExit)
        {
#if DBG
            this->concurrentThreadExited = true;
#endif
            break;
        }
        DoBackgroundWork();
    }
    while (true);

    SetEvent(this->concurrentWorkDoneEvent);

#if !defined(_UCRT)
    if (dllHandle)
    {
        FreeLibraryAndExitThread(dllHandle, 0);
    }
    else
#endif
    {
        return 0;
    }
}
#endif // ENABLE_CONCURRENT_GC

#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
void
Recycler::DoTwoPassConcurrentSweepPreCheck()
{
    if (CONFIG_FLAG_RELEASE(EnableConcurrentSweepAlloc))
    {
        // We will do a two-pass sweep only when ALL of the following conditions are met:
        //   1. GC was triggered while we are in script, as this is the only case in which we will
        //      make use of the blocks in the SLIST during concurrent sweep.
        //   2. We are not in a partial GC.
        //   3. At least one heap bucket exceeds RecyclerHeuristic::AllocDuringConcurrentSweepHeapBlockThreshold.
        this->allowAllocationsDuringConcurrentSweepForCollection = this->isInScript && !this->recyclerSweepManager->InPartialCollect();

        // Do the actual two-pass check (condition 3) only if the first two checks pass.
        if (this->allowAllocationsDuringConcurrentSweepForCollection)
        {
            // We fire the ETW event only when the actual two-pass check is performed. This is to
            // avoid messing up ETL processing of test runs when in partial collect.
            GCETW_INTERNAL(GC_START, (this, ETWEvent_ConcurrentSweep_TwoPassSweepPreCheck));
            GCETW_INTERNAL(GC_START2, (this, ETWEvent_ConcurrentSweep_TwoPassSweepPreCheck, this->collectionStartReason, this->collectionStartFlags));
            this->allowAllocationsDuringConcurrentSweepForCollection = this->autoHeap.DoTwoPassConcurrentSweepPreCheck();
            GCETW_INTERNAL(GC_STOP, (this, ETWEvent_ConcurrentSweep_TwoPassSweepPreCheck));
            GCETW_INTERNAL(GC_STOP2, (this, ETWEvent_ConcurrentSweep_TwoPassSweepPreCheck, this->collectionStartReason, this->collectionStartFlags));
        }
    }
}

void
Recycler::FinishConcurrentSweepPass1()
{
    GCETW_INTERNAL(GC_START, (this, ETWEvent_ConcurrentSweep_FinishPass1));
    GCETW_INTERNAL(GC_START2, (this, ETWEvent_ConcurrentSweep_FinishPass1, this->collectionStartReason, this->collectionStartFlags));
    if (CONFIG_FLAG_RELEASE(EnableConcurrentSweepAlloc))
    {
        AssertMsg(this->allowAllocationsDuringConcurrentSweepForCollection, "Two pass concurrent sweep must be turned on.");
        this->autoHeap.FinishConcurrentSweepPass1(this->recyclerSweepManagerInstance);
    }
    GCETW_INTERNAL(GC_STOP, (this, ETWEvent_ConcurrentSweep_FinishPass1));
    GCETW_INTERNAL(GC_STOP2, (this, ETWEvent_ConcurrentSweep_FinishPass1, this->collectionStartReason, this->collectionStartFlags));
}

void
Recycler::FinishSweepPrep()
{
    GCETW_INTERNAL(GC_START, (this, ETWEvent_ConcurrentSweep_FinishSweepPrep));
    GCETW_INTERNAL(GC_START2, (this, ETWEvent_ConcurrentSweep_FinishSweepPrep, this->collectionStartReason, this->collectionStartFlags));
    if (CONFIG_FLAG_RELEASE(EnableConcurrentSweepAlloc))
    {
        AssertMsg(this->allowAllocationsDuringConcurrentSweepForCollection, "Two pass concurrent sweep must be turned on.");
        this->autoHeap.FinishSweepPrep(this->recyclerSweepManagerInstance);
    }
    GCETW_INTERNAL(GC_STOP, (this, ETWEvent_ConcurrentSweep_FinishSweepPrep));
    GCETW_INTERNAL(GC_STOP2, (this, ETWEvent_ConcurrentSweep_FinishSweepPrep, this->collectionStartReason, this->collectionStartFlags));
}

void
Recycler::FinishConcurrentSweep()
{
#if SUPPORT_WIN32_SLIST
    GCETW_INTERNAL(GC_START, (this, ETWEvent_ConcurrentSweep_FinishTwoPassSweep));
    GCETW_INTERNAL(GC_START2, (this, ETWEvent_ConcurrentSweep_FinishTwoPassSweep, this->collectionStartReason, this->collectionStartFlags));
    if (CONFIG_FLAG_RELEASE(EnableConcurrentSweepAlloc))
    {
        AssertMsg(this->allowAllocationsDuringConcurrentSweepForCollection, "Two pass concurrent sweep must be turned on.");
        this->autoHeap.FinishConcurrentSweep();
    }
    GCETW_INTERNAL(GC_STOP, (this, ETWEvent_ConcurrentSweep_FinishTwoPassSweep));
    GCETW_INTERNAL(GC_STOP2, (this, ETWEvent_ConcurrentSweep_FinishTwoPassSweep, this->collectionStartReason, this->collectionStartFlags));
#endif
}
#endif

void
Recycler::FinishCollection(bool needConcurrentSweep)
{
#if ENABLE_CONCURRENT_GC
    Assert(!!this->InConcurrentSweep() == needConcurrentSweep);
#else
    Assert(!needConcurrentSweep);
#endif
    if (!needConcurrentSweep)
    {
        FinishCollection();
    }
    else
    {
        FinishDisposeObjects();
    }
}

void
Recycler::FinishCollection()
{
#if ENABLE_PARTIAL_GC && ENABLE_CONCURRENT_GC
    Assert(!this->hasBackgroundFinishPartial);
#endif
    Assert(!this->hasPendingDeleteGuestArena);

    // Reset the time heuristics
    ScheduleNextCollection();

    {
        AutoSwitchCollectionStates collectionState(this,
            /* entry state */ CollectionStatePostCollectionCallback,
            /* exit state */ CollectionStateNotCollecting);
        collectionWrapper->PostCollectionCallBack();
    }

#if ENABLE_CONCURRENT_GC
    this->backgroundFinishMarkCount = 0;
#endif

    // Do a partial page decommit now
    if (decommitOnFinish)
    {
        autoHeap.DecommitNow(false);
        this->decommitOnFinish = false;
    }

    RECYCLER_SLOW_CHECK(autoHeap.Check());
#ifdef RECYCLER_MEMORY_VERIFY
    this->Verify(Js::RecyclerPhase);
#endif
#ifdef RECYCLER_FINALIZE_CHECK
    this->VerifyFinalize();
#endif
#ifdef ENABLE_JS_ETW
    FlushFreeRecord();
#endif

    FinishDisposeObjects();
#ifdef RECYCLER_FINALIZE_CHECK
    if (!this->IsMarkState())
    {
        this->VerifyFinalize();
    }
#endif

#ifdef RECYCLER_STATS
    if (CUSTOM_PHASE_STATS1(this->GetRecyclerFlagsTable(), Js::RecyclerPhase))
    {
        PrintCollectStats();
    }
#endif
#ifdef PROFILE_RECYCLER_ALLOC
    if (MemoryProfiler::IsTraceEnabled(true))
    {
        PrintAllocStats();
    }
#endif

#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
    this->allowAllocationsDuringConcurrentSweepForCollection = false;
#endif
#if ENABLE_MEM_STATS
    autoHeap.ReportMemStats(this);
#endif
#ifdef ENABLE_JS_ETW
    this->collectionStartReason = ETWEventGCActivationTrigger::ETWEvent_GC_Trigger_Unknown;
    this->collectionFinishReason = ETWEventGCActivationTrigger::ETWEvent_GC_Trigger_Unknown;
#endif
    RECORD_TIMESTAMP(currentCollectionEndTime);
}

void
Recycler::SetExternalRootMarker(ExternalRootMarker fn, void * context)
{
    externalRootMarker = fn;
    externalRootMarkerContext = context;
}

void
Recycler::SetCollectionWrapper(RecyclerCollectionWrapper * wrapper)
{
    this->collectionWrapper = wrapper;
#if LARGEHEAPBLOCK_ENCODING
    this->Cookie = wrapper->GetRandomNumber();
#else
    this->Cookie = 0;
#endif
}

// TODO: (leish) Remove the following function? It doesn't seem to make sense to re-allocate in the recycler.
char *
Recycler::Realloc(void* buffer, DECLSPEC_GUARD_OVERFLOW size_t existingBytes, DECLSPEC_GUARD_OVERFLOW size_t requestedBytes, bool truncate)
{
    Assert(requestedBytes > 0);
    if (existingBytes == 0)
    {
        Assert(buffer == nullptr);
        return Alloc(requestedBytes);
    }

    Assert(buffer != nullptr);
    size_t nbytes = AllocSizeMath::Align(requestedBytes, HeapConstants::ObjectGranularity);
    // Since we successfully allocated before, we shouldn't have integer overflow here
    size_t nbytesExisting = AllocSizeMath::Align(existingBytes, HeapConstants::ObjectGranularity);
    Assert(nbytesExisting >= existingBytes);

    if (nbytes == nbytesExisting)
    {
        return (char *)buffer;
    }

    char* replacementBuf = this->Alloc(requestedBytes);
    if (replacementBuf != nullptr)
    {
        // Truncate
        if (existingBytes > requestedBytes && truncate)
        {
            js_memcpy_s(replacementBuf, requestedBytes, buffer, requestedBytes);
        }
        else
        {
            js_memcpy_s(replacementBuf, requestedBytes, buffer, existingBytes);
        }
    }

    if (nbytesExisting > 0)
    {
        this->Free(buffer, nbytesExisting);
    }
    return replacementBuf;
}

bool
Recycler::ForceSweepObject()
{
#ifdef RECYCLER_TEST_SUPPORT
    if (BinaryFeatureControl::RecyclerTest())
    {
        if (checkFn != nullptr)
        {
            return true;
        }
    }
#endif
#ifdef PROFILE_RECYCLER_ALLOC
    if (trackerDictionary != nullptr)
    {
        // Need to sweep objects if we are tracing recycler allocations
        return true;
    }
#endif
#ifdef RECYCLER_STATS
    if (CUSTOM_PHASE_STATS1(this->GetRecyclerFlagsTable(), Js::RecyclerPhase))
    {
        return true;
    }
#endif
#if DBG
    // Force sweeping the objects so we can assert that we are not sweeping objects that are still implicit roots
    if (this->enableScanImplicitRoots)
    {
        return true;
    }
#endif
    return false;
}

bool
Recycler::ShouldIdleCollectOnExit()
{
    // Always reset the partial heuristics, even if we are not doing an idle collect,
    // so we don't carry the heuristics over to the next script activation.
    this->ResetPartialHeuristicCounters();

    if (this->CollectionInProgress())
    {
#ifdef RECYCLER_TRACE
        CUSTOM_PHASE_PRINT_VERBOSE_TRACE1(GetRecyclerFlagsTable(), Js::IdleCollectPhase, _u("%04X> Skipping scheduling Idle Collect. Reason: Collection in progress\n"), ::GetCurrentThreadId());
#endif
        // Don't schedule an idle collect if there is a collection going on already.
        // IDLE-GC-TODO: Fix ResetHeuristics in the GC so we can detect memory allocation during
        // the concurrent collect and still schedule an idle collect
        return false;
    }

    if (CUSTOM_PHASE_FORCE1(GetRecyclerFlagsTable(), Js::IdleCollectPhase))
    {
        return true;
    }

    uint32 nextTime = tickCountNextCollection - tickDiffToNextCollect;
    // We will try to start a concurrent collect if we are within .9 ms of the next scheduled
    // collection AND the size of allocation is larger than 32M. This is similar to the
    // CollectionAllocation logic, just earlier in both the time heuristic and the size heuristic,
    // so we can do some concurrent GC while we are not in script.
    if (autoHeap.uncollectedAllocBytes >= RecyclerHeuristic::Instance.MaxUncollectedAllocBytesOnExit
        && GetTickCount() > nextTime)
    {
#ifdef RECYCLER_TRACE
        if (CUSTOM_PHASE_TRACE1(GetRecyclerFlagsTable(), Js::IdleCollectPhase))
        {
            if (autoHeap.uncollectedAllocBytes >= RecyclerHeuristic::Instance.MaxUncollectedAllocBytesOnExit)
            {
                Output::Print(_u("%04X> Idle collect on exit: alloc %d\n"), ::GetCurrentThreadId(), autoHeap.uncollectedAllocBytes);
            }
            else
            {
                Output::Print(_u("%04X> Idle collect on exit: time %d\n"), ::GetCurrentThreadId(), tickCountNextCollection - GetTickCount());
            }
            Output::Flush();
        }
#endif
        this->CollectNow<CollectNowConcurrent>();
        return false;
    }

    Assert(!this->CollectionInProgress());
    // Idle GC uses the size heuristic; we only need to schedule one if we passed it.
    return (autoHeap.uncollectedAllocBytes >= RecyclerHeuristic::IdleUncollectedAllocBytesCollection);
}

#if ENABLE_CONCURRENT_GC
bool
RecyclerParallelThread::StartConcurrent()
{
    if (this->recycler->threadService->HasCallback())
    {
        // This may be the first time. If so, initialize by creating the doneEvent.
        if (this->concurrentWorkDoneEvent == NULL)
        {
            this->concurrentWorkDoneEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
            if (this->concurrentWorkDoneEvent == nullptr)
            {
                return false;
            }
        }
        Assert(concurrentThread == NULL);
        Assert(concurrentWorkReadyEvent == NULL);

        // Invoke the thread service to process the work
        if (!this->recycler->threadService->Invoke(RecyclerParallelThread::StaticBackgroundWorkCallback, this))
        {
            return false;
        }
    }
    else
    {
        // This may be the first time. If so, initialize and create the thread.
        if (this->concurrentWorkDoneEvent == NULL)
        {
            return this->EnableConcurrent(false);
        }
        else
        {
            Assert(this->concurrentThread != NULL);
            Assert(this->concurrentWorkReadyEvent != NULL);
            // Signal the already-initialized thread that work is ready
            SetEvent(this->concurrentWorkReadyEvent);
        }
    }
    return true;
}

bool
RecyclerParallelThread::EnableConcurrent(bool waitForThread)
{
    this->synchronizeOnStartup = waitForThread;
    Assert(this->concurrentWorkDoneEvent == NULL);
    Assert(this->concurrentWorkReadyEvent == NULL);
    Assert(this->concurrentThread == NULL);

    this->concurrentWorkDoneEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
    if (this->concurrentWorkDoneEvent == nullptr)
    {
        return false;
    }
    this->concurrentWorkReadyEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
    if (this->concurrentWorkReadyEvent == nullptr)
    {
        CloseHandle(this->concurrentWorkDoneEvent);
        this->concurrentWorkDoneEvent = NULL;
        return false;
    }

    auto threadHandle = PlatformAgnostic::Thread::Create(Recycler::ConcurrentThreadStackSize,
        &RecyclerParallelThread::StaticThreadProc, this,
        PlatformAgnostic::Thread::ThreadInitStackSizeParamIsAReservation, _u("Chakra Recycler Parallel Thread"));
    if (threadHandle != PlatformAgnostic::Thread::InvalidHandle)
    {
        this->concurrentThread = reinterpret_cast<HANDLE>(threadHandle);
    }

    if (this->concurrentThread != nullptr && waitForThread)
    {
        // Wait for the thread to initialize
        HANDLE handle[2] = { this->concurrentWorkDoneEvent, this->concurrentThread };
        DWORD ret = WaitForMultipleObjectsEx(2, handle, FALSE, INFINITE, FALSE);
        if (ret == WAIT_OBJECT_0)
        {
            return true;
        }
        CloseHandle(concurrentThread);
        concurrentThread = nullptr;
    }

    if (this->concurrentThread == nullptr)
    {
        CloseHandle(this->concurrentWorkDoneEvent);
        this->concurrentWorkDoneEvent = NULL;
        CloseHandle(this->concurrentWorkReadyEvent);
        this->concurrentWorkReadyEvent = NULL;
        return false;
    }
    return true;
}
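
// Note: each parallel worker below gets its own MarkContext; parallelId 0 maps
// to parallelMarkContext2 and parallelId 1 to parallelMarkContext3, while the
// recycler's dedicated concurrent thread marks with the primary markContext
// (see DoBackgroundWork above).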
template <uint parallelId>
void
Recycler::ParallelWorkFunc()
{
    Assert(parallelId == 0 || parallelId == 1);
    MarkContext * markContext = (parallelId == 0 ? &this->parallelMarkContext2 : &this->parallelMarkContext3);
    switch (this->collectionState)
    {
    case CollectionStateParallelMark:
        this->ProcessParallelMark(false, markContext);
        break;
    case CollectionStateBackgroundParallelMark:
        this->ProcessParallelMark(true, markContext);
        break;
    default:
        Assert(false);
    }
}

void
RecyclerParallelThread::WaitForConcurrent()
{
    Assert(this->concurrentThread != NULL || this->recycler->threadService->HasCallback());
    Assert(this->concurrentWorkDoneEvent != NULL);
    DWORD ret = WaitForSingleObject(concurrentWorkDoneEvent, INFINITE);
    Assert(ret == WAIT_OBJECT_0);
}

void
RecyclerParallelThread::Shutdown()
{
    Assert(this->recycler->collectionState == CollectionStateExit);
    if (this->recycler->threadService->HasCallback())
    {
        if (this->concurrentWorkDoneEvent != NULL)
        {
            CloseHandle(this->concurrentWorkDoneEvent);
            this->concurrentWorkDoneEvent = NULL;
        }
    }
    else
    {
        if (this->concurrentThread != NULL)
        {
            HANDLE handles[2] = { concurrentWorkDoneEvent, concurrentThread };
            SetEvent(concurrentWorkReadyEvent);
            // During process shutdown, the OS might kill this (recycler parallel, i.e. concurrent) thread,
            // in which case it never gets a chance to signal concurrentWorkDoneEvent. If we waited only on
            // concurrentWorkDoneEvent while shutting down the main (recycler) thread here, the wait would
            // never return. Hence we wait on both concurrentWorkDoneEvent and concurrentThread, so that if
            // the thread got killed, the wait still returns and we can proceed.
            DWORD fRet = WaitForMultipleObjectsEx(2, handles, FALSE, INFINITE, FALSE);
            AssertMsg(fRet != WAIT_FAILED, "Check handles passed to WaitForMultipleObjectsEx.");
            CloseHandle(this->concurrentWorkDoneEvent);
            this->concurrentWorkDoneEvent = NULL;
            CloseHandle(this->concurrentWorkReadyEvent);
            this->concurrentWorkReadyEvent = NULL;
            CloseHandle(this->concurrentThread);
            this->concurrentThread = NULL;
        }
    }
    Assert(this->concurrentThread == NULL);
    Assert(this->concurrentWorkReadyEvent == NULL);
    Assert(this->concurrentWorkDoneEvent == NULL);
}

// static
unsigned int
RecyclerParallelThread::StaticThreadProc(LPVOID lpParameter)
{
    DWORD ret = (DWORD)-1;
#if !DISABLE_SEH
    __try
    {
#endif
        RecyclerParallelThread * parallelThread = (RecyclerParallelThread *)lpParameter;
        Recycler * recycler = parallelThread->recycler;
        RecyclerParallelThread::WorkFunc workFunc = parallelThread->workFunc;
        Assert(recycler->IsConcurrentEnabled());

#if !defined(_UCRT)
        HMODULE dllHandle = NULL;
        if (!GetModuleHandleEx(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS, (LPCTSTR)&RecyclerParallelThread::StaticThreadProc, &dllHandle))
        {
            dllHandle = NULL;
        }
#endif

#if defined(ENABLE_JS_ETW) && !defined(ENABLE_JS_LTTNG)
        // LTTng has no concept of EventActivityIdControl.
        // Create an ETW ActivityId for this thread, to help tools correlate ETW events we generate.
        GUID activityId = { 0 };
        auto eventActivityIdControlResult = EventActivityIdControl(EVENT_ACTIVITY_CTRL_CREATE_SET_ID, &activityId);
        Assert(eventActivityIdControlResult == ERROR_SUCCESS);
#endif

        // If this thread was created on demand, we already have work to process and do not need to wait
        bool mustWait = parallelThread->synchronizeOnStartup;
        do
        {
            if (mustWait)
            {
                // Signal completion and wait for the next piece of work
                SetEvent(parallelThread->concurrentWorkDoneEvent);
                DWORD result = WaitForSingleObject(parallelThread->concurrentWorkReadyEvent, INFINITE);
                Assert(result == WAIT_OBJECT_0);
            }
            if (recycler->collectionState == CollectionStateExit)
            {
                // Exit the thread
                break;
            }

            // Invoke the workFunc to do the real work
            (recycler->*workFunc)();

            // We always wait after the first time
            mustWait = true;
        }
        while (true);

        // Signal to the main thread that we have stopped processing and will shut down.
        // Note that after this point we cannot access anything on the Recycler instance,
        // because the main thread may have torn it down already.
        SetEvent(parallelThread->concurrentWorkDoneEvent);
#if !defined(_UCRT)
        if (dllHandle)
        {
            FreeLibraryAndExitThread(dllHandle, 0);
        }
#endif
        ret = 0;
#if !DISABLE_SEH
    }
    __except (Recycler::ExceptFilter(GetExceptionInformation()))
    {
        Assert(false);
    }
#endif
    return ret;
}

// static
void
RecyclerParallelThread::StaticBackgroundWorkCallback(void * callbackData)
{
    RecyclerParallelThread * parallelThread = (RecyclerParallelThread *)callbackData;
    Recycler * recycler = parallelThread->recycler;
    RecyclerParallelThread::WorkFunc workFunc = parallelThread->workFunc;
    (recycler->*workFunc)();
    SetEvent(parallelThread->concurrentWorkDoneEvent);
}
#endif

#ifdef RECYCLER_TRACE
void
Recycler::CaptureCollectionParam(CollectionFlags flags, bool repeat)
{
    collectionParam.priorityBoostConcurrentSweepOverride = false;
    collectionParam.repeat = repeat;
    collectionParam.finishOnly = false;
    collectionParam.flags = flags;
    collectionParam.uncollectedAllocBytes = autoHeap.uncollectedAllocBytes;
#if ENABLE_PARTIAL_GC
    collectionParam.uncollectedNewPageCountPartialCollect = this->uncollectedNewPageCountPartialCollect;
    collectionParam.inPartialCollectMode = inPartialCollectMode;
    collectionParam.uncollectedNewPageCount = autoHeap.uncollectedNewPageCount;
    collectionParam.unusedPartialCollectFreeBytes = autoHeap.unusedPartialCollectFreeBytes;
#endif
}

void
Recycler::PrintCollectTrace(Js::Phase phase, bool finish, bool noConcurrentWork)
{
    if (GetRecyclerFlagsTable().Trace.IsEnabled(Js::RecyclerPhase) ||
        GetRecyclerFlagsTable().Trace.IsEnabled(phase))
    {
        const BOOL allocSize = collectionParam.flags & CollectHeuristic_AllocSize;
        const BOOL timedIfScriptActive = collectionParam.flags & CollectHeuristic_TimeIfScriptActive;
        const BOOL timedIfInScript = collectionParam.flags & CollectHeuristic_TimeIfInScript;
        const BOOL timed = (timedIfScriptActive && isScriptActive) || (timedIfInScript && isInScript) || (collectionParam.flags & CollectHeuristic_Time);
        const BOOL concurrent = collectionParam.flags & CollectMode_Concurrent;
        const BOOL finishConcurrent = collectionParam.flags & CollectOverride_FinishConcurrent;
        const BOOL exhaustive = collectionParam.flags & CollectMode_Exhaustive;
        const BOOL forceInThread = collectionParam.flags & CollectOverride_ForceInThread;
        const BOOL forceFinish = collectionParam.flags & CollectOverride_ForceFinish;
#if ENABLE_PARTIAL_GC
        BOOL partial = collectionParam.flags & CollectMode_Partial;
#endif
        Output::Print(_u("%04X> RC(%p): %s%s%s%s%s%s%s:"), this->mainThreadId, this,
            collectionParam.domCollect ? _u("[DOM] ") : _u(""),
            collectionParam.repeat ? _u("[Repeat] ") : _u(""),
            this->inDispose ? _u("[Nested]") : _u(""),
            forceInThread ? _u("Force In thread ") : _u(""),
            finish ? _u("Finish ") : _u(""),
            exhaustive ? _u("Exhaustive ") : _u(""),
            Js::PhaseNames[phase]);

        if (noConcurrentWork)
        {
            Assert(finish);
            Output::Print(_u(" No concurrent work"));
        }
        else if (collectionParam.finishOnly)
        {
            Assert(!collectionParam.repeat);
            Assert(finish);
#if ENABLE_CONCURRENT_GC
            if (collectionState == CollectionStateRescanWait)
            {
                if (forceFinish)
                {
                    Output::Print(_u(" Force finish mark and sweep"));
                }
                else if (concurrent && this->enableConcurrentSweep)
                {
                    if (!collectionParam.priorityBoostConcurrentSweepOverride)
                    {
                        Output::Print(_u(" Finish mark and start concurrent sweep"));
                    }
                    else
                    {
                        Output::Print(_u(" Finish mark and sweep (priority boost overridden concurrent sweep)"));
                    }
                }
                else
                {
                    Output::Print(_u(" Finish mark and sweep"));
                }
            }
            else
            {
                Assert(collectionState == CollectionStateTransferSweptWait);
                if (forceFinish)
                {
                    Output::Print(_u(" Force finish sweep"));
                }
                else
                {
                    Output::Print(_u(" Finish sweep"));
                }
            }
#endif // ENABLE_CONCURRENT_GC
        }
        else
        {
            if (finish && !concurrent)
            {
                Output::Print(_u(" Not concurrent collect"));
            }
            if ((finish && finishConcurrent))
            {
                Output::Print(_u(" No heuristic"));
            }
#if ENABLE_CONCURRENT_GC
            else if (finish && priorityBoost)
            {
                Output::Print(_u(" Priority boost no heuristic"));
            }
#endif
            else
            {
                Output::SkipToColumn(50);
                bool byteCountUsed = false;
                bool timeUsed = false;
#if ENABLE_PARTIAL_GC
                bool newPageUsed = false;
                if (phase == Js::PartialCollectPhase || phase == Js::ConcurrentPartialCollectPhase)
                {
                    Assert(collectionParam.flags & CollectMode_Partial);
                    newPageUsed = !!allocSize;
                }
                else if (partial && collectionParam.inPartialCollectMode && collectionParam.uncollectedNewPageCount > collectionParam.uncollectedNewPageCountPartialCollect)
                {
                    newPageUsed = true;
                }
                else
#endif // ENABLE_PARTIAL_GC
                {
                    byteCountUsed = !!allocSize;
                    timeUsed = !!timed;
                }
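
                // Legend for the line printed below (reading off the Print calls):
                // B = uncollected alloc bytes, T = time delta to the scheduled
                // collection, L = partial-collect new-page threshold, P =
                // uncollected new pages (and bytes), U = unused partial-collect
                // free bytes. A '*' prefix marks a heuristic that was decisive,
                // a space one that was requested but not decisive, and '~' one
                // that was not requested.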
                Output::Print(byteCountUsed ? _u("*") : (allocSize ? _u(" ") : _u("~")));
                Output::Print(_u("B:%8d "), collectionParam.uncollectedAllocBytes);
                Output::Print(timeUsed ? _u("*") : (timed ? _u(" ") : _u("~")));
                Output::Print(_u("T:%4d "), -collectionParam.timeDiff);
#if ENABLE_PARTIAL_GC
                if (collectionParam.inPartialCollectMode)
                {
                    Output::Print(_u("L:%5d "), collectionParam.uncollectedNewPageCountPartialCollect);
                }
                else
                {
                    Output::Print(_u("L:----- "));
                }
                Output::Print(newPageUsed ? _u("*") : (partial ? _u(" ") : _u("~")));
                Output::Print(_u("P:%5d(%9d) "), collectionParam.uncollectedNewPageCount, collectionParam.uncollectedNewPageCount * AutoSystemInfo::PageSize);
                Output::Print(_u("U:%8d"), collectionParam.unusedPartialCollectFreeBytes);
#endif // ENABLE_PARTIAL_GC
            }
        }
        Output::Print(_u("\n"));
        Output::Flush();
    }
}
#endif
#ifdef RECYCLER_TRACE
void
Recycler::PrintBlockStatus(HeapBucket * heapBucket, HeapBlock * heapBlock, char16 const * statusMessage)
{
    if (this->GetRecyclerFlagsTable().Trace.IsEnabled(Js::ConcurrentSweepPhase) && CONFIG_FLAG_RELEASE(Verbose))
    {
        Output::Print(_u("[GC #%d] [HeapBucket 0x%p] HeapBlock 0x%p %s [CollectionState: %d] \n"), this->collectionCount, heapBucket, heapBlock, statusMessage, static_cast<CollectionState>(this->collectionState));
    }
}
#endif
#ifdef RECYCLER_STATS
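// The RECYCLER_STATS helpers below render collectionStats as fixed-width tables.
// PrintHeapBlockStats prints, for one heap block type, the live/free/total block
// counts and free percentage, plus swept and concurrently-swept counts (with
// ratios) for the small block types.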
void
Recycler::PrintHeapBlockStats(char16 const * name, HeapBlock::HeapBlockType type)
{
    size_t liveCount = collectionStats.heapBlockCount[type] - collectionStats.heapBlockFreeCount[type];
    Output::Print(_u(" %6s : %5d %5d %5d %5.1f"), name,
        liveCount, collectionStats.heapBlockFreeCount[type], collectionStats.heapBlockCount[type],
        (double)collectionStats.heapBlockFreeCount[type] / (double)collectionStats.heapBlockCount[type] * 100);
    if (type < HeapBlock::SmallBlockTypeCount)
    {
        Output::Print(_u(" : %5d %6.1f : %5d %6.1f"),
            collectionStats.heapBlockSweptCount[type],
            (double)collectionStats.heapBlockSweptCount[type] / (double)liveCount * 100,
            collectionStats.heapBlockConcurrentSweptCount[type],
            (double)collectionStats.heapBlockConcurrentSweptCount[type] / (double)collectionStats.heapBlockSweptCount[type] * 100);
    }
}
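// Byte-level counterpart of PrintHeapBlockStats: live bytes, allocable free bytes,
// the partial-GC unusable portion (for the block types partial collect can reuse),
// total committed bytes, and the derived percentages.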
void
Recycler::PrintHeapBlockMemoryStats(char16 const * name, HeapBlock::HeapBlockType type)
{
    size_t allocableFreeByteCount = collectionStats.heapBlockFreeByteCount[type];
#if ENABLE_PARTIAL_GC
    size_t partialUnusedBytes = 0;
    if (this->enablePartialCollect)
    {
        partialUnusedBytes = allocableFreeByteCount
            - collectionStats.smallNonLeafHeapBlockPartialReuseBytes[type];
        allocableFreeByteCount -= partialUnusedBytes;
    }
#endif
    size_t blockPages = type < HeapBlock::HeapBlockType::SmallAllocBlockTypeCount ?
        SmallAllocationBlockAttributes::PageCount : MediumAllocationBlockAttributes::PageCount;
    size_t totalByteCount = (collectionStats.heapBlockCount[type] - collectionStats.heapBlockFreeCount[type]) * blockPages * AutoSystemInfo::PageSize;
    size_t liveByteCount = totalByteCount - collectionStats.heapBlockFreeByteCount[type];
    Output::Print(_u(" %6s: %10d %10d"), name, liveByteCount, allocableFreeByteCount);
#if ENABLE_PARTIAL_GC
    if (this->enablePartialCollect &&
        (type == HeapBlock::HeapBlockType::SmallNormalBlockType
        || type == HeapBlock::HeapBlockType::SmallFinalizableBlockType
#ifdef RECYCLER_WRITE_BARRIER
        || type == HeapBlock::HeapBlockType::SmallNormalBlockWithBarrierType
        || type == HeapBlock::HeapBlockType::SmallFinalizableBlockWithBarrierType
#endif
        || type == HeapBlock::HeapBlockType::MediumNormalBlockType
        || type == HeapBlock::HeapBlockType::MediumFinalizableBlockType
#ifdef RECYCLER_WRITE_BARRIER
        || type == HeapBlock::HeapBlockType::MediumNormalBlockWithBarrierType
        || type == HeapBlock::HeapBlockType::MediumFinalizableBlockWithBarrierType
#endif
        ))
    {
        Output::Print(_u(" %10d"), partialUnusedBytes);
    }
    else
#endif
    {
        Output::Print(_u(" "));
    }
    Output::Print(_u(" %10d %6.1f"), totalByteCount,
        (double)allocableFreeByteCount / (double)totalByteCount * 100);
#if ENABLE_PARTIAL_GC
    if (this->enablePartialCollect &&
        (type == HeapBlock::HeapBlockType::SmallNormalBlockType
        || type == HeapBlock::HeapBlockType::SmallFinalizableBlockType
#ifdef RECYCLER_WRITE_BARRIER
        || type == HeapBlock::HeapBlockType::SmallNormalBlockWithBarrierType
        || type == HeapBlock::HeapBlockType::SmallFinalizableBlockWithBarrierType
#endif
        || type == HeapBlock::HeapBlockType::MediumNormalBlockType
        || type == HeapBlock::HeapBlockType::MediumFinalizableBlockType
#ifdef RECYCLER_WRITE_BARRIER
        || type == HeapBlock::HeapBlockType::MediumNormalBlockWithBarrierType
        || type == HeapBlock::HeapBlockType::MediumFinalizableBlockWithBarrierType
#endif
        ))
    {
        Output::Print(_u(" %6.1f"), (double)partialUnusedBytes / (double)totalByteCount * 100);
    }
#endif
}
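// Dumps the GC trigger heuristics table: allocated bytes at start/continue/finish,
// new page counts, finish-collect attempts, and (under partial GC) the cost and
// efficacy estimates that feed the partial collect decision.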
void
Recycler::PrintHeuristicCollectionStats()
{
    Output::Print(_u("---------------------------------------------------------------------------------------------------------------\n"));
    Output::Print(_u("GC Trigger : %10s %10s %10s"), _u("Start"), _u("Continue"), _u("Finish"));
#if ENABLE_PARTIAL_GC
    if (this->enablePartialCollect)
    {
        Output::Print(_u(" | Heuristics : %10s %10s %5s"), _u(""), _u(""), _u("%"));
    }
#endif
    Output::Print(_u("\n"));
    Output::Print(_u("---------------------------------------------------------------------------------------------------------------\n"));
    Output::Print(_u(" Alloc bytes : %10d %10d %10d"), collectionStats.startCollectAllocBytes, collectionStats.continueCollectAllocBytes, this->autoHeap.uncollectedAllocBytes);
#if ENABLE_PARTIAL_GC
    if (this->enablePartialCollect)
    {
        Output::Print(_u(" | Cost : %10d %10d %5.1f"), collectionStats.rescanRootBytes, collectionStats.estimatedPartialReuseBytes, collectionStats.collectCost * 100);
    }
#endif
    Output::Print(_u("\n"));
#if ENABLE_PARTIAL_GC
    if (this->enablePartialCollect)
    {
        Output::Print(_u(" | Efficacy : %10s %10s %5.1f\n"), _u(""), _u(""), collectionStats.collectEfficacy * 100);
    }
#endif
#if ENABLE_PARTIAL_GC
    if (this->enablePartialCollect)
    {
        Output::Print(_u(" New page : %10d %10s %10d"), collectionStats.startCollectNewPageCount, _u(""), autoHeap.uncollectedNewPageCount);
        Output::Print(_u(" | Partial Uncollect New Page : %10d %10d"), collectionStats.uncollectedNewPageCountPartialCollect * AutoSystemInfo::PageSize, this->uncollectedNewPageCountPartialCollect * AutoSystemInfo::PageSize);
        Output::Print(_u("\n"));
    }
#endif
    Output::Print(_u(" Finish try : %10d %10s %10s"), collectionStats.finishCollectTryCount, _u(""), _u(""));
#if ENABLE_PARTIAL_GC
    if (this->enablePartialCollect)
    {
        Output::Print(_u(" | Partial Reuse Min Free Bytes : %10d"), collectionStats.partialCollectSmallHeapBlockReuseMinFreeBytes * AutoSystemInfo::PageSize);
    }
#endif
    Output::Print(_u("\n"));
}
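// Dumps mark phase statistics. nonMark counts candidate pointers that did not mark
// an object; invalidCount is what remains of nonMark after subtracting the known
// causes (null, unaligned, non-recycler memory, interior null/non-GC); leafCount is
// marked objects whose contents were not scanned for further references.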
void
Recycler::PrintMarkCollectionStats()
{
    size_t nonMark = collectionStats.tryMarkCount + collectionStats.tryMarkInteriorCount - collectionStats.remarkCount - collectionStats.markData.markCount;
    size_t invalidCount = nonMark - collectionStats.tryMarkNullCount - collectionStats.tryMarkUnalignedCount
        - collectionStats.tryMarkNonRecyclerMemoryCount
        - collectionStats.tryMarkInteriorNonRecyclerMemoryCount
        - collectionStats.tryMarkInteriorNullCount;
    size_t leafCount = collectionStats.markData.markCount - collectionStats.scanCount;
    Output::Print(_u("---------------------------------------------------------------------------------------------------------------\n"));
    Output::Print(_u("Try Mark :%9s %5s %10s | Non-Mark : %9s %5s | Mark :%9s %5s \n"), _u("Count"), _u("%"), _u("Bytes"), _u("Count"), _u("%"), _u("Count"), _u("%"));
    Output::Print(_u("---------------------------------------------------------------------------------------------------------------\n"));
    Output::Print(_u(" TryMark :%9d %10d | Null : %9d %5.1f | Scan :%9d %5.1f\n"),
        collectionStats.tryMarkCount, collectionStats.tryMarkCount * sizeof(void *),
        collectionStats.tryMarkNullCount, (double)collectionStats.tryMarkNullCount / (double)nonMark * 100,
        collectionStats.scanCount, (double)collectionStats.scanCount / (double)collectionStats.markData.markCount * 100);
    Output::Print(_u(" Non-Mark :%9d %5.1f | Unaligned : %9d %5.1f | Leaf :%9d %5.1f\n"),
        nonMark, (double)nonMark / (double)collectionStats.tryMarkCount * 100,
        collectionStats.tryMarkUnalignedCount, (double)collectionStats.tryMarkUnalignedCount / (double)nonMark * 100,
        leafCount, (double)leafCount / (double)collectionStats.markData.markCount * 100);
    Output::Print(_u(" Mark :%9d %5.1f %10d | Non GC : %9d %5.1f | Track :%9d\n"),
        collectionStats.markData.markCount, (double)collectionStats.markData.markCount / (double)collectionStats.tryMarkCount * 100, collectionStats.markData.markBytes,
        collectionStats.tryMarkNonRecyclerMemoryCount, (double)collectionStats.tryMarkNonRecyclerMemoryCount / (double)nonMark * 100,
        collectionStats.trackCount);
    Output::Print(_u(" Remark :%9d %5.1f | Invalid : %9d %5.1f \n"),
        collectionStats.remarkCount, (double)collectionStats.remarkCount / (double)collectionStats.tryMarkCount * 100,
        invalidCount, (double)invalidCount / (double)nonMark * 100);
    Output::Print(_u(" TryMark Int:%9d %10d | Null Int : %9d %5.1f | Root :%9d | New :%9d\n"),
        collectionStats.tryMarkInteriorCount, collectionStats.tryMarkInteriorCount * sizeof(void *),
        collectionStats.tryMarkInteriorNullCount, (double)collectionStats.tryMarkInteriorNullCount / (double)nonMark * 100,
        collectionStats.rootCount, collectionStats.markThruNewObjCount);
    Output::Print(_u(" | Non GC Int: %9d %5.1f | Stack :%9d | NewFalse:%9d\n"),
        collectionStats.tryMarkInteriorNonRecyclerMemoryCount, (double)collectionStats.tryMarkInteriorNonRecyclerMemoryCount / (double)nonMark * 100,
        collectionStats.stackCount, collectionStats.markThruFalseNewObjCount);
}
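// Prints one row of background (concurrent) mark data: rescanned small/large page,
// object, and byte counts, the background mark count, and its share of the total
// mark count. PrintBackgroundCollectionStats below converts the cumulative
// repeat-mark rows into per-pass deltas before printing each row.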
void
Recycler::PrintBackgroundCollectionStat(RecyclerCollectionStats::MarkData const& markData)
{
    Output::Print(_u("BgSmall : %5d %6d %10d | BgLarge : %5d %6d %10d | BgMark :%9d "),
        markData.rescanPageCount,
        markData.rescanObjectCount,
        markData.rescanObjectByteCount,
        markData.rescanLargePageCount,
        markData.rescanLargeObjectCount,
        markData.rescanLargeByteCount,
        markData.markCount);
    double markRatio = (double)markData.markCount / (double)collectionStats.markData.markCount * 100;
    if (markRatio == 100.0)
    {
        Output::Print(_u(" 100"));
    }
    else
    {
        Output::Print(_u("%4.1f"), markRatio);
    }
    Output::Print(_u("\n"));
}

void
Recycler::PrintBackgroundCollectionStats()
{
#if ENABLE_CONCURRENT_GC
    Output::Print(_u("---------------------------------------------------------------------------------------------------------------\n"));
    Output::Print(_u("BgSmall : %5s %6s %10s | BgLarge : %5s %6s %10s | BgMark :%9s %4s %s\n"),
        _u("Pages"), _u("Count"), _u("Bytes"), _u("Pages"), _u("Count"), _u("Bytes"), _u("Count"), _u("%"), _u("NonLeafBytes %"));
    Output::Print(_u("---------------------------------------------------------------------------------------------------------------\n"));
    this->PrintBackgroundCollectionStat(collectionStats.backgroundMarkData[0]);
    for (uint repeatCount = 1; repeatCount < RecyclerHeuristic::MaxBackgroundRepeatMarkCount; repeatCount++)
    {
        if (collectionStats.backgroundMarkData[repeatCount].markCount == 0)
        {
            break;
        }
        collectionStats.backgroundMarkData[repeatCount].rescanPageCount -= collectionStats.backgroundMarkData[repeatCount - 1].rescanPageCount;
        collectionStats.backgroundMarkData[repeatCount].rescanObjectCount -= collectionStats.backgroundMarkData[repeatCount - 1].rescanObjectCount;
        collectionStats.backgroundMarkData[repeatCount].rescanObjectByteCount -= collectionStats.backgroundMarkData[repeatCount - 1].rescanObjectByteCount;
        collectionStats.backgroundMarkData[repeatCount].rescanLargePageCount -= collectionStats.backgroundMarkData[repeatCount - 1].rescanLargePageCount;
        collectionStats.backgroundMarkData[repeatCount].rescanLargeObjectCount -= collectionStats.backgroundMarkData[repeatCount - 1].rescanLargeObjectCount;
        collectionStats.backgroundMarkData[repeatCount].rescanLargeByteCount -= collectionStats.backgroundMarkData[repeatCount - 1].rescanLargeByteCount;
        this->PrintBackgroundCollectionStat(collectionStats.backgroundMarkData[repeatCount]);
    }
#endif
}
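// Memory summary per heap block type: live, free, unused, and total bytes with
// free/unused percentages, followed by the small block zeroing stats for this GC.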
void
Recycler::PrintMemoryStats()
{
    Output::Print(_u("----------------------------------------------------------------------------------------------------------------\n"));
    Output::Print(_u("Memory (Bytes) %4s %10s %10s %10s %6s %6s\n"), _u("Live"), _u("Free"), _u("Unused"), _u("Total"), _u("Free%"), _u("Unused%"));
    Output::Print(_u("----------------------------------------------------------------------------------------------------------------\n"));
    PrintHeapBlockMemoryStats(_u("Small"), HeapBlock::SmallNormalBlockType);
    Output::Print(_u("\n"));
    PrintHeapBlockMemoryStats(_u("SmFin"), HeapBlock::SmallFinalizableBlockType);
    Output::Print(_u("\n"));
#ifdef RECYCLER_WRITE_BARRIER
    PrintHeapBlockMemoryStats(_u("SmSWB"), HeapBlock::SmallNormalBlockWithBarrierType);
    Output::Print(_u("\n"));
    PrintHeapBlockMemoryStats(_u("SmFinSWB"), HeapBlock::SmallFinalizableBlockWithBarrierType);
    Output::Print(_u("\n"));
#endif
    PrintHeapBlockMemoryStats(_u("SmLeaf"), HeapBlock::SmallLeafBlockType);
    Output::Print(_u("\n"));
    PrintHeapBlockMemoryStats(_u("Medium"), HeapBlock::MediumNormalBlockType);
    Output::Print(_u("\n"));
    PrintHeapBlockMemoryStats(_u("MdFin"), HeapBlock::MediumFinalizableBlockType);
    Output::Print(_u("\n"));
#ifdef RECYCLER_WRITE_BARRIER
    PrintHeapBlockMemoryStats(_u("MdSWB"), HeapBlock::MediumNormalBlockWithBarrierType);
    Output::Print(_u("\n"));
    PrintHeapBlockMemoryStats(_u("MdFinSWB"), HeapBlock::MediumFinalizableBlockWithBarrierType);
    Output::Print(_u("\n"));
#endif
    PrintHeapBlockMemoryStats(_u("MdLeaf"), HeapBlock::MediumLeafBlockType);
    Output::Print(_u("\n"));
    size_t largeHeapBlockUnusedByteCount = collectionStats.largeHeapBlockTotalByteCount - collectionStats.largeHeapBlockUsedByteCount
        - collectionStats.heapBlockFreeByteCount[HeapBlock::LargeBlockType];
    Output::Print(_u(" Large: %10d %10d %10d %10d %6.1f %6.1f\n"),
        collectionStats.largeHeapBlockUsedByteCount,
        collectionStats.heapBlockFreeByteCount[HeapBlock::LargeBlockType],
        largeHeapBlockUnusedByteCount,
        collectionStats.largeHeapBlockTotalByteCount,
        (double)collectionStats.heapBlockFreeByteCount[HeapBlock::LargeBlockType] / (double)collectionStats.largeHeapBlockTotalByteCount * 100,
        (double)largeHeapBlockUnusedByteCount / (double)collectionStats.largeHeapBlockTotalByteCount * 100);
    Output::Print(_u("\nSmall heap block zeroing stats since last GC\n"));
  6527. Output::Print(_u("Number of blocks with sweep state empty: normal=%d finalizable=%d leaf=%d\nNumber of blocks zeroed: %d\n"),
  6528. collectionStats.numEmptySmallBlocks[HeapBlock::SmallNormalBlockType]
  6529. #ifdef RECYCLER_WRITE_BARRIER
  6530. + collectionStats.numEmptySmallBlocks[HeapBlock::SmallNormalBlockWithBarrierType]
  6531. #endif
  6532. , collectionStats.numEmptySmallBlocks[HeapBlock::SmallFinalizableBlockType]
  6533. #ifdef RECYCLER_WRITE_BARRIER
  6534. + collectionStats.numEmptySmallBlocks[HeapBlock::SmallFinalizableBlockWithBarrierType]
  6535. #endif
  6536. + collectionStats.numEmptySmallBlocks[HeapBlock::MediumNormalBlockType]
  6537. #ifdef RECYCLER_WRITE_BARRIER
  6538. + collectionStats.numEmptySmallBlocks[HeapBlock::MediumNormalBlockWithBarrierType]
  6539. #endif
  6540. , collectionStats.numEmptySmallBlocks[HeapBlock::MediumFinalizableBlockType]
  6541. #ifdef RECYCLER_WRITE_BARRIER
  6542. + collectionStats.numEmptySmallBlocks[HeapBlock::MediumFinalizableBlockWithBarrierType]
  6543. #endif
  6544. , collectionStats.numEmptySmallBlocks[HeapBlock::SmallLeafBlockType]
  6545. + collectionStats.numEmptySmallBlocks[HeapBlock::MediumLeafBlockType],
  6546. collectionStats.numZeroedOutSmallBlocks);
  6547. }
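// Top-level RECYCLER_STATS report: heuristic, mark, background mark, and sweep
// tables, then per-block-type swept-block rows (with partial reuse/unused columns
// when partial collect is enabled), ending with PrintMemoryStats.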
void
Recycler::PrintCollectStats()
{
    Output::Print(_u("Collection Stats:\n"));
    PrintHeuristicCollectionStats();
    PrintMarkCollectionStats();
    PrintBackgroundCollectionStats();
    size_t freeCount = collectionStats.objectSweptCount - collectionStats.objectSweptFreeListCount;
    size_t freeBytes = collectionStats.objectSweptBytes - collectionStats.objectSweptFreeListBytes;
    Output::Print(_u("---------------------------------------------------------------------------------------------------------------\n"));
#if ENABLE_PARTIAL_GC || ENABLE_CONCURRENT_GC
    Output::Print(_u("Rescan : %5s %6s %10s | Track : %5s | "), _u("Pages"), _u("Count"), _u("Bytes"), _u("Count"));
#endif
  6561. Output::Print(_u("Sweep : %7s | SweptObj : %5s %5s %10s\n"), _u("Count"), _u("Count"), _u("%%"), _u("Bytes"));
  6562. Output::Print(_u("---------------------------------------------------------------------------------------------------------------\n"));
  6563. Output::Print(_u(" Small : "));
  6564. #if ENABLE_PARTIAL_GC || ENABLE_CONCURRENT_GC
  6565. Output::Print(_u("%5d %6d %10d | "), collectionStats.markData.rescanPageCount, collectionStats.markData.rescanObjectCount, collectionStats.markData.rescanObjectByteCount);
  6566. #endif
  6567. #if ENABLE_CONCURRENT_GC
  6568. Output::Print(_u("Process : %5d | "), collectionStats.trackedObjectCount);
  6569. #else
  6570. Output::Print(_u(" | "));
  6571. #endif
  6572. Output::Print(_u(" Scan : %7d | Free : %6d %5.1f %10d\n"),
  6573. collectionStats.objectSweepScanCount,
  6574. freeCount, (double)freeCount / (double) collectionStats.objectSweptCount * 100, freeBytes);
  6575. Output::Print(_u(" Large : "));
  6576. #if ENABLE_PARTIAL_GC || ENABLE_CONCURRENT_GC
  6577. Output::Print(_u("%5d %6d %10d | "),
  6578. collectionStats.markData.rescanLargePageCount, collectionStats.markData.rescanLargeObjectCount, collectionStats.markData.rescanLargeByteCount);
  6579. #endif
  6580. #if ENABLE_PARTIAL_GC
  6581. Output::Print(_u("Client : %5d | "), collectionStats.clientTrackedObjectCount);
  6582. #else
  6583. Output::Print(_u(" | "));
  6584. #endif
  6585. Output::Print(_u(" Finalize : %7d | Free List: %6d %5.1f %10d\n"),
  6586. collectionStats.finalizeSweepCount,
  6587. collectionStats.objectSweptFreeListCount, (double)collectionStats.objectSweptFreeListCount / (double) collectionStats.objectSweptCount * 100, collectionStats.objectSweptFreeListBytes);
  6588. Output::Print(_u("---------------------------------------------------------------------------------------------------------------\n"));
  6589. Output::Print(_u("SweptBlk: Live Free Total Free%% : Swept Swept%% : CSwpt CSwpt%%"));
  6590. #if ENABLE_PARTIAL_GC
  6591. if (this->enablePartialCollect)
  6592. {
  6593. Output::Print(_u(" | Partial : Count Bytes Existing"));
  6594. }
  6595. #endif
  6596. Output::Print(_u("\n"));
  6597. Output::Print(_u("---------------------------------------------------------------------------------------------------------------\n"));
  6598. PrintHeapBlockStats(_u("Small"), HeapBlock::SmallNormalBlockType);
  6599. #if ENABLE_PARTIAL_GC
  6600. if (this->enablePartialCollect)
  6601. {
  6602. Output::Print(_u(" | Reuse : %5d %10d %10d"),
  6603. collectionStats.smallNonLeafHeapBlockPartialReuseCount[HeapBlock::SmallNormalBlockType],
  6604. collectionStats.smallNonLeafHeapBlockPartialReuseBytes[HeapBlock::MediumNormalBlockType],
  6605. collectionStats.smallNonLeafHeapBlockPartialReuseCount[HeapBlock::SmallNormalBlockType] * AutoSystemInfo::PageSize
  6606. - collectionStats.smallNonLeafHeapBlockPartialReuseBytes[HeapBlock::SmallNormalBlockType]);
    }
#endif
    Output::Print(_u("\n"));
    PrintHeapBlockStats(_u("SmFin"), HeapBlock::SmallFinalizableBlockType);
#if ENABLE_PARTIAL_GC
    if (this->enablePartialCollect)
    {
        Output::Print(_u(" | Unused : %5d %10d %10d"),
            collectionStats.smallNonLeafHeapBlockPartialUnusedCount[HeapBlock::SmallFinalizableBlockType],
            collectionStats.smallNonLeafHeapBlockPartialUnusedBytes[HeapBlock::SmallFinalizableBlockType],
            collectionStats.smallNonLeafHeapBlockPartialUnusedCount[HeapBlock::SmallFinalizableBlockType] * AutoSystemInfo::PageSize
            - collectionStats.smallNonLeafHeapBlockPartialUnusedBytes[HeapBlock::SmallFinalizableBlockType]);
    }
#endif
    Output::Print(_u("\n"));
#ifdef RECYCLER_WRITE_BARRIER
    PrintHeapBlockStats(_u("SmSWB"), HeapBlock::SmallNormalBlockWithBarrierType);
#if ENABLE_PARTIAL_GC
    if (this->enablePartialCollect)
    {
        Output::Print(_u(" | Unused : %5d %10d %10d"),
            collectionStats.smallNonLeafHeapBlockPartialUnusedCount[HeapBlock::SmallNormalBlockWithBarrierType],
            collectionStats.smallNonLeafHeapBlockPartialUnusedBytes[HeapBlock::SmallNormalBlockWithBarrierType],
            collectionStats.smallNonLeafHeapBlockPartialUnusedCount[HeapBlock::SmallNormalBlockWithBarrierType] * AutoSystemInfo::PageSize
            - collectionStats.smallNonLeafHeapBlockPartialUnusedBytes[HeapBlock::SmallNormalBlockWithBarrierType]);
    }
#endif
    Output::Print(_u("\n"));
  6635. PrintHeapBlockStats(_u("SmFin"), HeapBlock::SmallFinalizableBlockWithBarrierType);
#if ENABLE_PARTIAL_GC
    if (this->enablePartialCollect)
    {
        Output::Print(_u(" | Unused : %5d %10d %10d"),
            collectionStats.smallNonLeafHeapBlockPartialUnusedCount[HeapBlock::SmallFinalizableBlockWithBarrierType],
            collectionStats.smallNonLeafHeapBlockPartialUnusedBytes[HeapBlock::SmallFinalizableBlockWithBarrierType],
            collectionStats.smallNonLeafHeapBlockPartialUnusedCount[HeapBlock::SmallFinalizableBlockWithBarrierType] * AutoSystemInfo::PageSize
            - collectionStats.smallNonLeafHeapBlockPartialUnusedBytes[HeapBlock::SmallFinalizableBlockWithBarrierType]);
    }
#endif
    Output::Print(_u("\n"));
#endif
    // TODO: This seems suspicious- why are we looking at smallNonLeaf while print out leaf...
    PrintHeapBlockStats(_u("SmLeaf"), HeapBlock::SmallLeafBlockType);
#if ENABLE_PARTIAL_GC
    if (this->enablePartialCollect)
    {
        Output::Print(_u(" | ReuseFin : %5d %10d %10d"),
            collectionStats.smallNonLeafHeapBlockPartialReuseCount[HeapBlock::SmallFinalizableBlockType],
            collectionStats.smallNonLeafHeapBlockPartialReuseBytes[HeapBlock::SmallFinalizableBlockType],
            collectionStats.smallNonLeafHeapBlockPartialReuseCount[HeapBlock::SmallFinalizableBlockType] * AutoSystemInfo::PageSize
            - collectionStats.smallNonLeafHeapBlockPartialReuseBytes[HeapBlock::SmallFinalizableBlockType]);
    }
#endif
    Output::Print(_u("\n"));
    PrintHeapBlockStats(_u("Medium"), HeapBlock::MediumNormalBlockType);
#if ENABLE_PARTIAL_GC
    if (this->enablePartialCollect)
    {
        Output::Print(_u(" | Reuse : %5d %10d %10d"),
            collectionStats.smallNonLeafHeapBlockPartialReuseCount[HeapBlock::MediumNormalBlockType],
            collectionStats.smallNonLeafHeapBlockPartialReuseBytes[HeapBlock::MediumNormalBlockType],
            collectionStats.smallNonLeafHeapBlockPartialReuseCount[HeapBlock::MediumNormalBlockType] * AutoSystemInfo::PageSize
            - collectionStats.smallNonLeafHeapBlockPartialReuseBytes[HeapBlock::MediumNormalBlockType]);
    }
#endif
    Output::Print(_u("\n"));
    PrintHeapBlockStats(_u("MdFin"), HeapBlock::MediumFinalizableBlockType);
#if ENABLE_PARTIAL_GC
    if (this->enablePartialCollect)
    {
        Output::Print(_u(" | Unused : %5d %10d %10d"),
            collectionStats.smallNonLeafHeapBlockPartialUnusedCount[HeapBlock::MediumFinalizableBlockType],
            collectionStats.smallNonLeafHeapBlockPartialUnusedBytes[HeapBlock::MediumFinalizableBlockType],
            collectionStats.smallNonLeafHeapBlockPartialUnusedCount[HeapBlock::MediumFinalizableBlockType] * AutoSystemInfo::PageSize
            - collectionStats.smallNonLeafHeapBlockPartialUnusedBytes[HeapBlock::MediumFinalizableBlockType]);
    }
#endif
    Output::Print(_u("\n"));
#ifdef RECYCLER_WRITE_BARRIER
    PrintHeapBlockStats(_u("MdSWB"), HeapBlock::MediumNormalBlockWithBarrierType);
#if ENABLE_PARTIAL_GC
    if (this->enablePartialCollect)
    {
        Output::Print(_u(" | Unused : %5d %10d %10d"),
            collectionStats.smallNonLeafHeapBlockPartialUnusedCount[HeapBlock::MediumNormalBlockWithBarrierType],
            collectionStats.smallNonLeafHeapBlockPartialUnusedBytes[HeapBlock::MediumNormalBlockWithBarrierType],
            collectionStats.smallNonLeafHeapBlockPartialUnusedCount[HeapBlock::MediumNormalBlockWithBarrierType] * AutoSystemInfo::PageSize
            - collectionStats.smallNonLeafHeapBlockPartialUnusedBytes[HeapBlock::MediumNormalBlockWithBarrierType]);
    }
#endif
    Output::Print(_u("\n"));
  6698. PrintHeapBlockStats(_u("MdFin"), HeapBlock::MediumFinalizableBlockWithBarrierType);
#if ENABLE_PARTIAL_GC
    if (this->enablePartialCollect)
    {
        Output::Print(_u(" | Unused : %5d %10d %10d"),
            collectionStats.smallNonLeafHeapBlockPartialUnusedCount[HeapBlock::MediumFinalizableBlockWithBarrierType],
            collectionStats.smallNonLeafHeapBlockPartialUnusedBytes[HeapBlock::MediumFinalizableBlockWithBarrierType],
            collectionStats.smallNonLeafHeapBlockPartialUnusedCount[HeapBlock::MediumFinalizableBlockWithBarrierType] * AutoSystemInfo::PageSize
            - collectionStats.smallNonLeafHeapBlockPartialUnusedBytes[HeapBlock::MediumFinalizableBlockWithBarrierType]);
    }
#endif
    Output::Print(_u("\n"));
#endif
    // TODO: This seems suspicious- why are we looking at smallNonLeaf while print out leaf...
  6712. PrintHeapBlockStats(_u("MdLeaf"), HeapBlock::MediumNormalBlockType);
#if ENABLE_PARTIAL_GC
    if (this->enablePartialCollect)
    {
        Output::Print(_u(" | ReuseFin : %5d %10d %10d"),
            collectionStats.smallNonLeafHeapBlockPartialReuseCount[HeapBlock::MediumFinalizableBlockType],
            collectionStats.smallNonLeafHeapBlockPartialReuseBytes[HeapBlock::MediumFinalizableBlockType],
            collectionStats.smallNonLeafHeapBlockPartialReuseCount[HeapBlock::MediumFinalizableBlockType] * AutoSystemInfo::PageSize
            - collectionStats.smallNonLeafHeapBlockPartialReuseBytes[HeapBlock::MediumFinalizableBlockType]);
    }
#endif
    Output::Print(_u("\n"));
    // TODO: This can't possibly be correct...check on this later
    PrintHeapBlockStats(_u("Large"), HeapBlock::LargeBlockType);
#if ENABLE_PARTIAL_GC
    if (this->enablePartialCollect)
    {
        Output::Print(_u(" | UnusedFin : %5d %10d %10d"),
            collectionStats.smallNonLeafHeapBlockPartialUnusedCount[HeapBlock::SmallFinalizableBlockType],
            collectionStats.smallNonLeafHeapBlockPartialUnusedBytes[HeapBlock::SmallFinalizableBlockType],
            collectionStats.smallNonLeafHeapBlockPartialUnusedCount[HeapBlock::SmallFinalizableBlockType] * AutoSystemInfo::PageSize
            - collectionStats.smallNonLeafHeapBlockPartialUnusedBytes[HeapBlock::SmallFinalizableBlockType]);
    }
#endif
    Output::Print(_u("\n"));
    PrintMemoryStats();
    Output::Flush();
}
#endif
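// RECYCLER_PAGE_HEAP: after an allocation in page heap mode, check that the fill
// pattern around the object is still intact.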
#ifdef RECYCLER_PAGE_HEAP
void Recycler::VerifyPageHeapFillAfterAlloc(char* memBlock, size_t size, ObjectInfoBits attributes)
{
    Assert(memBlock != nullptr);
    if (IsPageHeapEnabled())
    {
        HeapBlock* heapBlock = this->FindHeapBlock(memBlock);
        Assert(heapBlock);
        if (heapBlock->IsLargeHeapBlock())
        {
            LargeHeapBlock* largeHeapBlock = (LargeHeapBlock*)heapBlock;
            if (largeHeapBlock->InPageHeapMode()
#ifdef RECYCLER_NO_PAGE_REUSE
                && !largeHeapBlock->GetPageAllocator(largeHeapBlock->heapInfo)->IsPageReuseDisabled()
#endif
                )
            {
                largeHeapBlock->VerifyPageHeapPattern();
            }
        }
    }
}
#endif
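// RECYCLER_ZERO_MEM_CHECK: assert that newly allocated memory is zero-filled (or
// pattern-filled with VerifyMemFill when RECYCLER_MEMORY_VERIFY is also enabled).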
#ifdef RECYCLER_ZERO_MEM_CHECK
void
Recycler::VerifyZeroFill(void * address, size_t size)
{
    byte expectedFill = 0;
#ifdef RECYCLER_MEMORY_VERIFY
    if (this->VerifyEnabled())
    {
        expectedFill = Recycler::VerifyMemFill;
    }
#endif
    Assert(IsAll((byte *)address, size, expectedFill));
}

void
Recycler::VerifyLargeAllocZeroFill(void * address, size_t size, ObjectInfoBits attributes)
{
    // Large allocs will have already written the dummy vtable at the beginning of the allocation
    // if either FinalizeBit or TrackBit attributes were set. Skip the verify for that memory
    // if that is the case.
    if ((attributes & (FinalizeBit | TrackBit)) != 0)
    {
        // Verify that it really is the dummy v-table before skipping it.
        DummyVTableObject dummy;
        Assert((*(void**)(&dummy)) == *((void**)address));
        address = ((char*)address) + sizeof(DummyVTableObject);
        size -= sizeof(DummyVTableObject);
    }
    VerifyZeroFill(address, size);
}
#endif
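// RECYCLER_MEMORY_VERIFY pads each allocation out to its aligned size and stores
// the pad size in the trailing word. Illustrative layout of one padded allocation:
//
//     [ object bytes (size) | VerifyMemFill pad | pad size (size_t) ]
//     |<-------------------- alignedAllocSize --------------------->|
//
// FillCheckPad verifies the existing fill pattern (skipping an already-installed
// dummy vtable) before FillPadNoCheck zeroes the object region and writes the pad
// size; VerifyCheckPad later reads the trailing pad size back and checks the pad
// bytes one by one.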
#ifdef RECYCLER_MEMORY_VERIFY
void
Recycler::FillCheckPad(void * address, size_t size, size_t alignedAllocSize, bool objectAlreadyInitialized)
{
    if (this->VerifyEnabled())
    {
        void* addressToVerify = address;
        size_t sizeToVerify = alignedAllocSize;
        if (objectAlreadyInitialized)
        {
            addressToVerify = ((char*)address + size);
            sizeToVerify = (alignedAllocSize - size);
        }
        else
        {
            // It could be the case that an uninitialized object already has a dummy vtable installed
            // at the beginning of the address. If that is the case, we can't verify the fill pattern
            // on that memory, since it's already been initialized.
            // Note that FillPadNoCheck will skip over the first sizeof(FreeObject) bytes, which
            // prevents overwriting of the vtable.
            static_assert(sizeof(DummyVTableObject) == sizeof(void*), "Incorrect size for a DummyVTableObject - it must contain a single v-table pointer");
            DummyVTableObject dummy;
            if ((*(void**)(&dummy)) == *((void**)address))
            {
                addressToVerify = (char*)address + sizeof(DummyVTableObject);
                sizeToVerify = alignedAllocSize - sizeof(DummyVTableObject);
            }
        }
        // Verify the fill pattern up to the trailing pad-size word, then re-fill:
        // FillPadNoCheck zeroes the object region and writes the pad size at the end.
        VerifyCheckFill(addressToVerify, sizeToVerify - sizeof(size_t));
        FillPadNoCheck(address, size, alignedAllocSize, objectAlreadyInitialized);
    }
}

void
Recycler::FillPadNoCheck(void * address, size_t size, size_t alignedAllocSize, bool objectAlreadyInitialized)
{
    // Ignore the first word
    if (!objectAlreadyInitialized && size > sizeof(FreeObject))
    {
        memset((char *)address + sizeof(FreeObject), 0, size - sizeof(FreeObject));
    }
    // write the pad size at the end
    *(size_t *)((char *)address + alignedAllocSize - sizeof(size_t)) = alignedAllocSize - size;
}
void Recycler::Verify(Js::Phase phase)
{
    if (verifyEnabled && (!this->CollectionInProgress()))
    {
        if (GetRecyclerFlagsTable().RecyclerVerify.IsEnabled(phase))
        {
            autoHeap.Verify();
        }
    }
}

void Recycler::VerifyCheck(BOOL cond, char16 const * msg, void * address, void * corruptedAddress)
{
    if (!(cond))
    {
        fwprintf(stderr, _u("RECYCLER CORRUPTION: StartAddress=%p CorruptedAddress=%p: %s"), address, corruptedAddress, msg);
        Js::Throw::FatalInternalError();
    }
}

void Recycler::VerifyCheckFill(void * address, size_t size)
{
    Assert(IsAll((byte*)address, size, Recycler::VerifyMemFill));
}

void Recycler::VerifyCheckPadExplicitFreeList(void * address, size_t size)
{
    size_t * paddingAddress = (size_t *)((byte *)address + size - sizeof(size_t));
    size_t padding = *paddingAddress;
#pragma warning(suppress:4310)
    Assert(padding != (size_t)0xCACACACACACACACA); // Explicit free objects have to have been initialized at some point before they were freed
    Recycler::VerifyCheck(padding >= verifyPad + sizeof(size_t) && padding < size, _u("Invalid padding size"), address, paddingAddress);
    for (byte * i = (byte *)address + size - padding; i < (byte *)paddingAddress; i++)
    {
        Recycler::VerifyCheck(*i == Recycler::VerifyMemFill, _u("buffer overflow"), address, i);
    }
}

void Recycler::VerifyCheckPad(void * address, size_t size)
{
    size_t * paddingAddress = (size_t *)((byte *)address + size - sizeof(size_t));
    size_t padding = *paddingAddress;
#pragma warning(suppress:4310)
    if (padding == (size_t)0xCACACACACACACACA)
    {
        // Nascent blocks contain objects that were never initialized with a pad size
        Recycler::VerifyCheckFill(address, size);
        return;
    }
    Recycler::VerifyCheck(padding >= verifyPad + sizeof(size_t) && padding < size, _u("Invalid padding size"), address, paddingAddress);
    for (byte * i = (byte *)address + size - padding; i < (byte *)paddingAddress; i++)
    {
        Recycler::VerifyCheck(*i == Recycler::VerifyMemFill, _u("buffer overflow"), address, i);
    }
}
#endif
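// AutoSetupRecyclerForNonCollectingMark temporarily puts the recycler into a state
// where a mark pass can run without a real collection (used for heap enumeration
// and object graph dumps). The destructor restores the previous collection state
// and, under RECYCLER_STATS, the statistics saved by DoCommonSetup.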
Recycler::AutoSetupRecyclerForNonCollectingMark::AutoSetupRecyclerForNonCollectingMark(Recycler& recycler, bool setupForHeapEnumeration)
    : m_recycler(recycler), m_setupDone(false)
{
    if (!setupForHeapEnumeration)
    {
        DoCommonSetup();
    }
}

void Recycler::AutoSetupRecyclerForNonCollectingMark::DoCommonSetup()
{
    Assert(m_recycler.collectionState == CollectionStateNotCollecting || m_recycler.collectionState == CollectionStateExit);
#if ENABLE_CONCURRENT_GC
    Assert(!m_recycler.DoQueueTrackedObject());
#endif
#if ENABLE_PARTIAL_GC
    // We need to get out of partial collect before we do the mark because we
    // will mess with the free bit vector state
    // GC-CONSIDER: don't mess with the free bit vector?
    if (m_recycler.inPartialCollectMode)
    {
        m_recycler.FinishPartialCollect();
    }
#endif
    m_previousCollectionState = m_recycler.collectionState;
#ifdef RECYCLER_STATS
    m_previousCollectionStats = m_recycler.collectionStats;
    memset(&m_recycler.collectionStats, 0, sizeof(RecyclerCollectionStats));
#endif
    m_setupDone = true;
}

void Recycler::AutoSetupRecyclerForNonCollectingMark::SetupForHeapEnumeration()
{
    Assert(!m_recycler.isHeapEnumInProgress);
    Assert(!m_recycler.allowAllocationDuringHeapEnum);
    m_recycler.EnsureNotCollecting();
    DoCommonSetup();
    m_recycler.ResetMarks(ResetMarkFlags_HeapEnumeration);
    m_recycler.SetCollectionState(CollectionStateNotCollecting);
    m_recycler.isHeapEnumInProgress = true;
    m_recycler.isCollectionDisabled = true;
}

Recycler::AutoSetupRecyclerForNonCollectingMark::~AutoSetupRecyclerForNonCollectingMark()
{
    Assert(m_setupDone);
    Assert(!m_recycler.allowAllocationDuringHeapEnum);
#ifdef RECYCLER_STATS
    m_recycler.collectionStats = m_previousCollectionStats;
#endif
    m_recycler.SetCollectionState(m_previousCollectionState);
    m_recycler.isHeapEnumInProgress = false;
    m_recycler.isCollectionDisabled = false;
}
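// RECYCLER_DUMP_OBJECT_GRAPH: runs a non-collecting mark with a dumper attached so
// that every marked object gets reported; returns false if the dumper ran out of
// memory while recording the graph.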
#ifdef RECYCLER_DUMP_OBJECT_GRAPH
bool Recycler::DumpObjectGraph(RecyclerObjectGraphDumper::Param * param)
{
    bool succeeded = false;
    bool isExited = (this->collectionState == CollectionStateExit);
    if (isExited)
    {
        this->SetCollectionState(CollectionStateNotCollecting);
    }
    if (this->collectionState != CollectionStateNotCollecting)
    {
        Output::Print(_u("Can't dump object graph when collecting\n"));
        Output::Flush();
        return succeeded;
    }
    BEGIN_NO_EXCEPTION
    {
        RecyclerObjectGraphDumper objectGraphDumper(this, param);
        Recycler::AutoSetupRecyclerForNonCollectingMark AutoSetupRecyclerForNonCollectingMark(*this);
        AutoRestoreValue<bool> skipStackToggle(&this->skipStack, this->skipStack || (param && param->skipStack));
        this->Mark();
        this->objectGraphDumper = nullptr;
#ifdef RECYCLER_STATS
        if (param)
        {
            param->stats = this->collectionStats;
        }
#endif
        succeeded = !objectGraphDumper.isOutOfMemory;
    }
    END_NO_EXCEPTION
    if (isExited)
    {
        this->SetCollectionState(CollectionStateExit);
    }
    if (!succeeded)
    {
        Output::Print(_u("Out of memory dumping object graph\n"));
    }
    Output::Flush();
    return succeeded;
}

void
Recycler::DumpObjectDescription(void *objectAddress)
{
#ifdef PROFILE_RECYCLER_ALLOC
    type_info const * typeinfo = nullptr;
    bool isArray = false;
    if (this->trackerDictionary)
    {
        TrackerData * trackerData = GetTrackerData(objectAddress);
        if (trackerData != nullptr)
        {
            typeinfo = trackerData->typeinfo;
            isArray = trackerData->isArray;
        }
        else
        {
            Assert(false);
        }
    }
    RecyclerObjectDumper::DumpObject(typeinfo, isArray, objectAddress);
#else
    Output::Print(_u("Address %p"), objectAddress);
#endif
}
#endif
#ifdef RECYCLER_STRESS
// All stress-mode collects are implicitly instantiated here
bool
Recycler::StressCollectNow()
{
    if (this->recyclerStress)
    {
        this->CollectNow<CollectStress>();
        return true;
    }
#if ENABLE_CONCURRENT_GC
    else if (this->recyclerBackgroundStress)
    {
        this->CollectNow<CollectBackgroundStress>();
        return true;
    }
    else if ((this->enableConcurrentMark || this->enableConcurrentSweep)
        && (this->recyclerConcurrentStress
        || this->recyclerConcurrentRepeatStress))
    {
#if ENABLE_PARTIAL_GC
        if (this->recyclerPartialStress)
        {
            this->CollectNow<CollectConcurrentPartialStress>();
            return true;
        }
        else
#endif // ENABLE_PARTIAL_GC
        {
            this->CollectNow<CollectConcurrentStress>();
            return true;
        }
    }
#endif // ENABLE_CONCURRENT_GC
#if ENABLE_PARTIAL_GC
    else if (this->recyclerPartialStress)
    {
        this->CollectNow<CollectPartialStress>();
        return true;
    }
#endif // ENABLE_PARTIAL_GC
    return false;
}
#endif // RECYCLER_STRESS
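// TRACK_ALLOC / PROFILE_RECYCLER_ALLOC: per-type allocation accounting. A call site
// stashes a TrackAllocData describing the upcoming allocation, and the allocation
// path then attributes the object to its type_info in trackerDictionary.
// Illustrative call sequence (normally generated by the TRACK_ALLOC macros rather
// than written by hand):
//
//     recycler->TrackAllocInfo(trackAllocData); // remember the type for the next alloc
//     // ... allocate; the allocator calls ClearTrackAllocInfo/TrackAlloc internally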
#ifdef TRACK_ALLOC
Recycler *
Recycler::TrackAllocInfo(TrackAllocData const& data)
{
#ifdef PROFILE_RECYCLER_ALLOC
    if (this->trackerDictionary != nullptr)
    {
        Assert(nextAllocData.IsEmpty());
        nextAllocData = data;
    }
#endif
    return this;
}

void
Recycler::ClearTrackAllocInfo(TrackAllocData* data/* = NULL*/)
{
#ifdef PROFILE_RECYCLER_ALLOC
    if (this->trackerDictionary != nullptr)
    {
        AssertMsg(!nextAllocData.IsEmpty(), "Missing tracking information for this allocation, are you not using the macros?");
        if (data)
        {
            *data = nextAllocData;
        }
        nextAllocData.Clear();
    }
#endif
}

#ifdef PROFILE_RECYCLER_ALLOC
bool
Recycler::DoProfileAllocTracker()
{
    bool doTracker = false;
#ifdef RECYCLER_DUMP_OBJECT_GRAPH
    doTracker = Js::Configuration::Global.flags.DumpObjectGraphOnExit
        || Js::Configuration::Global.flags.DumpObjectGraphOnCollect
        || Js::Configuration::Global.flags.DumpObjectGraphOnEnum;
#endif
#ifdef LEAK_REPORT
    if (Js::Configuration::Global.flags.IsEnabled(Js::LeakReportFlag))
    {
        doTracker = true;
    }
#endif
#ifdef CHECK_MEMORY_LEAK
    if (Js::Configuration::Global.flags.CheckMemoryLeak)
    {
        doTracker = true;
    }
#endif
    if (CONFIG_FLAG(KeepRecyclerTrackData))
    {
        doTracker = true;
    }
    return doTracker || MemoryProfiler::DoTrackRecyclerAllocation();
}

void
Recycler::InitializeProfileAllocTracker()
{
    if (DoProfileAllocTracker())
    {
        trackerDictionary = NoCheckHeapNew(TypeInfotoTrackerItemMap, &NoCheckHeapAllocator::Instance, 163);
#pragma prefast(suppress:6031, "InitializeCriticalSectionAndSpinCount always succeed since Vista. No need to check return value")
        trackerCriticalSection = new CriticalSection(1000);
    }
    nextAllocData.Clear();
}

void
Recycler::TrackAllocCore(void * object, size_t size, const TrackAllocData& trackAllocData, bool traceLifetime)
{
    auto&& typeInfo = trackAllocData.GetTypeInfo();
    if (CONFIG_FLAG(KeepRecyclerTrackData))
    {
        TrackFree((char*)object, size);
    }
    Assert(GetTrackerData(object) == nullptr || GetTrackerData(object) == &TrackerData::ExplicitFreeListObjectData);
    Assert(typeInfo != nullptr);
    TrackerItem * item;
    size_t allocCount = trackAllocData.GetCount();
    size_t itemSize = (size - trackAllocData.GetPlusSize());
    bool isArray;
    if (allocCount != (size_t)-1)
    {
        isArray = true;
        itemSize = itemSize / allocCount;
    }
    else
    {
        isArray = false;
        allocCount = 1;
    }
    if (!trackerDictionary->TryGetValue(typeInfo, &item))
    {
#ifdef STACK_BACK_TRACE
        if (CONFIG_FLAG(KeepRecyclerTrackData) && isArray) // type info is not useful for arrays; record the allocation stack instead
        {
            size_t stackTraceSize = 16 * sizeof(void*);
            item = NoCheckHeapNewPlus(stackTraceSize, TrackerItem, typeInfo);
            StackBackTrace::Capture((char*)&item[1], stackTraceSize, 7);
        }
        else
#endif
        {
            item = NoCheckHeapNew(TrackerItem, typeInfo);
        }
        item->instanceData.ItemSize = itemSize;
        item->arrayData.ItemSize = itemSize;
        trackerDictionary->Item(typeInfo, item);
    }
    else
    {
        Assert(item->instanceData.typeinfo == typeInfo);
        Assert(item->instanceData.ItemSize == itemSize);
        Assert(item->arrayData.ItemSize == itemSize);
    }
    TrackerData& data = (isArray) ? item->arrayData : item->instanceData;
    data.ItemCount += allocCount;
    data.AllocCount++;
    data.ReqSize += size;
    data.AllocSize += HeapInfo::GetAlignedSizeNoCheck(size);
#ifdef TRACE_OBJECT_LIFETIME
    data.TraceLifetime = traceLifetime;
    if (traceLifetime)
    {
        Output::Print(data.isArray ? _u("Allocated %S[] %p\n") : _u("Allocated %S %p\n"), data.typeinfo->name(), object);
    }
#endif
#ifdef PERF_COUNTERS
    ++data.counter;
    data.sizeCounter += HeapInfo::GetAlignedSizeNoCheck(size);
#endif
    SetTrackerData(object, &data);
}
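// TrackAlloc and TrackIntegrate record allocations under trackerCriticalSection;
// TrackIntegrate walks a pre-built block and attributes every object slot in it.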
void* Recycler::TrackAlloc(void* object, size_t size, const TrackAllocData& trackAllocData, bool traceLifetime)
{
    if (this->trackerDictionary != nullptr)
    {
        Assert(nextAllocData.IsEmpty()); // should have been cleared
        trackerCriticalSection->Enter();
        TrackAllocCore(object, size, trackAllocData, traceLifetime);
        trackerCriticalSection->Leave();
    }
    return object;
}

void
Recycler::TrackIntegrate(__in_ecount(blockSize) char * blockAddress, size_t blockSize, size_t allocSize, size_t objectSize, const TrackAllocData& trackAllocData)
{
    if (this->trackerDictionary != nullptr)
    {
        Assert(nextAllocData.IsEmpty()); // should have been cleared
        trackerCriticalSection->Enter();
        char * address = blockAddress;
        char * blockEnd = blockAddress + blockSize;
        while (address + allocSize <= blockEnd)
        {
            TrackAllocCore(address, objectSize, trackAllocData);
            address += allocSize;
        }
        trackerCriticalSection->Leave();
    }
}

BOOL Recycler::TrackFree(const char* address, size_t size)
{
    if (this->trackerDictionary != nullptr)
    {
        trackerCriticalSection->Enter();
        TrackerData * data = GetTrackerData((char *)address);
        if (data != nullptr)
        {
            if (data != &TrackerData::EmptyData)
            {
#ifdef PERF_COUNTERS
                --data->counter;
                data->sizeCounter -= size;
#endif
                if (data->typeinfo == &typeid(RecyclerWeakReferenceBase))
                {
                    TrackFreeWeakRef((RecyclerWeakReferenceBase *)address);
                }
                data->FreeSize += size;
                data->FreeCount++;
#ifdef TRACE_OBJECT_LIFETIME
                if (data->TraceLifetime)
                {
                    Output::Print(data->isArray ? _u("Freed %S[] %p\n") : _u("Freed %S %p\n"), data->typeinfo->name(), address);
                }
#endif
            }
            SetTrackerData((char *)address, nullptr);
        }
        else
        {
            if (!CONFIG_FLAG(KeepRecyclerTrackData))
            {
                Assert(false);
            }
        }
        trackerCriticalSection->Leave();
    }
    return true;
}

Recycler::TrackerData *
Recycler::GetTrackerData(void * address)
{
    HeapBlock * heapBlock = this->FindHeapBlock(address);
    Assert(heapBlock != nullptr);
    return (Recycler::TrackerData *)heapBlock->GetTrackerData(address);
}

void
Recycler::SetTrackerData(void * address, TrackerData * data)
{
    HeapBlock * heapBlock = this->FindHeapBlock(address);
    Assert(heapBlock != nullptr);
    heapBlock->SetTrackerData(address, data);
}

void
Recycler::TrackUnallocated(__in char* address, __in char *endAddress, size_t sizeCat)
{
    if (!CONFIG_FLAG(KeepRecyclerTrackData))
    {
        if (this->trackerDictionary != nullptr)
        {
            trackerCriticalSection->Enter();
            while (address + sizeCat <= endAddress)
            {
                Assert(GetTrackerData(address) == nullptr);
                SetTrackerData(address, &TrackerData::EmptyData);
                address += sizeCat;
            }
            trackerCriticalSection->Leave();
        }
    }
}

void
Recycler::TrackAllocWeakRef(RecyclerWeakReferenceBase * weakRef)
{
#if ENABLE_RECYCLER_TYPE_TRACKING
    Assert(weakRef->typeInfo != nullptr);
#endif
#if DBG && defined(PERF_COUNTERS)
    if (this->trackerDictionary != nullptr)
    {
        TrackerItem * item;
        if (trackerDictionary->TryGetValue(weakRef->typeInfo, &item))
        {
            weakRef->counter = &item->weakRefCounter;
        }
        else
        {
            weakRef->counter = &PerfCounter::RecyclerTrackerCounterSet::GetWeakRefPerfCounter(weakRef->typeInfo);
        }
        ++(*weakRef->counter);
    }
#endif
}

void
Recycler::TrackFreeWeakRef(RecyclerWeakReferenceBase * weakRef)
{
#if DBG && defined(PERF_COUNTERS)
    if (weakRef->counter != nullptr)
    {
        --(*weakRef->counter);
    }
#endif
}

void
Recycler::PrintAllocStats()
{
    if (this->trackerDictionary == nullptr)
    {
        return;
    }
    size_t itemCount = 0;
    int allocCount = 0;
    int64 reqSize = 0;
    int64 allocSize = 0;
    int freeCount = 0;
    int64 freeSize = 0;
    Output::Print(_u("=================================================================================================================\n"));
    Output::Print(_u("Recycler Allocations\n"));
    Output::Print(_u("=================================================================================================================\n"));
    Output::Print(_u("ItemSize ItemCount AllocCount RequestSize AllocSize FreeCount FreeSize DiffCount DiffSize \n"));
    Output::Print(_u("-------- ---------- ---------- --------------- --------------- ---------- --------------- ---------- ---------------\n"));
    for (int i = 0; i < trackerDictionary->Count(); i++)
    {
        TrackerItem * item = trackerDictionary->GetValueAt(i);
        type_info const * typeinfo = trackerDictionary->GetKeyAt(i);
        if (item->instanceData.AllocCount != 0)
        {
            Output::Print(_u("%8d %10d %10d %15I64d %15I64d %10d %15I64d %10d %15I64d %S\n"),
                item->instanceData.ItemSize, item->instanceData.ItemCount, item->instanceData.AllocCount, item->instanceData.ReqSize,
                item->instanceData.AllocSize, item->instanceData.FreeCount, item->instanceData.FreeSize,
                item->instanceData.AllocCount - item->instanceData.FreeCount, item->instanceData.AllocSize - item->instanceData.FreeSize, typeinfo->name());
            itemCount += item->instanceData.ItemCount;
            allocCount += item->instanceData.AllocCount;
            reqSize += item->instanceData.ReqSize;
            allocSize += item->instanceData.AllocSize;
            freeCount += item->instanceData.FreeCount;
            freeSize += item->instanceData.FreeSize;
        }
        if (item->arrayData.AllocCount != 0)
        {
            Output::Print(_u("%8d %10d %10d %15I64d %15I64d %10d %15I64d %10d %15I64d %S[]\n"),
                item->arrayData.ItemSize, item->arrayData.ItemCount, item->arrayData.AllocCount, item->arrayData.ReqSize,
                item->arrayData.AllocSize, item->arrayData.FreeCount, item->arrayData.FreeSize,
                item->arrayData.AllocCount - item->arrayData.FreeCount, item->arrayData.AllocSize - item->arrayData.FreeSize, typeinfo->name());
            itemCount += item->arrayData.ItemCount;
            allocCount += item->arrayData.AllocCount;
            reqSize += item->arrayData.ReqSize;
            allocSize += item->arrayData.AllocSize;
            freeCount += item->arrayData.FreeCount;
            freeSize += item->arrayData.FreeSize;
        }
    }
    Output::Print(_u("-------- ---------- ---------- --------------- --------------- ---------- --------------- ---------- ---------------\n"));
    Output::Print(_u(" %8d %10d %15I64d %15I64d %10d %15I64d %10d %15I64d **Total**\n"),
        itemCount, allocCount, reqSize, allocSize, freeCount, freeSize, allocCount - freeCount, allocSize - freeSize);
#ifdef EXCEL_FRIENDLY_DUMP
    Output::Print(_u("\nExcel friendly version\nItemSize\tItemCount\tAllocCount\tRequestSize\tAllocSize\tFreeCount\tFreeSize\tDiffCount\tDiffSize\tType\n"));
    for (int i = 0; i < trackerDictionary->Count(); i++)
    {
        TrackerItem * item = trackerDictionary->GetValueAt(i);
        type_info const * typeinfo = trackerDictionary->GetKeyAt(i);
        if (item->instanceData.AllocCount != 0)
        {
            Output::Print(_u("%d\t%d\t%d\t%I64d\t%I64d\t%d\t%I64d\t%d\t%I64d\t%S\n"),
                item->instanceData.ItemSize, item->instanceData.ItemCount, item->instanceData.AllocCount, item->instanceData.ReqSize,
                item->instanceData.AllocSize, item->instanceData.FreeCount, item->instanceData.FreeSize,
                item->instanceData.AllocCount - item->instanceData.FreeCount, item->instanceData.AllocSize - item->instanceData.FreeSize, typeinfo->name());
        }
        if (item->arrayData.AllocCount != 0)
        {
            Output::Print(_u("%d\t%d\t%d\t%I64d\t%I64d\t%d\t%I64d\t%d\t%I64d\t%S[]\n"),
                item->arrayData.ItemSize, item->arrayData.ItemCount, item->arrayData.AllocCount, item->arrayData.ReqSize,
                item->arrayData.AllocSize, item->arrayData.FreeCount, item->arrayData.FreeSize,
                item->arrayData.AllocCount - item->arrayData.FreeCount, item->arrayData.AllocSize - item->arrayData.FreeSize, typeinfo->name());
        }
    }
#endif // EXCEL_FRIENDLY_DUMP
    Output::Flush();
}
#endif // PROFILE_RECYCLER_ALLOC
#endif // TRACK_ALLOC
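// RECYCLER_VERIFY_MARK: after a mark pass, re-walk the roots (pinned objects and
// guest arenas) and the heap to check that everything reachable was actually marked.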
#ifdef RECYCLER_VERIFY_MARK
void
Recycler::VerifyMark()
{
    VerifyMarkRoots();
    // Can't really verify the stack: recycler code that ran between ScanStack and now
    // may have introduced false references.
    // VerifyMarkStack();
  7403. autoHeap.VerifyMark();
  7404. }
  7405. void
  7406. Recycler::VerifyMarkRoots()
  7407. {
  7408. {
  7409. this->VerifyMark(transientPinnedObject);
  7410. pinnedObjectMap.Map([this](void * obj, PinRecord const &refCount)
  7411. {
  7412. if (refCount == 0)
  7413. {
  7414. Assert(this->hasPendingUnpinnedObject);
  7415. }
  7416. else
  7417. {
  7418. // Use the pinrecord as the source reference
  7419. this->VerifyMark(obj);
  7420. }
  7421. });
  7422. }
  7423. DList<GuestArenaAllocator, HeapAllocator>::Iterator guestArenaIter(&guestArenaList);
  7424. while (guestArenaIter.Next())
  7425. {
  7426. if (guestArenaIter.Data().pendingDelete)
  7427. {
  7428. Assert(this->hasPendingDeleteGuestArena);
  7429. }
  7430. else
  7431. {
  7432. VerifyMarkArena(&guestArenaIter.Data());
  7433. }
  7434. }
  7435. DList<ArenaData *, HeapAllocator>::Iterator externalGuestArenaIter(&externalGuestArenaList);
  7436. while (externalGuestArenaIter.Next())
  7437. {
  7438. VerifyMarkArena(externalGuestArenaIter.Data());
  7439. }
  7440. // We can't check external roots here
  7441. }
void
Recycler::VerifyMarkArena(ArenaData * alloc)
{
    VerifyMarkBigBlockList(alloc->GetBigBlocks(false));
    VerifyMarkBigBlockList(alloc->GetFullBlocks());
    VerifyMarkArenaMemoryBlockList(alloc->GetMemoryBlocks());
}

void
Recycler::VerifyMarkBigBlockList(BigBlock * memoryBlocks)
{
    size_t scanRootBytes = 0;
    BigBlock * blockp = memoryBlocks;
    while (blockp != NULL)
    {
        void ** base = (void **)blockp->GetBytes();
        size_t slotCount = blockp->currentByte / sizeof(void *);
        scanRootBytes += blockp->currentByte;
        for (size_t i = 0; i < slotCount; i++)
        {
            VerifyMark(base[i]);
        }
        blockp = blockp->nextBigBlock;
    }
}

void
Recycler::VerifyMarkArenaMemoryBlockList(ArenaMemoryBlock * memoryBlocks)
{
    size_t scanRootBytes = 0;
    ArenaMemoryBlock * blockp = memoryBlocks;
    while (blockp != NULL)
    {
        void ** base = (void **)blockp->GetBytes();
        size_t slotCount = blockp->nbytes / sizeof(void *);
        scanRootBytes += blockp->nbytes;
        for (size_t i = 0; i < slotCount; i++)
        {
            VerifyMark(base[i]);
        }
        blockp = blockp->next;
    }
}
void
Recycler::VerifyMarkStack()
{
    SAVE_THREAD_CONTEXT();
    void ** stackTop = (void **)this->savedThreadContext.GetStackTop();
    void * stackStart = GetStackBase();
    Assert(stackStart > stackTop);
    for (; stackTop < stackStart; stackTop++)
    {
        void * candidate = *stackTop;
        VerifyMark(nullptr, candidate);
    }

    void ** registers = this->savedThreadContext.GetRegisters();
    for (int i = 0; i < SavedRegisterState::NumRegistersToSave; i++)
    {
        VerifyMark(nullptr, registers[i]);
    }
}
bool
Recycler::VerifyMark(void * target)
{
    return VerifyMark(nullptr, target);
}

// objectAddress is nullptr in the case of roots
bool
Recycler::VerifyMark(void * objectAddress, void * target)
{
    void * realAddress;
    HeapBlock * heapBlock;
    if (this->enableScanInteriorPointers)
    {
        heapBlock = heapBlockMap.GetHeapBlock(target);
        if (heapBlock == nullptr)
        {
            return false;
        }
        realAddress = heapBlock->GetRealAddressFromInterior(target);
        if (realAddress == nullptr)
        {
            return false;
        }
    }
    else
    {
        heapBlock = this->FindHeapBlock(target);
        if (heapBlock == nullptr)
        {
            return false;
        }
        realAddress = target;
    }
    return heapBlock->VerifyMark(objectAddress, realAddress);
}
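
// Illustrative sketch (an assumption, not part of the build): the interior-pointer
// handling above is the general pattern for resolving a conservative candidate.
// The heap block map identifies the owning block, the block maps an interior
// address back to the object's real start, and only then is the mark consulted.
// 'IsCandidateObjectMarked' is a hypothetical helper, not Recycler API.
#if 0
bool IsCandidateObjectMarked(Recycler * recycler, void * candidate)
{
    HeapBlock * heapBlock = recycler->heapBlockMap.GetHeapBlock(candidate);
    if (heapBlock == nullptr)
    {
        return false; // not a recycler address at all
    }
    void * realAddress = heapBlock->GetRealAddressFromInterior(candidate);
    return realAddress != nullptr && recycler->IsObjectMarked(realAddress);
}
#endif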
#endif
ArenaAllocator *
Recycler::CreateGuestArena(char16 const * name, void (*outOfMemoryFunc)())
{
    // Note, guest arenas use the large block allocator.
    return guestArenaList.PrependNode(&HeapAllocator::Instance, name, this->GetDefaultHeapInfo()->GetRecyclerLargeBlockPageAllocator(), outOfMemoryFunc);
}

void
Recycler::DeleteGuestArena(ArenaAllocator * arenaAllocator)
{
    GuestArenaAllocator * guestArenaAllocator = static_cast<GuestArenaAllocator *>(arenaAllocator);
#if ENABLE_CONCURRENT_GC
    if (this->hasPendingConcurrentFindRoot)
    {
        // We are doing a concurrent find-root pass; don't modify the list. Instead, mark the
        // arena to be deleted later, when we do find-root in thread.
        Assert(guestArenaList.HasElement(guestArenaAllocator));
        this->hasPendingDeleteGuestArena = true;
        guestArenaAllocator->pendingDelete = true;
    }
    else
#endif
    {
        guestArenaList.RemoveElement(&HeapAllocator::Instance, guestArenaAllocator);
    }

    // Any time a root is removed during a GC, it indicates that an exhaustive
    // collection is likely going to have work to do, so trigger an exhaustive
    // candidate GC to indicate this fact.
    this->CollectNow<CollectExhaustiveCandidate>();
}
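
// Illustrative usage sketch (an assumption, not part of the build): a host creates
// a guest arena to get storage that is conservatively scanned as a root, and
// deletes it once the references it holds are no longer needed. Deletion may be
// deferred (pendingDelete) if a concurrent find-root pass is in flight.
// 'exampleObject' is a hypothetical recycler-allocated object.
#if 0
void GuestArenaExample(Recycler * recycler, void * exampleObject)
{
    ArenaAllocator * arena = recycler->CreateGuestArena(_u("Example"), Js::Throw::OutOfMemory);
    void ** slot = (void **)arena->Alloc(sizeof(void *)); // scanned as a root while the arena lives
    *slot = exampleObject;                                // keeps exampleObject reachable
    recycler->DeleteGuestArena(arena);                    // unroots; triggers an exhaustive candidate GC
}
#endif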
#ifdef LEAK_REPORT
void
Recycler::ReportLeaks()
{
    if (GetRecyclerFlagsTable().IsEnabled(Js::LeakReportFlag))
    {
        if (GetRecyclerFlagsTable().ForceMemoryLeak)
        {
            AUTO_HANDLED_EXCEPTION_TYPE(ExceptionType_DisableCheck);
            struct FakeMemory { Field(int) f; };
            FakeMemory * f = RecyclerNewStruct(this, FakeMemory);
            this->RootAddRef(f);
        }

        LeakReport::StartSection(_u("Object Graph"));
        LeakReport::StartRedirectOutput();

        RecyclerObjectGraphDumper::Param param = { 0 };
        param.skipStack = true;
        if (!this->DumpObjectGraph(&param))
        {
            LeakReport::Print(_u("--------------------------------------------------------------------------------\n"));
            LeakReport::Print(_u("ERROR: Out of memory generating leak report\n"));
            param.stats.markData.markCount = 0;
        }
        LeakReport::EndRedirectOutput();

        if (param.stats.markData.markCount != 0)
        {
            LeakReport::Print(_u("--------------------------------------------------------------------------------\n"));
            LeakReport::Print(_u("Recycler Leaked Object: %d bytes (%d objects)\n"),
                param.stats.markData.markBytes, param.stats.markData.markCount);
#ifdef STACK_BACK_TRACE
            if (GetRecyclerFlagsTable().LeakStackTrace)
            {
                LeakReport::StartSection(_u("Pinned object stack traces"));
                LeakReport::StartRedirectOutput();
                this->PrintPinnedObjectStackTraces();
                LeakReport::EndRedirectOutput();
                LeakReport::EndSection();
            }
#endif
        }
        LeakReport::EndSection();
    }
}

void
Recycler::ReportLeaksOnProcessDetach()
{
    if (GetRecyclerFlagsTable().IsEnabled(Js::LeakReportFlag))
    {
        AUTO_LEAK_REPORT_SECTION(this->GetRecyclerFlagsTable(), _u("Recycler (%p): Process Termination"), this);
        LeakReport::StartRedirectOutput();
        ReportOnProcessDetach([=]() { this->ReportLeaks(); });
        LeakReport::EndRedirectOutput();
    }
}
#endif
#ifdef CHECK_MEMORY_LEAK
void
Recycler::CheckLeaks(char16 const * header)
{
    if (GetRecyclerFlagsTable().CheckMemoryLeak && this->isPrimaryMarkContextInitialized)
    {
        if (GetRecyclerFlagsTable().ForceMemoryLeak)
        {
            AUTO_HANDLED_EXCEPTION_TYPE(ExceptionType_DisableCheck);
            struct FakeMemory { Field(int) f; };
            FakeMemory * f = RecyclerNewStruct(this, FakeMemory);
            this->RootAddRef(f);
        }

        Output::CaptureStart();
        Output::Print(_u("-------------------------------------------------------------------------------------\n"));
        Output::Print(_u("Recycler (%p): %s Leaked Roots\n"), this, header);
        Output::Print(_u("-------------------------------------------------------------------------------------\n"));

        RecyclerObjectGraphDumper::Param param = { 0 };
        param.dumpRootOnly = true;
        param.skipStack = true;
        if (!this->DumpObjectGraph(&param))
        {
            free(Output::CaptureEnd());
            Output::Print(_u("ERROR: Out of memory generating leak report\n"));
            return;
        }

        if (param.stats.markData.markCount != 0)
        {
#ifdef STACK_BACK_TRACE
            if (GetRecyclerFlagsTable().LeakStackTrace)
            {
                Output::Print(_u("-------------------------------------------------------------------------------------\n"));
  7653. Output::Print(_u("Pinned object stack traces"));
                Output::Print(_u("-------------------------------------------------------------------------------------\n"));
                this->PrintPinnedObjectStackTraces();
            }
#endif
            Output::Print(_u("-------------------------------------------------------------------------------------\n"));
            Output::Print(_u("Recycler Leaked Object: %d bytes (%d objects)\n"),
                param.stats.markData.markBytes, param.stats.markData.markCount);
            char16 * buffer = Output::CaptureEnd();
            MemoryLeakCheck::AddLeakDump(buffer, param.stats.markData.markBytes, param.stats.markData.markCount);
#ifdef GENERATE_DUMP
            if (GetRecyclerFlagsTable().IsEnabled(Js::DumpOnLeakFlag))
            {
                Js::Throw::GenerateDump(GetRecyclerFlagsTable().DumpOnLeak);
            }
#endif
        }
        else
        {
            free(Output::CaptureEnd());
        }
    }
}

void
Recycler::CheckLeaksOnProcessDetach(char16 const * header)
{
    if (GetRecyclerFlagsTable().CheckMemoryLeak)
    {
        ReportOnProcessDetach([=]() { this->CheckLeaks(header); });
    }
}
#endif
#if defined(LEAK_REPORT) || defined(CHECK_MEMORY_LEAK)
template <class Fn>
void
Recycler::ReportOnProcessDetach(Fn fn)
{
#if DBG
    // Process detach can be done on any thread, just disable the thread check
    this->markContext.GetPageAllocator()->SetDisableThreadAccessCheck();
#endif
#if ENABLE_CONCURRENT_GC
    if (this->IsConcurrentState())
    {
        this->AbortConcurrent(true);
    }
    if (this->CollectionInProgress())
    {
        Output::Print(_u("WARNING: Thread terminated during GC. Can't dump object graph\n"));
        return;
    }
#else
    Assert(!this->CollectionInProgress());
#endif

    // Don't mark external roots on another thread
    this->SetExternalRootMarker(NULL, NULL);
#if DBG
    this->ResetThreadId();
#endif
    fn();
}
#ifdef STACK_BACK_TRACE
void
Recycler::PrintPinnedObjectStackTraces()
{
    pinnedObjectMap.Map([this](void * object, PinRecord const& pinRecord)
    {
        this->DumpObjectDescription(object);
        Output::Print(_u("\n"));
        StackBackTraceNode::PrintAll(pinRecord.stackBackTraces);
    });
}
#endif
#endif

#if defined(RECYCLER_DUMP_OBJECT_GRAPH) || defined(LEAK_REPORT) || defined(CHECK_MEMORY_LEAK)
void
Recycler::SetInDllCanUnloadNow()
{
    inDllCanUnloadNow = true;
    // Just clear out the root marker for the object graph dump and leak report
    SetExternalRootMarker(NULL, NULL);
}

void
Recycler::SetInDetachProcess()
{
    inDetachProcess = true;
    // Just clear out the root marker for the object graph dump and leak report
    SetExternalRootMarker(NULL, NULL);
}
#endif
#ifdef ENABLE_JS_ETW
ULONG Recycler::EventWriteFreeMemoryBlock(HeapBlock* heapBlock)
{
    if (EventEnabledJSCRIPT_RECYCLER_FREE_MEMORY_BLOCK())
    {
        char* memoryAddress = NULL;
        ULONG objectSize = 0;
        ULONG blockSize = 0;
        switch (heapBlock->GetHeapBlockType())
        {
        case HeapBlock::HeapBlockType::SmallFinalizableBlockType:
        case HeapBlock::HeapBlockType::SmallNormalBlockType:
#ifdef RECYCLER_WRITE_BARRIER
        case HeapBlock::HeapBlockType::SmallFinalizableBlockWithBarrierType:
        case HeapBlock::HeapBlockType::SmallNormalBlockWithBarrierType:
#endif
        case HeapBlock::HeapBlockType::SmallLeafBlockType:
        {
            SmallHeapBlock* smallHeapBlock = static_cast<SmallHeapBlock*>(heapBlock);
            memoryAddress = smallHeapBlock->GetAddress();
            blockSize = (ULONG)(smallHeapBlock->GetEndAddress() - memoryAddress);
            objectSize = smallHeapBlock->GetObjectSize();
        }
        break;
        case HeapBlock::HeapBlockType::MediumFinalizableBlockType:
        case HeapBlock::HeapBlockType::MediumNormalBlockType:
#ifdef RECYCLER_WRITE_BARRIER
        case HeapBlock::HeapBlockType::MediumFinalizableBlockWithBarrierType:
        case HeapBlock::HeapBlockType::MediumNormalBlockWithBarrierType:
#endif
        case HeapBlock::HeapBlockType::MediumLeafBlockType:
        {
            MediumHeapBlock* mediumHeapBlock = static_cast<MediumHeapBlock*>(heapBlock);
            memoryAddress = mediumHeapBlock->GetAddress();
            blockSize = (ULONG)(mediumHeapBlock->GetEndAddress() - memoryAddress);
            objectSize = mediumHeapBlock->GetObjectSize();
        }
        break; // was missing: without it the medium-block case fell through and was overwritten by the large-block case
        case HeapBlock::HeapBlockType::LargeBlockType:
        {
            LargeHeapBlock* largeHeapBlock = static_cast<LargeHeapBlock*>(heapBlock);
            memoryAddress = largeHeapBlock->GetBeginAddress();
            blockSize = (ULONG)(largeHeapBlock->GetEndAddress() - memoryAddress);
            objectSize = blockSize;
        }
        break;
        default:
            AssertMsg(FALSE, "invalid heapblock type");
        }
        EventWriteJSCRIPT_RECYCLER_FREE_MEMORY_BLOCK(memoryAddress, blockSize, objectSize);
    }
    return S_OK;
}
void Recycler::FlushFreeRecord()
{
    Assert(bulkFreeMemoryWrittenCount <= Recycler::BulkFreeMemoryCount);
    JS_ETW(EventWriteJSCRIPT_RECYCLER_FREE_MEMORY(bulkFreeMemoryWrittenCount, sizeof(Recycler::ETWFreeRecord), etwFreeRecords));
    bulkFreeMemoryWrittenCount = 0;
}

void Recycler::AppendFreeMemoryETWRecord(__in char *address, size_t size)
{
    Assert(bulkFreeMemoryWrittenCount < Recycler::BulkFreeMemoryCount);
    __analysis_assume(bulkFreeMemoryWrittenCount < Recycler::BulkFreeMemoryCount);
    etwFreeRecords[bulkFreeMemoryWrittenCount].memoryAddress = address;
    // TODO: change to size_t or uint64?
    etwFreeRecords[bulkFreeMemoryWrittenCount].objectSize = (uint)size;
    bulkFreeMemoryWrittenCount++;
    if (bulkFreeMemoryWrittenCount == Recycler::BulkFreeMemoryCount)
    {
        FlushFreeRecord();
        Assert(bulkFreeMemoryWrittenCount == 0);
    }
}
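
// Illustrative sketch (an assumption, not part of the build): FlushFreeRecord and
// AppendFreeMemoryETWRecord above implement fixed-size event batching -- stage
// records until the buffer is full, then emit a single bulk event and reset the
// cursor. The same pattern in isolation:
#if 0
template <typename TRecord, size_t N, typename FlushFn>
void AppendBatched(TRecord (&buffer)[N], size_t& count, TRecord const& record, FlushFn flush)
{
    buffer[count++] = record; // stage the record
    if (count == N)           // buffer full: emit one bulk event
    {
        flush(buffer, count);
        count = 0;            // start the next batch
    }
}
#endif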
#endif

#ifdef PROFILE_EXEC
ArenaAllocator *
Recycler::AddBackgroundProfilerArena()
{
    return this->backgroundProfilerArena.PrependNode(&HeapAllocator::Instance,
        _u("BgGCProfiler"), &this->backgroundProfilerPageAllocator, Js::Throw::OutOfMemory);
}

void
Recycler::ReleaseBackgroundProfilerArena(ArenaAllocator * arena)
{
    this->backgroundProfilerArena.RemoveElement(&HeapAllocator::Instance, arena);
}

void
Recycler::SetProfiler(Js::Profiler * profiler, Js::Profiler * backgroundProfiler)
{
    this->profiler = profiler;
    this->backgroundProfiler = backgroundProfiler;
}
#endif
void Recycler::SetObjectBeforeCollectCallback(void* object,
    ObjectBeforeCollectCallback callback,
    void* callbackState,
    ObjectBeforeCollectCallbackWrapper callbackWrapper,
    void* threadContext)
{
    if (objectBeforeCollectCallbackState == ObjectBeforeCollectCallback_Shutdown)
    {
        return; // NOP at shutdown
    }

    if (objectBeforeCollectCallbackMap == nullptr)
    {
        if (callback == nullptr) return;
        objectBeforeCollectCallbackMap = HeapNew(ObjectBeforeCollectCallbackMap, &HeapAllocator::Instance);
    }

    // only allow 1 callback per object
    objectBeforeCollectCallbackMap->Item(object, ObjectBeforeCollectCallbackData(callbackWrapper, callback, callbackState, threadContext));

    if (callback != nullptr && this->IsInObjectBeforeCollectCallback()) // revive
    {
        this->ScanMemory<false>(&object, sizeof(object));
        this->ProcessMark(/*background*/false);
    }
}
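
// Illustrative usage sketch (an assumption, not part of the build): at most one
// before-collect callback is kept per object, and registering a null callback
// effectively unregisters it. 'MyBeforeCollect' is a hypothetical host callback.
#if 0
void MyBeforeCollect(void * object, void * callbackState)
{
    // Last chance to inspect 'object' before the recycler collects it.
}

void RegisterBeforeCollect(Recycler * recycler, void * object, void * threadContext)
{
    // No wrapper supplied, so the callback is invoked directly during the collect pass.
    recycler->SetObjectBeforeCollectCallback(object, MyBeforeCollect, nullptr, nullptr, threadContext);
}
#endif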
bool Recycler::ProcessObjectBeforeCollectCallbacks(bool atShutdown/*= false*/)
{
    if (this->objectBeforeCollectCallbackMap == nullptr)
    {
        return false; // no callbacks
    }

    Assert(atShutdown || this->IsMarkState());
    Assert(!this->IsInObjectBeforeCollectCallback());
    AutoRestoreValue<ObjectBeforeCollectCallbackState> autoInObjectBeforeCollectCallback(&objectBeforeCollectCallbackState,
        atShutdown ? ObjectBeforeCollectCallback_Shutdown : ObjectBeforeCollectCallback_Normal);

    // The callbacks may register/unregister callbacks while we are enumerating the current map. To avoid
    // conflicting usage of the callback map, we swap it out. New registrations will go to a new map.
    AutoAllocatorObjectPtr<ObjectBeforeCollectCallbackMap, HeapAllocator> oldCallbackMap(
        this->objectBeforeCollectCallbackMap, &HeapAllocator::Instance);
    this->objectBeforeCollectCallbackMap = nullptr;
    bool hasRemainingCallbacks = false;
    oldCallbackMap->MapAndRemoveIf([&](const ObjectBeforeCollectCallbackMap::EntryType& entry)
    {
        const ObjectBeforeCollectCallbackData& data = entry.Value();
        if (data.callback != nullptr)
        {
            void* object = entry.Key();
            if (atShutdown || !this->IsObjectMarked(object))
            {
                if (data.callbackWrapper != nullptr)
                {
                    data.callbackWrapper(data.callback, object, data.callbackState, data.threadContext);
                }
                else
                {
                    data.callback(object, data.callbackState);
                }
            }
            else
            {
                hasRemainingCallbacks = true;
                return false; // Do not remove this entry; keep the callback for a future collection
            }
        }
        return true; // Remove this entry
    });
    // Merge back remaining callbacks, if any
    if (hasRemainingCallbacks)
    {
        if (this->objectBeforeCollectCallbackMap == nullptr)
        {
            this->objectBeforeCollectCallbackMap = oldCallbackMap.Detach();
        }
        else
        {
            if (oldCallbackMap->Count() > this->objectBeforeCollectCallbackMap->Count())
            {
                // Swap so that oldCallbackMap is the smaller one
                ObjectBeforeCollectCallbackMap* tmp = oldCallbackMap.Detach();
                *&oldCallbackMap = this->objectBeforeCollectCallbackMap;
                this->objectBeforeCollectCallbackMap = tmp;
            }
            oldCallbackMap->Map([&](void* object, const ObjectBeforeCollectCallbackData& data)
            {
                this->objectBeforeCollectCallbackMap->Item(object, data);
            });
        }
    }

    return true; // callbacks may have been invoked
}
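
// Illustrative sketch (an assumption, not part of the build): the function above
// uses a detach-enumerate-merge pattern so that callbacks can safely register new
// entries while the map is being walked. Reduced to its essentials:
#if 0
template <typename Map, typename ProcessFn>
void ProcessReentrantMap(Map *& sharedMap, ProcessFn process)
{
    Map * oldMap = sharedMap; // detach: re-entrant registrations go to a fresh map
    sharedMap = nullptr;
    process(oldMap);          // enumerate; 'process' removes completed entries
    if (sharedMap == nullptr)
    {
        sharedMap = oldMap;   // nothing re-registered; keep the surviving entries
    }
    else
    {
        // merge survivors back, copying the smaller map into the larger one
        // to bound the cost of the merge
    }
}
#endif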
void Recycler::ClearObjectBeforeCollectCallbacks()
{
    // Called at shutdown. All objects will be gone, so invoke every registered callback.
    ProcessObjectBeforeCollectCallbacks(/*atShutdown*/true);
    Assert(objectBeforeCollectCallbackMap == nullptr);
}
#ifdef RECYCLER_TEST_SUPPORT
void Recycler::SetCheckFn(BOOL (*checkFn)(char* addr, size_t size))
{
    Assert(BinaryFeatureControl::RecyclerTest());
    this->EnsureNotCollecting();
    this->checkFn = checkFn;
}
#endif
void
Recycler::NotifyFree(__in char *address, size_t size)
{
    RecyclerVerboseTrace(GetRecyclerFlagsTable(), _u("Sweeping object %p\n"), address);
#ifdef RECYCLER_TEST_SUPPORT
    if (BinaryFeatureControl::RecyclerTest())
    {
        if (checkFn != NULL)
        {
            checkFn(address, size);
        }
    }
#endif

#ifdef ENABLE_JS_ETW
    if (EventEnabledJSCRIPT_RECYCLER_FREE_MEMORY())
    {
        AppendFreeMemoryETWRecord(address, (UINT)size);
    }
#endif

    RecyclerMemoryTracking::ReportFree(this, address, size);
    RECYCLER_PERF_COUNTER_DEC(LiveObject);
    RECYCLER_PERF_COUNTER_SUB(LiveObjectSize, size);
    RECYCLER_PERF_COUNTER_ADD(FreeObjectSize, size);

    if (HeapInfo::IsSmallBlockAllocation(HeapInfo::GetAlignedSizeNoCheck(size)))
    {
        RECYCLER_PERF_COUNTER_DEC(SmallHeapBlockLiveObject);
        RECYCLER_PERF_COUNTER_SUB(SmallHeapBlockLiveObjectSize, size);
        RECYCLER_PERF_COUNTER_ADD(SmallHeapBlockFreeObjectSize, size);
    }
    else
    {
        RECYCLER_PERF_COUNTER_DEC(LargeHeapBlockLiveObject);
        RECYCLER_PERF_COUNTER_SUB(LargeHeapBlockLiveObjectSize, size);
        RECYCLER_PERF_COUNTER_ADD(LargeHeapBlockFreeObjectSize, size);
    }

#ifdef RECYCLER_MEMORY_VERIFY
    if (this->VerifyEnabled())
    {
        VerifyCheckPad(address, size);
    }
#endif
#ifdef PROFILE_RECYCLER_ALLOC
    if (!CONFIG_FLAG(KeepRecyclerTrackData))
    {
        TrackFree(address, size);
    }
#endif
#ifdef RECYCLER_STATS
    collectionStats.objectSweptCount++;
    collectionStats.objectSweptBytes += size;
    if (!isForceSweeping)
    {
        collectionStats.objectSweptFreeListCount++;
        collectionStats.objectSweptFreeListBytes += size;
    }
#endif
}
#if GLOBAL_ENABLE_WRITE_BARRIER
void
Recycler::RegisterPendingWriteBarrierBlock(void* address, size_t bytes)
{
    if (CONFIG_FLAG(ForceSoftwareWriteBarrier))
    {
#if DBG
        WBSetBitRange((char*)address, (uint)bytes / sizeof(void*));
#endif
        pendingWriteBarrierBlockMap.Item(address, bytes);
        RecyclerWriteBarrierManager::WriteBarrier(address, bytes);
    }
}

void
Recycler::UnRegisterPendingWriteBarrierBlock(void* address)
{
    if (CONFIG_FLAG(ForceSoftwareWriteBarrier))
    {
        pendingWriteBarrierBlockMap.Remove(address);
    }
}
#endif
#if DBG && GLOBAL_ENABLE_WRITE_BARRIER
void
Recycler::WBVerifyBitIsSet(char* addr, char* target)
{
    AutoCriticalSection lock(&recyclerListLock);
    Recycler* recycler = Recycler::recyclerList;
    while (recycler)
    {
        auto heapBlock = recycler->FindHeapBlock((void*)((UINT_PTR)addr & ~HeapInfo::ObjectAlignmentMask));
        if (heapBlock)
        {
            heapBlock->WBVerifyBitIsSet(addr);
            break;
        }
        recycler = recycler->next;
    }
}

void
Recycler::WBSetBit(char* addr)
{
    if (CONFIG_FLAG(ForceSoftwareWriteBarrier) && CONFIG_FLAG(VerifyBarrierBit))
    {
        AutoCriticalSection lock(&recyclerListLock);
        Recycler* recycler = Recycler::recyclerList;
        while (recycler)
        {
            auto heapBlock = recycler->FindHeapBlock((void*)((UINT_PTR)addr & ~HeapInfo::ObjectAlignmentMask));
            if (heapBlock)
            {
                heapBlock->WBSetBit(addr);
                break;
            }
            recycler = recycler->next;
        }
    }
}

void
Recycler::WBSetBitRange(char* addr, uint count)
{
    if (CONFIG_FLAG(ForceSoftwareWriteBarrier) && CONFIG_FLAG(VerifyBarrierBit))
    {
        AutoCriticalSection lock(&recyclerListLock);
        Recycler* recycler = Recycler::recyclerList;
        while (recycler)
        {
            auto heapBlock = recycler->FindHeapBlock((void*)((UINT_PTR)addr & ~HeapInfo::ObjectAlignmentMask));
            if (heapBlock)
            {
                heapBlock->WBSetBitRange(addr, count);
                break;
            }
            recycler = recycler->next;
        }
    }
}

bool
Recycler::WBCheckIsRecyclerAddress(char* addr)
{
    AutoCriticalSection lock(&recyclerListLock);
    Recycler* recycler = Recycler::recyclerList;
    while (recycler)
    {
        auto heapBlock = recycler->FindHeapBlock((void*)((UINT_PTR)addr & ~HeapInfo::ObjectAlignmentMask));
        if (heapBlock)
        {
            return true;
        }
        recycler = recycler->next;
    }
    return false;
}
#endif
#ifdef RECYCLER_FINALIZE_CHECK
void
Recycler::VerifyFinalize()
{
    // We can't check this if we are marking
    Assert(!this->IsMarkState());

    size_t currentFinalizableObjectCount = this->autoHeap.GetFinalizeCount();
#if DBG
    Assert(currentFinalizableObjectCount == this->collectionStats.finalizeCount);
#else
    if (currentFinalizableObjectCount != this->collectionStats.finalizeCount)
    {
        Output::Print(_u("ERROR: Recycler dropped some finalizable objects\n"));
        DebugBreak();
    }
#endif
}
#endif
size_t
RecyclerHeapObjectInfo::GetSize() const
{
    Assert(m_heapBlock);

    size_t size;
#if LARGEHEAPBLOCK_ENCODING
    if (isUsingLargeHeapBlock)
    {
        size = m_largeHeapBlockHeader->objectSize;
    }
#else
    if (m_heapBlock->IsLargeHeapBlock())
    {
        size = ((LargeHeapBlock*)m_heapBlock)->GetObjectSize(m_address);
    }
#endif
    else
    {
        // All small heap block types have the same layout for the object size field.
        size = ((SmallHeapBlock*)m_heapBlock)->GetObjectSize();
    }

#ifdef RECYCLER_MEMORY_VERIFY
    if (m_recycler->VerifyEnabled())
    {
        size -= *(size_t *)(((char *)m_address) + size - sizeof(size_t));
    }
#endif
    return size;
}
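
// Illustrative sketch (an assumption about the verify-pad layout, not part of the
// build): under RECYCLER_MEMORY_VERIFY each allocation appears to carry trailing
// pad bytes whose length is stored in the final size_t of the allocation; the
// subtraction above strips that pad to recover the user-visible size.
#if 0
size_t UserSizeFromVerifiedAllocation(char * address, size_t allocationSize)
{
    size_t padSize = *(size_t *)(address + allocationSize - sizeof(size_t));
    return allocationSize - padSize; // user-visible size without the verify pad
}
#endif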
template char* Recycler::AllocWithAttributesInlined<(Memory::ObjectInfoBits)32, false>(size_t);

#ifdef RECYCLER_VISITED_HOST
template char* Recycler::AllocZeroWithAttributesInlined<RecyclerVisitedHostTracedFinalizableBits, /* nothrow = */ true>(size_t);
template char* Recycler::AllocZeroWithAttributesInlined<RecyclerVisitedHostFinalizableBits, /* nothrow = */ true>(size_t);
template char* Recycler::AllocZeroWithAttributesInlined<RecyclerVisitedHostTracedBits, /* nothrow = */ true>(size_t);
template char* Recycler::AllocZeroWithAttributesInlined<LeafBit, /* nothrow = */ true>(size_t);
#endif