| 123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702
70370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088108910901091109210931094109510961097109810991100110111021103110411051106110711081109111011111112111311141115111611171118111911201121112211231124112511261127112811291130113111321133113411351136113711381139114011411142114311441145114611471148114911501151115211531154115511561157115811591160116111621163116411651166116711681169117011711172117311741175117611771178117911801181118211831184118511861187118811891190119111921193119411951196119711981199120012011202120312041205120612071208120912101211121212131214121512161217121812191220122112221223122412251226122712281229123012311232123312341235123612371238123912401241124212431244124512461247124812491250125112521253125412551256125712581259126012611262126312641265126612671268126912701271127212731274127512761
27712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426142714281429143014311432143314341435143614371438143914401441144214431444144514461447144814491450145114521453145414551456145714581459146014611462146314641465146614671468146914701471147214731474147514761477147814791480148114821483148414851486148714881489149014911492149314941495149614971498149915001501150215031504150515061507150815091510151115121513151415151516151715181519152015211522152315241525152615271528152915301531153215331534153515361537153815391540154115421543154415451546154715481549155015511552155315541555155615571558155915601561156215631564156515661567156815691570157115721573157415751576157715781579158015811582158315841585158615871588158915901591159215931594159515961597159815991600160116021603160416051606160716081609161016111612161316141615161616171618161916201621162216231624162516261627162816291630163116321633163416351636163716381639164016411642164316441645164616471648164916501651165216531654165516561657165816591660166116621663166416651666166716681669167016711672167316741675167616771678167916801681168216831684168516861687168816891690169116921693169416951696169716981699170017011702170317041705170617071708170917101711171217131714171517161717171817191720172117221723172417251726172717281729173017311732173317341735173617371738173917401741174217431744174517461747174817491750175117521753175417551756175717581759176017611762176317641765176617671768176917701771177217731774177517761
77717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542055205620572058205920602061206220632064206520662067206820692070207120722073207420752076207720782079208020812082208320842085208620872088208920902091209220932094209520962097209820992100210121022103210421052106210721082109211021112112211321142115211621172118211921202121212221232124212521262127212821292130213121322133213421352136213721382139214021412142214321442145214621472148214921502151215221532154215521562157215821592160216121622163216421652166216721682169217021712172217321742175217621772178217921802181218221832184218521862187218821892190219121922193219421952196219721982199220022012202220322042205220622072208220922102211221222132214221522162217221822192220222122222223222422252226222722282229223022312232223322342235223622372238223922402241224222432244224522462247224822492250225122522253225422552256225722582259226022612262226322642265226622672268226922702271227222732274227522762
27722782279228022812282228322842285228622872288228922902291229222932294229522962297229822992300230123022303230423052306230723082309231023112312231323142315231623172318231923202321232223232324232523262327232823292330233123322333233423352336233723382339234023412342234323442345234623472348234923502351235223532354235523562357235823592360236123622363236423652366236723682369237023712372237323742375237623772378237923802381238223832384238523862387238823892390239123922393239423952396239723982399240024012402240324042405240624072408240924102411241224132414241524162417241824192420242124222423242424252426242724282429243024312432243324342435243624372438243924402441244224432444244524462447244824492450245124522453245424552456245724582459246024612462246324642465246624672468246924702471247224732474247524762477247824792480248124822483248424852486248724882489249024912492249324942495249624972498249925002501250225032504250525062507250825092510251125122513251425152516251725182519252025212522252325242525252625272528252925302531253225332534253525362537253825392540254125422543254425452546254725482549255025512552255325542555255625572558255925602561256225632564256525662567256825692570257125722573257425752576257725782579258025812582258325842585258625872588258925902591259225932594259525962597259825992600260126022603260426052606260726082609261026112612261326142615261626172618261926202621262226232624262526262627262826292630263126322633263426352636263726382639264026412642264326442645264626472648264926502651265226532654265526562657265826592660266126622663266426652666266726682669267026712672267326742675267626772678267926802681268226832684268526862687268826892690269126922693269426952696269726982699270027012702270327042705270627072708270927102711271227132714271527162717271827192720272127222723272427252726272727282729273027312732273327342735273627372738273927402741274227432744274527462747274827492750275127522753275427552756275727582759276027612762276327642765276627672768276927702771277227732774277527762
77727782779278027812782278327842785278627872788278927902791279227932794279527962797279827992800280128022803280428052806280728082809281028112812281328142815281628172818281928202821282228232824282528262827282828292830283128322833283428352836283728382839284028412842284328442845284628472848284928502851285228532854285528562857285828592860286128622863286428652866286728682869287028712872287328742875287628772878287928802881288228832884288528862887288828892890289128922893289428952896289728982899290029012902290329042905290629072908290929102911291229132914291529162917291829192920292129222923292429252926292729282929293029312932293329342935293629372938293929402941294229432944294529462947294829492950295129522953295429552956295729582959296029612962296329642965296629672968296929702971297229732974297529762977297829792980298129822983298429852986298729882989299029912992299329942995299629972998299930003001300230033004300530063007300830093010301130123013301430153016301730183019302030213022302330243025302630273028302930303031303230333034303530363037303830393040304130423043304430453046304730483049305030513052305330543055305630573058305930603061306230633064306530663067306830693070307130723073307430753076307730783079308030813082308330843085308630873088308930903091309230933094309530963097309830993100310131023103310431053106310731083109311031113112311331143115311631173118311931203121312231233124312531263127312831293130313131323133313431353136313731383139314031413142314331443145314631473148314931503151315231533154315531563157315831593160316131623163316431653166316731683169317031713172317331743175317631773178317931803181318231833184318531863187318831893190319131923193319431953196319731983199320032013202320332043205320632073208320932103211321232133214321532163217321832193220322132223223322432253226322732283229323032313232323332343235323632373238323932403241324232433244324532463247324832493250325132523253325432553256325732583259326032613262326332643265326632673268326932703271327232733274327532763
27732783279328032813282328332843285328632873288328932903291329232933294329532963297329832993300330133023303330433053306330733083309331033113312331333143315331633173318331933203321332233233324332533263327332833293330333133323333333433353336333733383339334033413342334333443345334633473348334933503351335233533354335533563357335833593360336133623363336433653366336733683369337033713372337333743375337633773378337933803381338233833384338533863387338833893390339133923393339433953396339733983399340034013402340334043405340634073408340934103411341234133414341534163417341834193420342134223423342434253426342734283429343034313432343334343435343634373438343934403441344234433444344534463447344834493450345134523453345434553456345734583459346034613462346334643465346634673468346934703471347234733474347534763477347834793480348134823483348434853486348734883489349034913492349334943495349634973498349935003501350235033504350535063507350835093510351135123513351435153516351735183519352035213522352335243525352635273528352935303531353235333534353535363537353835393540354135423543354435453546354735483549355035513552355335543555355635573558355935603561356235633564356535663567356835693570357135723573357435753576357735783579358035813582358335843585358635873588358935903591359235933594359535963597359835993600360136023603360436053606360736083609361036113612361336143615361636173618361936203621362236233624362536263627362836293630363136323633363436353636363736383639364036413642364336443645364636473648364936503651365236533654365536563657365836593660366136623663366436653666366736683669367036713672367336743675367636773678367936803681368236833684368536863687368836893690369136923693369436953696369736983699370037013702370337043705370637073708370937103711371237133714371537163717371837193720372137223723372437253726372737283729373037313732373337343735373637373738373937403741374237433744374537463747374837493750375137523753375437553756375737583759376037613762376337643765376637673768376937703771377237733774377537763
77737783779378037813782378337843785378637873788378937903791379237933794379537963797379837993800380138023803380438053806380738083809381038113812381338143815381638173818381938203821382238233824382538263827382838293830383138323833383438353836383738383839384038413842384338443845384638473848384938503851385238533854385538563857385838593860386138623863386438653866386738683869387038713872387338743875387638773878387938803881388238833884388538863887388838893890389138923893389438953896389738983899390039013902390339043905390639073908390939103911391239133914391539163917391839193920392139223923392439253926392739283929393039313932393339343935393639373938393939403941394239433944394539463947394839493950395139523953395439553956395739583959396039613962396339643965396639673968396939703971397239733974397539763977397839793980398139823983398439853986398739883989399039913992399339943995399639973998399940004001400240034004400540064007400840094010401140124013401440154016401740184019402040214022402340244025402640274028402940304031403240334034403540364037403840394040404140424043404440454046404740484049405040514052405340544055405640574058405940604061406240634064406540664067406840694070407140724073407440754076407740784079408040814082408340844085408640874088408940904091409240934094409540964097409840994100410141024103410441054106410741084109411041114112411341144115411641174118411941204121412241234124412541264127412841294130413141324133413441354136413741384139414041414142414341444145414641474148414941504151415241534154415541564157415841594160416141624163416441654166416741684169417041714172417341744175417641774178417941804181418241834184418541864187418841894190419141924193419441954196419741984199420042014202420342044205420642074208420942104211421242134214421542164217421842194220422142224223422442254226422742284229423042314232423342344235423642374238423942404241424242434244424542464247424842494250425142524253425442554256425742584259426042614262426342644265426642674268426942704271427242734274427542764
27742784279428042814282428342844285428642874288428942904291429242934294429542964297429842994300430143024303430443054306430743084309431043114312431343144315431643174318431943204321432243234324432543264327432843294330433143324333433443354336433743384339434043414342434343444345434643474348434943504351435243534354435543564357435843594360436143624363436443654366436743684369437043714372437343744375437643774378437943804381438243834384438543864387438843894390439143924393439443954396439743984399440044014402440344044405440644074408440944104411441244134414441544164417441844194420442144224423442444254426442744284429443044314432443344344435443644374438443944404441444244434444444544464447444844494450445144524453445444554456445744584459446044614462446344644465446644674468446944704471447244734474447544764477447844794480448144824483448444854486448744884489449044914492449344944495449644974498449945004501450245034504450545064507450845094510451145124513451445154516451745184519452045214522452345244525452645274528452945304531453245334534453545364537453845394540454145424543454445454546454745484549455045514552455345544555455645574558455945604561456245634564456545664567456845694570457145724573457445754576457745784579458045814582458345844585458645874588458945904591459245934594459545964597459845994600460146024603460446054606460746084609461046114612461346144615461646174618461946204621462246234624462546264627462846294630463146324633463446354636463746384639464046414642464346444645464646474648464946504651465246534654465546564657465846594660466146624663466446654666466746684669467046714672467346744675467646774678467946804681468246834684468546864687468846894690469146924693469446954696469746984699470047014702470347044705470647074708470947104711471247134714471547164717471847194720472147224723472447254726472747284729473047314732473347344735473647374738473947404741474247434744474547464747474847494750475147524753475447554756475747584759476047614762476347644765476647674768476947704771477247734774477547764
77747784779478047814782478347844785478647874788478947904791479247934794479547964797479847994800480148024803480448054806480748084809481048114812481348144815481648174818481948204821482248234824482548264827482848294830483148324833483448354836483748384839484048414842484348444845484648474848484948504851485248534854485548564857485848594860486148624863486448654866486748684869487048714872487348744875487648774878487948804881488248834884488548864887488848894890489148924893489448954896489748984899490049014902490349044905490649074908490949104911491249134914491549164917491849194920492149224923492449254926492749284929493049314932493349344935493649374938493949404941494249434944494549464947494849494950495149524953495449554956495749584959496049614962496349644965496649674968496949704971497249734974497549764977497849794980498149824983498449854986498749884989499049914992499349944995499649974998499950005001500250035004500550065007500850095010501150125013501450155016501750185019502050215022502350245025502650275028502950305031503250335034503550365037503850395040504150425043504450455046504750485049505050515052505350545055505650575058505950605061506250635064506550665067506850695070507150725073507450755076507750785079508050815082508350845085508650875088508950905091509250935094509550965097509850995100510151025103510451055106510751085109511051115112511351145115511651175118511951205121512251235124512551265127512851295130513151325133513451355136513751385139514051415142514351445145514651475148514951505151515251535154515551565157515851595160516151625163516451655166516751685169517051715172517351745175517651775178517951805181518251835184518551865187518851895190519151925193519451955196519751985199520052015202520352045205520652075208520952105211521252135214521552165217521852195220522152225223522452255226522752285229523052315232523352345235523652375238523952405241524252435244524552465247524852495250525152525253525452555256525752585259526052615262526352645265526652675268526952705271527252735274527552765
27752785279528052815282528352845285528652875288528952905291529252935294529552965297529852995300530153025303530453055306530753085309531053115312531353145315531653175318531953205321532253235324532553265327532853295330533153325333533453355336533753385339534053415342534353445345534653475348534953505351535253535354535553565357535853595360536153625363536453655366536753685369537053715372537353745375537653775378537953805381538253835384538553865387538853895390539153925393539453955396539753985399540054015402540354045405540654075408540954105411541254135414541554165417541854195420542154225423542454255426542754285429543054315432543354345435543654375438543954405441544254435444544554465447544854495450545154525453545454555456545754585459546054615462546354645465546654675468546954705471547254735474547554765477547854795480548154825483548454855486548754885489549054915492549354945495549654975498549955005501550255035504550555065507550855095510551155125513551455155516551755185519552055215522552355245525552655275528552955305531553255335534553555365537553855395540554155425543554455455546554755485549555055515552555355545555555655575558555955605561556255635564556555665567556855695570557155725573557455755576557755785579558055815582558355845585558655875588558955905591559255935594559555965597559855995600560156025603560456055606560756085609561056115612561356145615561656175618561956205621562256235624562556265627562856295630563156325633563456355636563756385639564056415642564356445645564656475648564956505651565256535654565556565657565856595660566156625663566456655666566756685669567056715672567356745675567656775678567956805681568256835684568556865687568856895690569156925693569456955696569756985699570057015702570357045705570657075708570957105711571257135714571557165717571857195720572157225723572457255726572757285729573057315732573357345735573657375738573957405741574257435744574557465747574857495750575157525753575457555756575757585759576057615762576357645765576657675768576957705771577257735774577557765
77757785779578057815782578357845785578657875788578957905791579257935794579557965797579857995800580158025803580458055806580758085809581058115812581358145815581658175818581958205821582258235824582558265827582858295830583158325833583458355836583758385839584058415842584358445845584658475848584958505851585258535854585558565857585858595860586158625863586458655866586758685869587058715872587358745875587658775878587958805881588258835884588558865887588858895890589158925893589458955896589758985899590059015902590359045905590659075908590959105911591259135914591559165917591859195920592159225923592459255926592759285929593059315932593359345935593659375938593959405941594259435944594559465947594859495950595159525953595459555956595759585959596059615962596359645965596659675968596959705971597259735974597559765977597859795980598159825983598459855986598759885989599059915992599359945995599659975998599960006001600260036004600560066007600860096010601160126013601460156016601760186019602060216022602360246025602660276028602960306031603260336034603560366037603860396040604160426043604460456046604760486049605060516052605360546055605660576058605960606061606260636064606560666067606860696070607160726073607460756076607760786079608060816082608360846085608660876088608960906091609260936094609560966097609860996100610161026103610461056106610761086109611061116112611361146115611661176118611961206121612261236124612561266127612861296130613161326133613461356136613761386139614061416142614361446145614661476148614961506151615261536154615561566157615861596160616161626163616461656166616761686169617061716172617361746175617661776178617961806181618261836184618561866187618861896190619161926193619461956196619761986199620062016202620362046205620662076208620962106211621262136214621562166217621862196220622162226223622462256226622762286229623062316232623362346235623662376238623962406241624262436244624562466247624862496250625162526253625462556256625762586259626062616262626362646265626662676268626962706271627262736274627562766
27762786279628062816282628362846285628662876288628962906291629262936294629562966297629862996300630163026303630463056306630763086309631063116312631363146315631663176318631963206321632263236324632563266327632863296330633163326333633463356336633763386339634063416342634363446345634663476348634963506351635263536354635563566357635863596360636163626363636463656366636763686369637063716372637363746375637663776378637963806381638263836384638563866387638863896390639163926393639463956396639763986399640064016402640364046405640664076408640964106411641264136414641564166417641864196420642164226423642464256426642764286429643064316432643364346435643664376438643964406441644264436444644564466447644864496450645164526453645464556456645764586459646064616462646364646465646664676468646964706471647264736474647564766477647864796480648164826483648464856486648764886489649064916492649364946495649664976498649965006501650265036504650565066507650865096510651165126513651465156516651765186519652065216522652365246525652665276528652965306531653265336534653565366537653865396540654165426543654465456546654765486549655065516552655365546555655665576558655965606561656265636564656565666567656865696570657165726573657465756576657765786579658065816582658365846585658665876588658965906591659265936594659565966597659865996600660166026603660466056606660766086609661066116612661366146615661666176618661966206621662266236624662566266627662866296630663166326633663466356636663766386639664066416642664366446645664666476648664966506651665266536654665566566657665866596660666166626663666466656666666766686669667066716672667366746675667666776678667966806681668266836684668566866687668866896690669166926693669466956696669766986699670067016702670367046705670667076708670967106711671267136714671567166717671867196720672167226723672467256726672767286729673067316732673367346735673667376738673967406741674267436744674567466747674867496750675167526753675467556756675767586759676067616762676367646765676667676768676967706771677267736774677567766
77767786779678067816782678367846785678667876788678967906791679267936794679567966797679867996800680168026803680468056806680768086809681068116812681368146815681668176818681968206821682268236824682568266827682868296830683168326833683468356836683768386839684068416842684368446845684668476848684968506851685268536854685568566857685868596860686168626863686468656866686768686869687068716872687368746875687668776878687968806881688268836884688568866887688868896890689168926893689468956896689768986899690069016902690369046905690669076908690969106911691269136914691569166917691869196920692169226923692469256926692769286929693069316932693369346935693669376938693969406941694269436944694569466947694869496950695169526953695469556956695769586959696069616962696369646965696669676968696969706971697269736974697569766977697869796980698169826983698469856986698769886989699069916992699369946995699669976998699970007001700270037004700570067007700870097010701170127013701470157016701770187019702070217022702370247025702670277028702970307031703270337034703570367037703870397040704170427043704470457046704770487049705070517052705370547055705670577058705970607061706270637064706570667067706870697070707170727073707470757076707770787079708070817082708370847085708670877088708970907091709270937094709570967097709870997100710171027103710471057106710771087109711071117112711371147115711671177118711971207121712271237124712571267127712871297130713171327133713471357136713771387139714071417142714371447145714671477148714971507151715271537154715571567157715871597160716171627163716471657166716771687169717071717172717371747175717671777178717971807181718271837184718571867187718871897190719171927193719471957196719771987199720072017202720372047205720672077208720972107211721272137214721572167217721872197220722172227223722472257226722772287229723072317232723372347235723672377238723972407241724272437244724572467247724872497250725172527253725472557256725772587259726072617262726372647265726672677268726972707271727272737274727572767
27772787279728072817282728372847285728672877288728972907291729272937294729572967297729872997300730173027303730473057306730773087309731073117312731373147315731673177318731973207321732273237324732573267327732873297330733173327333733473357336733773387339734073417342734373447345734673477348734973507351735273537354735573567357735873597360736173627363736473657366736773687369737073717372737373747375737673777378737973807381738273837384738573867387738873897390739173927393739473957396739773987399740074017402740374047405740674077408740974107411741274137414741574167417741874197420742174227423742474257426742774287429743074317432743374347435743674377438743974407441744274437444744574467447744874497450745174527453745474557456745774587459746074617462746374647465746674677468746974707471747274737474747574767477747874797480748174827483748474857486748774887489749074917492749374947495749674977498749975007501750275037504750575067507750875097510751175127513751475157516751775187519752075217522752375247525752675277528752975307531753275337534753575367537753875397540754175427543754475457546754775487549755075517552755375547555755675577558755975607561756275637564756575667567756875697570757175727573757475757576757775787579758075817582758375847585758675877588758975907591759275937594759575967597759875997600760176027603760476057606760776087609761076117612761376147615761676177618761976207621762276237624762576267627762876297630763176327633763476357636763776387639764076417642764376447645764676477648764976507651765276537654765576567657765876597660766176627663766476657666766776687669767076717672767376747675767676777678767976807681768276837684768576867687768876897690769176927693769476957696769776987699770077017702770377047705770677077708770977107711771277137714771577167717771877197720772177227723772477257726772777287729773077317732773377347735773677377738773977407741774277437744774577467747774877497750775177527753775477557756775777587759776077617762776377647765776677677768776977707771777277737774777577767
77777787779778077817782778377847785778677877788778977907791779277937794779577967797779877997800780178027803780478057806780778087809781078117812781378147815781678177818781978207821782278237824782578267827782878297830783178327833783478357836783778387839784078417842784378447845784678477848784978507851785278537854785578567857785878597860786178627863786478657866786778687869787078717872787378747875787678777878787978807881788278837884788578867887788878897890789178927893789478957896789778987899790079017902790379047905790679077908790979107911791279137914791579167917791879197920792179227923792479257926792779287929793079317932793379347935793679377938793979407941794279437944794579467947794879497950795179527953795479557956795779587959796079617962796379647965796679677968796979707971797279737974797579767977797879797980798179827983798479857986798779887989799079917992799379947995799679977998799980008001800280038004800580068007800880098010801180128013801480158016801780188019802080218022802380248025802680278028802980308031803280338034803580368037803880398040804180428043804480458046804780488049805080518052805380548055805680578058805980608061806280638064806580668067806880698070807180728073807480758076807780788079808080818082808380848085808680878088808980908091809280938094809580968097809880998100810181028103810481058106810781088109811081118112811381148115811681178118811981208121812281238124812581268127812881298130813181328133813481358136813781388139814081418142814381448145814681478148814981508151815281538154815581568157815881598160816181628163816481658166816781688169817081718172817381748175817681778178817981808181818281838184818581868187818881898190819181928193819481958196819781988199820082018202820382048205820682078208820982108211821282138214821582168217821882198220822182228223822482258226822782288229823082318232823382348235823682378238823982408241824282438244824582468247824882498250825182528253825482558256825782588259826082618262826382648265826682678268826982708271827282738274827582768
27782788279828082818282828382848285828682878288828982908291829282938294829582968297829882998300830183028303830483058306830783088309831083118312831383148315831683178318831983208321832283238324832583268327832883298330833183328333833483358336833783388339834083418342834383448345834683478348834983508351835283538354835583568357835883598360836183628363836483658366836783688369837083718372837383748375837683778378837983808381838283838384838583868387838883898390839183928393839483958396839783988399840084018402840384048405840684078408840984108411841284138414841584168417841884198420842184228423842484258426842784288429843084318432843384348435843684378438843984408441844284438444844584468447844884498450845184528453845484558456845784588459846084618462846384648465846684678468846984708471847284738474847584768477847884798480848184828483848484858486848784888489849084918492849384948495849684978498849985008501850285038504850585068507850885098510851185128513851485158516851785188519852085218522852385248525852685278528852985308531853285338534853585368537853885398540854185428543854485458546854785488549855085518552855385548555855685578558855985608561856285638564856585668567856885698570857185728573857485758576857785788579858085818582858385848585858685878588858985908591859285938594859585968597859885998600860186028603860486058606860786088609861086118612861386148615861686178618861986208621862286238624862586268627862886298630863186328633863486358636863786388639864086418642864386448645864686478648864986508651865286538654865586568657865886598660866186628663866486658666866786688669867086718672867386748675867686778678867986808681868286838684868586868687868886898690869186928693869486958696869786988699870087018702870387048705870687078708870987108711871287138714871587168717871887198720872187228723872487258726872787288729873087318732873387348735873687378738873987408741874287438744874587468747874887498750875187528753875487558756875787588759876087618762876387648765876687678768876987708771877287738774877587768
7778778877987808781878287838784878587868787878887898790879187928793879487958796879787988799880088018802880388048805880688078808880988108811881288138814881588168817881888198820882188228823882488258826882788288829883088318832883388348835883688378838883988408841884288438844884588468847884888498850885188528853885488558856885788588859886088618862886388648865886688678868886988708871887288738874887588768877887888798880888188828883888488858886888788888889889088918892889388948895889688978898889989008901890289038904890589068907890889098910891189128913891489158916891789188919892089218922892389248925892689278928892989308931893289338934893589368937893889398940894189428943894489458946894789488949895089518952895389548955895689578958895989608961896289638964896589668967896889698970897189728973897489758976897789788979898089818982898389848985898689878988898989908991899289938994899589968997899889999000900190029003900490059006900790089009901090119012901390149015901690179018901990209021902290239024902590269027902890299030903190329033903490359036903790389039904090419042904390449045904690479048904990509051905290539054905590569057905890599060906190629063906490659066906790689069907090719072907390749075907690779078907990809081908290839084908590869087908890899090909190929093909490959096909790989099910091019102910391049105910691079108910991109111911291139114911591169117911891199120912191229123912491259126912791289129913091319132913391349135913691379138913991409141914291439144914591469147914891499150915191529153915491559156915791589159916091619162916391649165916691679168916991709171917291739174917591769177917891799180918191829183918491859186918791889189919091919192919391949195919691979198919992009201920292039204920592069207920892099210921192129213921492159216921792189219922092219222922392249225922692279228922992309231923292339234923592369237923892399240924192429243924492459246924792489249925092519252925392549255925692579258925992609261926292639264926592669267926892699270927192729273927492759276 
|
- //-------------------------------------------------------------------------------------------------------
- // Copyright (C) Microsoft. All rights reserved.
- // Licensed under the MIT license. See LICENSE.txt file in the project root for full license information.
- //-------------------------------------------------------------------------------------------------------
- #include "CommonMemoryPch.h"
- #ifdef _M_AMD64
- #include "amd64.h"
- #endif
- #ifdef _M_ARM
- #include "arm.h"
- #endif
- #ifdef _M_ARM64
- #include "arm64.h"
- #endif
- #include "Core/BinaryFeatureControl.h"
- #include "Common/ThreadService.h"
- #include "Memory/AutoAllocatorObjectPtr.h"
- #include "Common/Tick.h"
// Perf-counter bookkeeping for outstanding weak-reference objects.
DEFINE_RECYCLER_TRACKER_PERF_COUNTER(RecyclerWeakReferenceBase);

#ifdef PROFILE_RECYCLER_ALLOC
// Marker type used only for its typeid: tags the unused tail of a
// bump-allocated block in the allocation tracker.
struct UnallocatedPortionOfBumpAllocatedBlock
{
};
// Marker type used only for its typeid: tags objects sitting on an
// explicit free list in the allocation tracker.
struct ExplicitFreeListedObject
{
};
Recycler::TrackerData Recycler::TrackerData::EmptyData(&typeid(UnallocatedPortionOfBumpAllocatedBlock), false);
Recycler::TrackerData Recycler::TrackerData::ExplicitFreeListObjectData(&typeid(ExplicitFreeListedObject), false);
#endif

// Process-wide default collection wrapper, used when the host does not
// install its own wrapper.
DefaultRecyclerCollectionWrapper DefaultRecyclerCollectionWrapper::Instance;
// Returns true when collection must not be triggered on this recycler right
// now (heap enumeration in progress, or explicit disable).  In DBG builds,
// validates that a disabled state only occurs in the legitimate scenarios.
inline bool
DefaultRecyclerCollectionWrapper::IsCollectionDisabled(Recycler * recycler)
{
    // GC shouldn't be triggered during heap enum, unless we missed a case where it allocate memory (which
    // shouldn't happen during heap enum) or for the case we explicitly allow allocation
    // REVIEW: isHeapEnumInProgress should have been a collection state and checked before to avoid a check here.
    // Collection will be disabled in VarDispEx because it could be called from projection re-entrance as ASTA allows
    // QI/AddRef/Release to come back.
    bool collectionDisabled = recycler->IsCollectionDisabled();
#if DBG
    if (collectionDisabled)
    {
        // disabled collection should only happen if we allowed allocation during heap enum
        if (recycler->IsHeapEnumInProgress())
        {
            Assert(recycler->AllowAllocationDuringHeapEnum());
        }
        else
        {
#ifdef ENABLE_PROJECTION
            // Outside heap enum, only projection ref-count tracking may disable collection.
            Assert(recycler->IsInRefCountTrackingForProjection());
#else
            // No projection support: there is no legitimate way to get here.
            Assert(false);
#endif
        }
    }
#endif
    return collectionDisabled;
}
// Invokes the given collection member function on the recycler with the given
// flags, unless collection is currently disabled.  The call is wrapped in a
// no-exception region; returns the function's result, or FALSE if skipped.
BOOL DefaultRecyclerCollectionWrapper::ExecuteRecyclerCollectionFunction(Recycler * recycler, CollectionFunction function, CollectionFlags flags)
{
    if (IsCollectionDisabled(recycler))
    {
        return FALSE;
    }
    BOOL ret = FALSE;
    BEGIN_NO_EXCEPTION
    {
        // Dispatch through the pointer-to-member collection function.
        ret = (recycler->*(function))(flags);
    }
    END_NO_EXCEPTION;
    return ret;
}
// Runs pending finalizer/dispose work on the recycler, unless collection is
// currently disabled.  Like ExecuteRecyclerCollectionFunction, the work runs
// inside a no-exception region.
void
DefaultRecyclerCollectionWrapper::DisposeObjects(Recycler * recycler)
{
    if (IsCollectionDisabled(recycler))
    {
        return;
    }
    BEGIN_NO_EXCEPTION
    {
        recycler->DisposeObjects();
    }
    END_NO_EXCEPTION;
}
// Forward declaration; returns the base address of the current thread's stack
// (definition not visible in this chunk).
static void* GetStackBase();

// Explicit instantiations of the hot allocation paths so their definitions
// are emitted (and force-inlined) in this translation unit.
template _ALWAYSINLINE char * Recycler::AllocWithAttributesInlined<NoBit, false>(size_t size);
template _ALWAYSINLINE char* Recycler::RealAlloc<NoBit, false>(HeapInfo* heap, size_t size);
template _ALWAYSINLINE _Ret_notnull_ void * __cdecl operator new<Recycler>(size_t byteSize, Recycler * alloc, char * (Recycler::*AllocFunc)(size_t));
// Constructs a recycler over the given allocation policy manager and page
// allocator.  The initializer list establishes quiescent defaults for every
// member; collection modes (concurrent / parallel / partial) stay disabled
// here and are enabled later by Initialize().  outOfMemoryFunc is the host
// callback invoked on allocation failure.
Recycler::Recycler(AllocationPolicyManager * policyManager, IdleDecommitPageAllocator * pageAllocator, void (*outOfMemoryFunc)(), Js::ConfigFlagsTable& configFlagsTable, RecyclerTelemetryHostInterface* hostInterface) :
    collectionStateChangedObserver(this),
    collectionState(CollectionStateNotCollecting, &collectionStateChangedObserver),
    recyclerFlagsTable(configFlagsTable),
    autoHeap(policyManager, configFlagsTable, pageAllocator),
#ifdef ENABLE_JS_ETW
    collectionStartReason(ETWEventGCActivationTrigger::ETWEvent_GC_Trigger_Unknown),
    collectionFinishReason(ETWEventGCActivationTrigger::ETWEvent_GC_Trigger_Unknown),
#endif
    threadService(nullptr),
    // One page pool + mark context for in-thread marking, and three more for
    // the parallel mark helpers.
    markPagePool(configFlagsTable),
    parallelMarkPagePool1(configFlagsTable),
    parallelMarkPagePool2(configFlagsTable),
    parallelMarkPagePool3(configFlagsTable),
    markContext(this, &this->markPagePool),
    parallelMarkContext1(this, &this->parallelMarkPagePool1),
    parallelMarkContext2(this, &this->parallelMarkPagePool2),
    parallelMarkContext3(this, &this->parallelMarkPagePool3),
#if ENABLE_PARTIAL_GC
    clientTrackedObjectAllocator(_u("CTO-List"), pageAllocator, Js::Throw::OutOfMemory),
#endif
    outOfMemoryFunc(outOfMemoryFunc),
#ifdef RECYCLER_TEST_SUPPORT
    checkFn(NULL),
#endif
    externalRootMarker(NULL),
    externalRootMarkerContext(NULL),
    recyclerSweepManager(nullptr),
    inEndMarkOnLowMemory(false),
    enableScanInteriorPointers(CUSTOM_CONFIG_FLAG(configFlagsTable, RecyclerForceMarkInterior)),
    enableScanImplicitRoots(false),
    disableCollectOnAllocationHeuristics(false),
    skipStack(false),
    mainThreadHandle(NULL),
#if ENABLE_CONCURRENT_GC
    backgroundFinishMarkCount(0),
    hasPendingUnpinnedObject(false),
    hasPendingConcurrentFindRoot(false),
    queueTrackedObject(false),
    enableConcurrentMark(false), // Default to non-concurrent
    enableParallelMark(false),
    enableConcurrentSweep(false),
#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
    allowAllocationsDuringConcurrentSweepForCollection(false),
#endif
    concurrentThread(NULL),
    concurrentWorkReadyEvent(NULL),
    concurrentWorkDoneEvent(NULL),
    parallelThread1(this, &Recycler::ParallelWorkFunc<0>),
    parallelThread2(this, &Recycler::ParallelWorkFunc<1>),
    priorityBoost(false),
    isAborting(false),
#if DBG
    concurrentThreadExited(true),
    isProcessingTrackedObjects(false),
    hasIncompleteDoCollect(false),
    isConcurrentGCOnIdle(false),
    isFinishGCOnIdle(false),
#endif
#ifdef IDLE_DECOMMIT_ENABLED
    concurrentIdleDecommitEvent(nullptr),
#endif
#endif
#if DBG
    isExternalStackSkippingGC(false),
    isProcessingRescan(false),
#endif
#if ENABLE_PARTIAL_GC
    inPartialCollectMode(false),
    scanPinnedObjectMap(false),
    partialUncollectedAllocBytes(0),
    // (size_t)-1 acts as the "not yet computed" sentinel for this threshold.
    uncollectedNewPageCountPartialCollect((size_t)-1),
#if ENABLE_CONCURRENT_GC
    partialConcurrentNextCollection(false),
#endif
#ifdef RECYCLER_STRESS
    forcePartialScanStack(false),
#endif
#endif
#if defined(RECYCLER_DUMP_OBJECT_GRAPH) || defined(LEAK_REPORT) || defined(CHECK_MEMORY_LEAK)
    isPrimaryMarkContextInitialized(false),
#endif
    allowDispose(false),
    inDisposeWrapper(false),
    hasDisposableObject(false),
    hasNativeGCHost(false),
    tickCountNextDispose(0),
    transientPinnedObject(nullptr),
    // Pin and weak-reference tables start with 1024 buckets; allocated from
    // the non-mem-protect heap allocator.
    pinnedObjectMap(1024, HeapAllocator::GetNoMemProtectInstance()),
    weakReferenceMap(1024, HeapAllocator::GetNoMemProtectInstance()),
    weakReferenceCleanupId(0),
#if ENABLE_WEAK_REFERENCE_REGIONS
    weakReferenceRegionList(HeapAllocator::GetNoMemProtectInstance()),
#endif
    collectionWrapper(&DefaultRecyclerCollectionWrapper::Instance),
    isScriptActive(false),
    isInScript(false),
    isShuttingDown(false),
    inExhaustiveCollection(false),
    hasExhaustiveCandidate(false),
    inDecommitNowCollection(false),
    inCacheCleanupCollection(false),
    hasPendingDeleteGuestArena(false),
    needOOMRescan(false),
#if ENABLE_CONCURRENT_GC && ENABLE_PARTIAL_GC
    hasBackgroundFinishPartial(false),
#endif
    decommitOnFinish(false)
#ifdef PROFILE_EXEC
    , profiler(nullptr)
    , backgroundProfiler(nullptr)
    , backgroundProfilerPageAllocator(nullptr, configFlagsTable, PageAllocatorType_GCThread)
    , backgroundProfilerArena()
#endif
#ifdef PROFILE_MEM
    , memoryData(nullptr)
#endif
#ifdef RECYCLER_DUMP_OBJECT_GRAPH
    , objectGraphDumper(nullptr)
    , dumpObjectOnceOnCollect(false)
#endif
#ifdef PROFILE_RECYCLER_ALLOC
    , trackerDictionary(nullptr)
#endif
#ifdef HEAP_ENUMERATION_VALIDATION
    , pfPostHeapEnumScanCallback(nullptr)
#endif
#ifdef NTBUILD
    , telemetryBlock(&localTelemetryBlock)
#endif
#ifdef ENABLE_BASIC_TELEMETRY
    , telemetryStats(this, hostInterface)
#endif
#ifdef ENABLE_JS_ETW
    , bulkFreeMemoryWrittenCount(0)
#endif
#ifdef RECYCLER_PAGE_HEAP
    , isPageHeapEnabled(false)
    , capturePageHeapAllocStack(false)
    , capturePageHeapFreeStack(false)
#endif
    , objectBeforeCollectCallbackMap(nullptr)
    , objectBeforeCollectCallbackState(ObjectBeforeCollectCallback_None)
#if GLOBAL_ENABLE_WRITE_BARRIER
    , pendingWriteBarrierBlockMap(&HeapAllocator::Instance)
#endif
#ifdef PROFILE_RECYCLER_ALLOC
    , trackerCriticalSection(nullptr)
#endif
{
#ifdef ENABLE_BASIC_TELEMETRY
    if (CoCreateGuid(&recyclerID) != S_OK)
    {
        // CoCreateGuid failed; fall back to an all-zero recycler id.
        recyclerID = { 0 };
    }
    // Wire each page allocator's decommit statistics into telemetry.
    this->GetHeapInfo()->GetRecyclerPageAllocator()->SetDecommitStats(this->GetRecyclerTelemetryInfo().GetThreadPageAllocator_decommitStats());
    this->GetHeapInfo()->GetRecyclerLeafPageAllocator()->SetDecommitStats(this->GetRecyclerTelemetryInfo().GetRecyclerLeafPageAllocator_decommitStats());
    this->GetHeapInfo()->GetRecyclerLargeBlockPageAllocator()->SetDecommitStats(this->GetRecyclerTelemetryInfo().GetRecyclerLargeBlockPageAllocator_decommitStats());
#ifdef RECYCLER_WRITE_BARRIER_ALLOC_SEPARATE_PAGE
    this->GetHeapInfo()->GetRecyclerWithBarrierPageAllocator()->SetDecommitStats(this->GetRecyclerTelemetryInfo().GetRecyclerWithBarrierPageAllocator_decommitStats());
#endif
#endif
#ifdef RECYCLER_MARK_TRACK
    // A single mark map is shared by the main and all parallel mark contexts.
    this->markMap = NoCheckHeapNew(MarkMap, &NoCheckHeapAllocator::Instance, 163, &markMapCriticalSection);
    markContext.SetMarkMap(markMap);
    parallelMarkContext1.SetMarkMap(markMap);
    parallelMarkContext2.SetMarkMap(markMap);
    parallelMarkContext3.SetMarkMap(markMap);
#endif
#ifdef RECYCLER_MEMORY_VERIFY
    verifyPad = GetRecyclerFlagsTable().RecyclerVerifyPadSize;
    verifyEnabled = GetRecyclerFlagsTable().IsEnabled(Js::RecyclerVerifyFlag);
    if (verifyEnabled)
    {
        autoHeap.EnableVerify();
    }
#endif
#ifdef RECYCLER_NO_PAGE_REUSE
    if (GetRecyclerFlagsTable().RecyclerNoPageReuse)
    {
        autoHeap.DisablePageReuse();
    }
#endif
    this->inDispose = false;
#if DBG
    this->heapBlockCount = 0;
    this->disableThreadAccessCheck = false;
#if ENABLE_CONCURRENT_GC
    this->disableConcurrentThreadExitedCheck = false;
#endif
#endif
#if DBG || defined RECYCLER_TRACE
    this->collectionCount = 0;
    this->inResolveExternalWeakReferences = false;
#endif
#if DBG || defined(RECYCLER_STATS)
    isForceSweeping = false;
#endif
#ifdef RECYCLER_FINALIZE_CHECK
    collectionStats.finalizeCount = 0;
#endif
    RecyclerMemoryTracking::ReportRecyclerCreate(this);
#if DBG_DUMP
    forceTraceMark = false;
#endif
    isHeapEnumInProgress = false;
    isCollectionDisabled = false;
#if DBG
    allowAllocationDuringRenentrance = false;
    allowAllocationDuringHeapEnum = false;
#ifdef ENABLE_PROJECTION
    isInRefCountTrackingForProjection = false;
#endif
#endif
    ScheduleNextCollection();
#if defined(RECYCLER_DUMP_OBJECT_GRAPH) || defined(LEAK_REPORT) || defined(CHECK_MEMORY_LEAK)
    this->inDllCanUnloadNow = false;
    this->inDetachProcess = false;
#endif
#ifdef NTBUILD
    memset(&localTelemetryBlock, 0, sizeof(localTelemetryBlock));
#endif
#ifdef ENABLE_DEBUG_CONFIG_OPTIONS
    // recycler requires at least Recycler::PrimaryMarkStackReservedPageCount to function properly for the main mark context
    this->markContext.SetMaxPageCount(max(static_cast<size_t>(GetRecyclerFlagsTable().MaxMarkStackPageCount), static_cast<size_t>(Recycler::PrimaryMarkStackReservedPageCount)));
    this->parallelMarkContext1.SetMaxPageCount(GetRecyclerFlagsTable().MaxMarkStackPageCount);
    this->parallelMarkContext2.SetMaxPageCount(GetRecyclerFlagsTable().MaxMarkStackPageCount);
    this->parallelMarkContext3.SetMaxPageCount(GetRecyclerFlagsTable().MaxMarkStackPageCount);
    if (GetRecyclerFlagsTable().IsEnabled(Js::GCMemoryThresholdFlag))
    {
        // Note, we can't do this in the constructor for RecyclerHeuristic::Instance because it runs before config is processed
        RecyclerHeuristic::Instance.ConfigureBaseFactor(GetRecyclerFlagsTable().GCMemoryThreshold);
    }
#endif
}
#if DBG
// Debug-only: turns off the validation that the recycler (and its heap) is
// only touched from the owning thread.
void
Recycler::SetDisableThreadAccessCheck()
{
    this->autoHeap.SetDisableThreadAccessCheck();
    this->disableThreadAccessCheck = true;
}
#endif
// Switches this recycler into MemProtect heap mode: conservative interior
// pointer scanning and implicit-root scanning are enabled (IsMemProtectMode()
// later keys off enableScanImplicitRoots), and allocation-driven collection
// heuristics are disabled.  Under RECYCLER_STRESS, the stress knobs are
// re-read from the MemProtect-specific config flags.
void
Recycler::SetMemProtectMode()
{
    this->enableScanInteriorPointers = true;
    this->enableScanImplicitRoots = true;
    this->disableCollectOnAllocationHeuristics = true;
#ifdef RECYCLER_STRESS
    this->recyclerStress = GetRecyclerFlagsTable().MemProtectHeapStress;
#if ENABLE_CONCURRENT_GC
    this->recyclerBackgroundStress = GetRecyclerFlagsTable().MemProtectHeapBackgroundStress;
    this->recyclerConcurrentStress = GetRecyclerFlagsTable().MemProtectHeapConcurrentStress;
    this->recyclerConcurrentRepeatStress = GetRecyclerFlagsTable().MemProtectHeapConcurrentRepeatStress;
#endif
#if ENABLE_PARTIAL_GC
    this->recyclerPartialStress = GetRecyclerFlagsTable().MemProtectHeapPartialStress;
#endif
#endif
}
// Emits an ETW event with the MemProtect heap's current size statistics.
// fromGC indicates whether the log is taken as part of a GC.  No-op when the
// ETW event is not enabled (or ETW support is compiled out).
void
Recycler::LogMemProtectHeapSize(bool fromGC)
{
    Assert(IsMemProtectMode());
#ifdef ENABLE_JS_ETW
    if (IS_JS_ETW(EventEnabledMEMPROTECT_GC_HEAP_SIZE()))
    {
        // Snapshot the heap counters before writing the event.
        size_t usedBytes = autoHeap.GetUsedBytes();
        size_t reservedBytes = autoHeap.GetReservedBytes();
        size_t committedBytes = autoHeap.GetCommittedBytes();
        size_t numberOfSegments = autoHeap.GetNumberOfSegments();
        JS_ETW(EventWriteMEMPROTECT_GC_HEAP_SIZE(this, usedBytes, reservedBytes, committedBytes, numberOfSegments, fromGC));
    }
#endif
}
#if DBG
// Debug-only: suppresses the destructor's assertion that the concurrent GC
// thread has exited.  Also turns off every stress mode, since stress-driven
// collections could otherwise race with the skipped shutdown check.
void
Recycler::SetDisableConcurrentThreadExitedCheck()
{
#if ENABLE_CONCURRENT_GC
    disableConcurrentThreadExitedCheck = true;
#endif
#ifdef RECYCLER_STRESS
    this->recyclerStress = false;
#if ENABLE_CONCURRENT_GC
    this->recyclerBackgroundStress = false;
    this->recyclerConcurrentStress = false;
    this->recyclerConcurrentRepeatStress = false;
#endif
#if ENABLE_PARTIAL_GC
    this->recyclerPartialStress = false;
#endif
#endif
}
#endif
#if DBG
// Debug-only: clears the recorded owning-thread ids on the heap and on the
// page allocators used by background work, so thread-access checks don't
// fire when ownership legitimately moves (e.g. during shutdown from another
// thread).
void
Recycler::ResetThreadId()
{
    autoHeap.ResetThreadId();
#if ENABLE_CONCURRENT_GC
    if (this->IsConcurrentEnabled())
    {
        markContext.GetPageAllocator()->ClearConcurrentThreadId();
    }
#endif
#if defined(DBG) && defined(PROFILE_EXEC)
    this->backgroundProfilerPageAllocator.ClearConcurrentThreadId();
#endif
}
#endif
// Tears down the recycler.  The ordering here matters: collection is stopped
// first, leak reporting runs while state is still intact, the heap is closed,
// and only then are weak references nulled out and auxiliary structures freed.
Recycler::~Recycler()
{
#if ENABLE_CONCURRENT_GC
    Assert(!this->isAborting);
#endif
#if DBG && GLOBAL_ENABLE_WRITE_BARRIER
    // Unlink this recycler from the global debug list of live recyclers.
    recyclerListLock.Enter();
    if (recyclerList == this)
    {
        recyclerList = this->next;
    }
    else if(recyclerList)
    {
        Recycler* list = recyclerList;
        while (list->next != this)
        {
            list = list->next;
        }
        list->next = this->next;
    }
    recyclerListLock.Leave();
#endif
    // Stop any further collection
    this->isShuttingDown = true;
#if DBG
    this->ResetThreadId();
#endif
#ifdef ENABLE_JS_ETW
    FlushFreeRecord();
#endif
    ClearObjectBeforeCollectCallbacks();
#ifdef RECYCLER_DUMP_OBJECT_GRAPH
    if (GetRecyclerFlagsTable().DumpObjectGraphOnExit)
    {
        // Always skip stack here, as we may be running the dtor on another thread.
        RecyclerObjectGraphDumper::Param param = { 0 };
        param.skipStack = true;
        this->DumpObjectGraph(&param);
    }
#endif
    AUTO_LEAK_REPORT_SECTION(this->GetRecyclerFlagsTable(), _u("Recycler (%p): %s"), this, this->IsInDllCanUnloadNow()? _u("DllCanUnloadNow") :
        this->IsInDetachProcess()? _u("DetachProcess") : _u("Destructor"));
#ifdef LEAK_REPORT
    ReportLeaks();
#endif
#ifdef CHECK_MEMORY_LEAK
    CheckLeaks(this->IsInDllCanUnloadNow()? _u("DllCanUnloadNow") : this->IsInDetachProcess()? _u("DetachProcess") : _u("Destructor"));
#endif
    AUTO_LEAK_REPORT_SECTION_0(this->GetRecyclerFlagsTable(), _u("Skipped finalizers"));
#if ENABLE_CONCURRENT_GC
    Assert(concurrentThread == nullptr);
    // We only sometime clean up the state after abort concurrent to not collection
    // Still need to delete heap block that is held by the recyclerSweep
    if (recyclerSweepManager != nullptr)
    {
        recyclerSweepManager->ShutdownCleanup();
        recyclerSweepManager = nullptr;
    }
    if (mainThreadHandle != nullptr)
    {
        CloseHandle(mainThreadHandle);
    }
#endif
    autoHeap.Close();
    // Release the mark contexts after the heap is closed.
    markContext.Release();
    parallelMarkContext1.Release();
    parallelMarkContext2.Release();
    parallelMarkContext3.Release();
    // Clean up the weak reference map so that
    // objects being finalized can safely refer to weak references
    // (this could otherwise become a problem for weak references held
    // to large objects since their block would be destroyed before
    // the finalizer was run)
    // When the recycler is shutting down, all objects are going to be reclaimed
    // so null out the weak references so that anyone relying on weak
    // references simply thinks the object has been reclaimed
    weakReferenceMap.Map([](RecyclerWeakReferenceBase * weakRef) -> bool
    {
        weakRef->strongRef = nullptr;
        // Put in a dummy heap block so that we can still do the isPendingConcurrentSweep check first.
        weakRef->strongRefHeapBlock = &CollectedRecyclerWeakRefHeapBlock::Instance;
        // Remove
        return false;
    });
#if ENABLE_PARTIAL_GC
    clientTrackedObjectList.Clear(&this->clientTrackedObjectAllocator);
#endif
#ifdef PROFILE_RECYCLER_ALLOC
    if (trackerDictionary != nullptr)
    {
        // Free every tracker item before deleting the dictionary itself.
        this->trackerDictionary->Map([](type_info const *, TrackerItem * item)
        {
            NoCheckHeapDelete(item);
        });
        NoCheckHeapDelete(this->trackerDictionary);
        this->trackerDictionary = nullptr;
        delete(trackerCriticalSection);
    }
#endif
#ifdef RECYCLER_MARK_TRACK
    NoCheckHeapDelete(this->markMap);
    this->markMap = nullptr;
#endif
#if DBG
    // Disable idle decommit asserts
    autoHeap.ShutdownIdleDecommit();
#endif
    Assert(this->collectionState == CollectionStateExit || this->collectionState == CollectionStateNotCollecting);
#if ENABLE_CONCURRENT_GC
    Assert(this->disableConcurrentThreadExitedCheck || this->concurrentThreadExited == true);
#endif
}
- void
- Recycler::SetIsThreadBound()
- {
- Assert(mainThreadHandle == nullptr);
- ::DuplicateHandle(::GetCurrentProcess(), ::GetCurrentThread(), ::GetCurrentProcess(), &mainThreadHandle,
- 0, FALSE, DUPLICATE_SAME_ACCESS);
- stackBase = GetStackBase();
- }
// Pins obj as a GC root.  The most recent add is held as a "transient" pin
// (transientPinnedObject) and only promoted into pinnedObjectMap when the
// next RootAddRef displaces it — so the common AddRef/Release-in-quick-
// succession pattern never touches the map.  If count is non-null it receives
// the resulting pin count for obj (the map count plus the pending transient).
void
Recycler::RootAddRef(void* obj, uint *count)
{
    Assert(this->IsValidObject(obj));
    if (transientPinnedObject)
    {
        // Promote the previous transient pin into the real pinned-object map.
        PinRecord& refCount = pinnedObjectMap.GetReference(transientPinnedObject);
        ++refCount;
        if (refCount == 1)
        {
            // First pin of this object: the pinned map now has content to scan.
            this->scanPinnedObjectMap = true;
            RECYCLER_PERF_COUNTER_INC(PinnedObject);
        }
#if defined(CHECK_MEMORY_LEAK) || defined(LEAK_REPORT)
#ifdef STACK_BACK_TRACE
        if (GetRecyclerFlagsTable().LeakStackTrace)
        {
            // Attach the stack captured when the displaced object was transiently pinned.
            StackBackTraceNode::Prepend(&NoCheckHeapAllocator::Instance, refCount.stackBackTraces,
                transientPinnedObjectStackBackTrace);
        }
#endif
#endif
    }
    if (count != nullptr)
    {
        // Report the map count plus one for the transient pin being installed.
        PinRecord* refCount = pinnedObjectMap.TryGetReference(obj);
        *count = (refCount != nullptr) ? (*refCount + 1) : 1;
    }
    transientPinnedObject = obj;
#if defined(CHECK_MEMORY_LEAK) || defined(LEAK_REPORT)
#ifdef STACK_BACK_TRACE
    if (GetRecyclerFlagsTable().LeakStackTrace)
    {
        transientPinnedObjectStackBackTrace = StackBackTrace::Capture(&NoCheckHeapAllocator::Instance);
    }
#endif
#endif
}
// Releases one pin on obj.  If obj is the pending transient pin, the release
// simply cancels it; otherwise the map refcount is decremented and, on
// reaching zero, the entry is removed (or deferred if concurrent find-root is
// in flight).  If count is non-null it receives the remaining pin count, or
// (uint)-1 when obj was not pinned at all.
void
Recycler::RootRelease(void* obj, uint *count)
{
    Assert(this->IsValidObject(obj));
    if (transientPinnedObject == obj)
    {
        // Cancel the pending transient pin; the map was never touched for it.
        transientPinnedObject = nullptr;
        if (count != nullptr)
        {
            PinRecord *refCount = pinnedObjectMap.TryGetReference(obj);
            *count = (refCount != nullptr) ? *refCount : 0;
        }
#if defined(CHECK_MEMORY_LEAK) || defined(LEAK_REPORT)
#ifdef STACK_BACK_TRACE
        if (GetRecyclerFlagsTable().LeakStackTrace)
        {
            transientPinnedObjectStackBackTrace->Delete(&NoCheckHeapAllocator::Instance);
        }
#endif
#endif
    }
    else
    {
        PinRecord *refCount = pinnedObjectMap.TryGetReference(obj);
        if (refCount == nullptr)
        {
            // Releasing an object that was never pinned.
            if (count != nullptr)
            {
                *count = (uint)-1;
            }
            // REVIEW: throw if not found
            Assert(false);
            return;
        }
        uint newRefCount = (--(*refCount));
        if (count != nullptr)
        {
            *count = newRefCount;
        }
        if (newRefCount != 0)
        {
            // Still pinned; just record the release stack for leak diagnostics.
#if defined(CHECK_MEMORY_LEAK) || defined(LEAK_REPORT)
#ifdef STACK_BACK_TRACE
            if (GetRecyclerFlagsTable().LeakStackTrace)
            {
                StackBackTraceNode::Prepend(&NoCheckHeapAllocator::Instance, refCount->stackBackTraces,
                    StackBackTrace::Capture(&NoCheckHeapAllocator::Instance));
            }
#endif
#endif
            return;
        }
#if defined(CHECK_MEMORY_LEAK) || defined(LEAK_REPORT)
#ifdef STACK_BACK_TRACE
        StackBackTraceNode::DeleteAll(&NoCheckHeapAllocator::Instance, refCount->stackBackTraces);
        refCount->stackBackTraces = nullptr;
#endif
#endif
#if ENABLE_CONCURRENT_GC
        // Don't delete the entry if we are in concurrent find root state
        // We will delete it later on in-thread find root
        if (this->hasPendingConcurrentFindRoot)
        {
            this->hasPendingUnpinnedObject = true;
        }
        else
#endif
        {
            pinnedObjectMap.Remove(obj);
        }
        RECYCLER_PERF_COUNTER_DEC(PinnedObject);
    }
    // Any time a root is removed during a GC, it indicates that an exhaustive
    // collection is likely going to have work to do so trigger an exhaustive
    // candidate GC to indicate this fact
    this->CollectNow<CollectExhaustiveCandidate>();
}
#if DBG && GLOBAL_ENABLE_WRITE_BARRIER
// Debug-only global intrusive list of live recyclers, guarded by recyclerListLock.
Recycler* Recycler::recyclerList = nullptr;
CriticalSection Recycler::recyclerListLock;
#endif
// Second-phase initialization: sets up allocation tracking, initializes the
// heap (optionally with page-heap settings), decides whether concurrent /
// parallel / partial collection are enabled, and turns on write-watch when
// any of those modes need it.
//   forceInThread      - true forces a purely in-thread (non-concurrent) recycler.
//   threadService      - host thread service used for the background GC thread.
//   deferThreadStartup - postpone starting the concurrent thread until needed.
void
Recycler::Initialize(const bool forceInThread, JsUtil::ThreadService *threadService, const bool deferThreadStartup
#ifdef RECYCLER_PAGE_HEAP
    , PageHeapMode pageheapmode
    , bool captureAllocCallStack
    , bool captureFreeCallStack
#endif
    )
{
#ifdef PROFILE_RECYCLER_ALLOC
    this->InitializeProfileAllocTracker();
#endif
#ifdef ENABLE_DEBUG_CONFIG_OPTIONS
    this->disableCollection = CUSTOM_PHASE_OFF1(GetRecyclerFlagsTable(), Js::RecyclerPhase);
#endif
#if ENABLE_CONCURRENT_GC
    this->skipStack = false;
#endif
#if ENABLE_PARTIAL_GC
#if ENABLE_DEBUG_CONFIG_OPTIONS
    this->enablePartialCollect = !CUSTOM_PHASE_OFF1(GetRecyclerFlagsTable(), Js::PartialCollectPhase);
#else
    this->enablePartialCollect = true;
#endif
#endif
#ifdef PROFILE_MEM
    this->memoryData = MemoryProfiler::GetRecyclerMemoryData();
#endif
#if DBG || DBG_DUMP || defined(RECYCLER_TRACE)
    mainThreadId = GetCurrentThreadContextId();
#endif
#ifdef RECYCLER_TRACE
    collectionParam.domCollect = false;
#endif
#if defined(PROFILE_RECYCLER_ALLOC) || defined(RECYCLER_MEMORY_VERIFY) || defined(MEMSPECT_TRACKING) || defined(ETW_MEMORY_TRACKING)
    // Decide whether the heap needs the TrackNativeAllocatedMemoryBlock
    // callback: it is only useful when no detailed tracking mode is active.
    bool dontNeedDetailedTracking = false;
#if defined(PROFILE_RECYCLER_ALLOC)
    dontNeedDetailedTracking = dontNeedDetailedTracking || this->trackerDictionary == nullptr;
#endif
#if defined(RECYCLER_MEMORY_VERIFY)
    dontNeedDetailedTracking = dontNeedDetailedTracking || !this->verifyEnabled;
#endif
    // If we need detailed tracking we force allocation fast path in the JIT to fail and go to the helper, so there is no
    // need for the TrackNativeAllocatedMemoryBlock callback.
    if (dontNeedDetailedTracking)
    {
        autoHeap.Initialize(this, TrackNativeAllocatedMemoryBlock
#ifdef RECYCLER_PAGE_HEAP
            , pageheapmode
            , captureAllocCallStack
            , captureFreeCallStack
#endif
            );
    }
    else
    {
        autoHeap.Initialize(this
#ifdef RECYCLER_PAGE_HEAP
            , pageheapmode
            , captureAllocCallStack
            , captureFreeCallStack
#endif
            );
    }
#else
    autoHeap.Initialize(this
#ifdef RECYCLER_PAGE_HEAP
        , pageheapmode
        , captureAllocCallStack
        , captureFreeCallStack
#endif
        );
#endif
    // Reserve pages for the primary mark stack up front.
    markContext.Init(Recycler::PrimaryMarkStackReservedPageCount);
#if defined(RECYCLER_DUMP_OBJECT_GRAPH) || defined(LEAK_REPORT) || defined(CHECK_MEMORY_LEAK)
    isPrimaryMarkContextInitialized = true;
#endif
#ifdef RECYCLER_PAGE_HEAP
    isPageHeapEnabled = autoHeap.IsPageHeapEnabled();
    if (IsPageHeapEnabled())
    {
        capturePageHeapAllocStack = autoHeap.DoCaptureAllocCallStack();
        capturePageHeapFreeStack = autoHeap.DoCaptureFreeCallStack();
    }
#endif
#ifdef RECYCLER_STRESS
#if ENABLE_PARTIAL_GC
    if (GetRecyclerFlagsTable().RecyclerTrackStress)
    {
        // Disable partial if we are doing track stress, since partial relies on ClientTracked processing
        // and track stress doesn't support this.
        this->enablePartialCollect = false;
    }
#endif
    this->recyclerStress = GetRecyclerFlagsTable().RecyclerStress;
#if ENABLE_CONCURRENT_GC
    this->recyclerBackgroundStress = GetRecyclerFlagsTable().RecyclerBackgroundStress;
    this->recyclerConcurrentStress = GetRecyclerFlagsTable().RecyclerConcurrentStress;
    this->recyclerConcurrentRepeatStress = GetRecyclerFlagsTable().RecyclerConcurrentRepeatStress;
#endif
#if ENABLE_PARTIAL_GC
    this->recyclerPartialStress = GetRecyclerFlagsTable().RecyclerPartialStress;
#endif
#endif
    bool needWriteWatch = false;
#if ENABLE_CONCURRENT_GC
    // Default to non-concurrent
    // Cap parallel marking at 4 ways; use the physical processor count below that.
    uint numProcs = (uint)AutoSystemInfo::Data.GetNumberOfPhysicalProcessors();
    this->maxParallelism = (numProcs > 4) || CUSTOM_PHASE_FORCE1(GetRecyclerFlagsTable(), Js::ParallelMarkPhase) ? 4 : numProcs;
    if (forceInThread)
    {
        // Requested a non-concurrent recycler
        this->disableConcurrent = true;
    }
#if ENABLE_DEBUG_CONFIG_OPTIONS
    else if (CUSTOM_PHASE_OFF1(GetRecyclerFlagsTable(), Js::ConcurrentCollectPhase))
    {
        // Concurrent collection disabled
        this->disableConcurrent = true;
    }
    else if (CUSTOM_PHASE_OFF1(GetRecyclerFlagsTable(), Js::ConcurrentMarkPhase) &&
        CUSTOM_PHASE_OFF1(GetRecyclerFlagsTable(), Js::ParallelMarkPhase) &&
        CUSTOM_PHASE_OFF1(GetRecyclerFlagsTable(), Js::ConcurrentSweepPhase))
    {
        // All concurrent collection phases disabled
        this->disableConcurrent = true;
    }
#endif
    else
    {
        this->disableConcurrent = false;
        // Either defer thread startup or start the concurrent thread now; in
        // both cases the concurrent path will need write watch.
        if (deferThreadStartup || EnableConcurrent(threadService, false))
        {
#ifdef RECYCLER_WRITE_WATCH
            needWriteWatch = true;
#endif
        }
    }
#endif // ENABLE_CONCURRENT_GC
#if ENABLE_PARTIAL_GC
    if (this->enablePartialCollect)
    {
#ifdef RECYCLER_WRITE_WATCH
        needWriteWatch = true;
#endif
    }
#endif
#if ENABLE_PARTIAL_GC || ENABLE_CONCURRENT_GC
#ifdef RECYCLER_WRITE_WATCH
    if (!CONFIG_FLAG(ForceSoftwareWriteBarrier))
    {
        if (needWriteWatch)
        {
            // need write watch to support concurrent and/or partial collection
            autoHeap.EnableWriteWatch();
        }
    }
#endif
#else
    Assert(!needWriteWatch);
#endif
#if DBG && GLOBAL_ENABLE_WRITE_BARRIER
    // Register this recycler on the global debug list.
    recyclerListLock.Enter();
    this->next = recyclerList;
    recyclerList = this;
    recyclerListLock.Leave();
#endif
}
- BOOL
- Recycler::CollectionInProgress() const
- {
- return collectionState != CollectionStateNotCollecting;
- }
- BOOL
- Recycler::IsExiting() const
- {
- return (collectionState == Collection_Exit);
- }
- BOOL
- Recycler::IsSweeping() const
- {
- return ((collectionState & Collection_Sweep) == Collection_Sweep);
- }
- void
- Recycler::SetIsScriptActive(bool isScriptActive)
- {
- Assert(this->isInScript);
- Assert(this->isScriptActive != isScriptActive);
- this->isScriptActive = isScriptActive;
- if (isScriptActive)
- {
- this->tickCountNextDispose = ::GetTickCount() + RecyclerHeuristic::TickCountFinishCollection;
- }
- }
- void
- Recycler::SetIsInScript(bool isInScript)
- {
- Assert(this->isInScript != isInScript);
- this->isInScript = isInScript;
- }
- bool
- Recycler::HasNativeGCHost() const
- {
- return this->hasNativeGCHost;
- }
- void
- Recycler::SetHasNativeGCHost()
- {
- this->hasNativeGCHost = true;
- }
- bool
- Recycler::NeedOOMRescan() const
- {
- return this->needOOMRescan;
- }
- void
- Recycler::SetNeedOOMRescan()
- {
- this->needOOMRescan = true;
- }
- void
- Recycler::ClearNeedOOMRescan()
- {
- this->needOOMRescan = false;
- markContext.GetPageAllocator()->ResetDisableAllocationOutOfMemory();
- parallelMarkContext1.GetPageAllocator()->ResetDisableAllocationOutOfMemory();
- parallelMarkContext2.GetPageAllocator()->ResetDisableAllocationOutOfMemory();
- parallelMarkContext3.GetPageAllocator()->ResetDisableAllocationOutOfMemory();
- }
- bool
- Recycler::IsMemProtectMode()
- {
- return this->enableScanImplicitRoots;
- }
- size_t
- Recycler::GetUsedBytes()
- {
- return autoHeap.GetUsedBytes();
- }
#if DBG
BOOL
Recycler::IsFreeObject(void * candidate)
{
    // Resolve the candidate address to its heap block; addresses outside
    // the recycler heap are never free objects.
    HeapBlock * block = this->FindHeapBlock(candidate);
    if (block == NULL)
    {
        return false;
    }
    return block->IsFreeObject(candidate);
}
#endif
- BOOL
- Recycler::IsValidObject(void* candidate, size_t minimumSize)
- {
- HeapBlock * heapBlock = this->FindHeapBlock(candidate);
- if (heapBlock != NULL)
- {
- return heapBlock->IsValidObject(candidate) && (minimumSize == 0 || heapBlock->GetObjectSize(candidate) >= minimumSize);
- }
- return false;
- }
- void
- Recycler::Prime()
- {
- #ifdef ENABLE_DEBUG_CONFIG_OPTIONS
- if (GetRecyclerFlagsTable().IsEnabled(Js::ForceFragmentAddressSpaceFlag))
- {
- // Never prime the recycler if we are forced to fragment address space
- return;
- }
- #endif
- autoHeap.Prime();
- }
- void
- Recycler::AddExternalMemoryUsage(size_t size)
- {
- this->autoHeap.uncollectedAllocBytes += size;
- this->autoHeap.uncollectedExternalBytes += size;
- // Generally normal GC can cleanup the uncollectedAllocBytes. But if external components
- // do fast large allocations in a row, normal GC might not kick in. Let's force the GC
- // here if we need to collect anyhow.
- CollectNow<CollectOnAllocation>();
- }
- bool Recycler::RequestExternalMemoryAllocation(size_t size)
- {
- AllocationPolicyManager * allocationPolicyManager = autoHeap.GetAllocationPolicyManager();
- return !allocationPolicyManager || allocationPolicyManager->RequestAlloc(size);
- }
- void Recycler::ReportExternalMemoryFailure(size_t size)
- {
- AllocationPolicyManager * allocationPolicyManager = autoHeap.GetAllocationPolicyManager();
- if (allocationPolicyManager)
- {
- allocationPolicyManager->ReportFailure(size);
- }
- }
- void Recycler::ReportExternalMemoryFree(size_t size)
- {
- AllocationPolicyManager * allocationPolicyManager = autoHeap.GetAllocationPolicyManager();
- if (allocationPolicyManager)
- {
- allocationPolicyManager->ReportFree(size);
- }
- }
- /*------------------------------------------------------------------------------------------------
- * Idle Decommit
- *------------------------------------------------------------------------------------------------*/
// Enters idle-decommit mode: defers page decommits so they can be batched
// while the thread is idle.
void
Recycler::EnterIdleDecommit()
{
    autoHeap.EnterIdleDecommit();
#ifdef IDLE_DECOMMIT_ENABLED
    // While inside idle decommit no background timer is needed: atomically
    // drop a pending NeedTimer request (a NeedSignal value is left untouched).
    ::InterlockedCompareExchange(&needIdleDecommitSignal, IdleDecommitSignal_None, IdleDecommitSignal_NeedTimer);
#endif
}
// Leaves idle-decommit mode and, when the heap asks for it, wakes the
// concurrent thread so it can run the deferred decommit work on a timer.
void
Recycler::LeaveIdleDecommit()
{
#ifdef IDLE_DECOMMIT_ENABLED
    // A timer-driven decommit is only possible when the concurrent
    // idle-decommit event (and thus a background thread) exists.
    bool allowTimer = (this->concurrentIdleDecommitEvent != nullptr);
    IdleDecommitSignal idleDecommitSignal = autoHeap.LeaveIdleDecommit(allowTimer);
    if (idleDecommitSignal != IdleDecommitSignal_None)
    {
        Assert(allowTimer);
        // Reduce the number of times we need to signal the background thread
        // by detecting whether the thread is waiting on a time out or not.
        // The interlocked exchange publishes NeedTimer; if the previous value
        // was NeedSignal the thread must be woken explicitly.
        if (idleDecommitSignal == IdleDecommitSignal_NeedSignal ||
            ::InterlockedCompareExchange(&needIdleDecommitSignal, IdleDecommitSignal_NeedTimer, IdleDecommitSignal_None) == IdleDecommitSignal_NeedSignal)
        {
#if DBG
            if (GetRecyclerFlagsTable().Trace.IsEnabled(Js::IdleDecommitPhase))
            {
                Output::Print(_u("Recycler Thread IdleDecommit Need Signal\n"));
                Output::Flush();
            }
#endif
#pragma prefast(suppress:6387, "INVALID_PARAM_VALUE_1 We will never reach here if concurrentIdleDecommitEvent is NULL.");
            SetEvent(this->concurrentIdleDecommitEvent);
        }
    }
#else
    autoHeap.LeaveIdleDecommit(false /*allowTimer*/);
#endif
}
- /*------------------------------------------------------------------------------------------------
- * Freeing
- *------------------------------------------------------------------------------------------------*/
- bool Recycler::ExplicitFreeLeaf(void* buffer, size_t size)
- {
- return ExplicitFreeInternalWrapper<ObjectInfoBits::LeafBit>(buffer, size);
- }
- bool Recycler::ExplicitFreeNonLeaf(void* buffer, size_t size)
- {
- return ExplicitFreeInternalWrapper<ObjectInfoBits::NoBit>(buffer, size);
- }
// Returns the number of bytes actually needed to satisfy a request of
// 'size' bytes, including verification padding when memory-verify is on.
size_t Recycler::GetAllocSize(size_t size)
{
    size_t allocSize = size;
#ifdef RECYCLER_MEMORY_VERIFY
    if (this->VerifyEnabled())
    {
        // Reserve room for the verify pad plus the stored original size.
        allocSize += verifyPad + sizeof(size_t);
        Assert(allocSize > size); // debug-only overflow check
    }
#endif
    return allocSize;
}
// Validates that the block being explicitly freed is a small/medium block of
// the expected flavor for 'attributes', and (under memory verify) tags the
// object's explicit-free bit so later verification can recognize it.
template <typename TBlockAttributes>
void Recycler::SetExplicitFreeBitOnSmallBlock(HeapBlock* heapBlock, size_t sizeCat, void* buffer, ObjectInfoBits attributes)
{
    Assert(!heapBlock->IsLargeHeapBlock());
    Assert(heapBlock->GetObjectSize(buffer) == sizeCat);
    SmallHeapBlockT<TBlockAttributes>* smallBlock = (SmallHeapBlockT<TBlockAttributes>*)heapBlock;
    // Leaf allocations must live in leaf blocks; everything else in normal blocks.
    if ((attributes & ObjectInfoBits::LeafBit) == LeafBit)
    {
        Assert(smallBlock->IsLeafBlock());
    }
    else
    {
        Assert(smallBlock->IsAnyNormalBlock());
    }
#ifdef RECYCLER_MEMORY_VERIFY
    smallBlock->SetExplicitFreeBitForObject(buffer);
#endif
}
// Routes an explicit free to the small- or medium-object path based on the
// (possibly padded) allocation size.  Returns false when the object cannot
// be explicitly freed (large objects, or the feature is disabled).
template <ObjectInfoBits attributes>
bool Recycler::ExplicitFreeInternalWrapper(void* buffer, size_t size)
{
    Assert(buffer != nullptr);
    Assert(size > 0);
#ifdef ENABLE_DEBUG_CONFIG_OPTIONS
    // Explicit free can be turned off wholesale for debugging.
    if (CUSTOM_PHASE_OFF1(GetRecyclerFlagsTable(), Js::ExplicitFreePhase))
    {
        return false;
    }
#endif
    size_t allocSize = GetAllocSize(size);
    if (HeapInfo::IsSmallObject(allocSize))
    {
        return ExplicitFreeInternal<attributes, SmallAllocationBlockAttributes>(buffer, size, HeapInfo::GetAlignedSizeNoCheck(allocSize));
    }
    if (HeapInfo::IsMediumObject(allocSize))
    {
        return ExplicitFreeInternal<attributes, MediumAllocationBlockAttributes>(buffer, size, HeapInfo::GetMediumObjectAlignedSizeNoCheck(allocSize));
    }
    // Large objects are never explicitly freed.
    return false;
}
// Puts an explicitly freed small/medium object back on the free list,
// fills the freed payload with a known pattern, and updates tracking.
// Returns false when the free is not performed (sweep/exit in progress,
// or page-heap mode).
template <ObjectInfoBits attributes, typename TBlockAttributes>
bool Recycler::ExplicitFreeInternal(void* buffer, size_t size, size_t sizeCat)
{
    // If the GC is in sweep state while FreeInternal is called, we might be executing a finalizer
    // which called Free, which would cause a "sweepable" buffer to be free-listed. Don't allow this.
    // Also don't allow freeing while we're shutting down the recycler since finalizers get executed
    // at this stage too
    if (this->IsSweeping() || this->IsExiting())
    {
        return false;
    }
#if ENABLE_CONCURRENT_GC
    // We shouldn't be freeing object when we are running GC in thread
    Assert(this->IsConcurrentState() || !this->CollectionInProgress() || this->IsAllocatableCallbackState());
#else
    Assert(!this->CollectionInProgress() || this->IsAllocatableCallbackState());
#endif
    DebugOnly(RecyclerHeapObjectInfo info);
    Assert(this->FindHeapObject(buffer, FindHeapObjectFlags_NoFreeBitVerify, info));
    Assert((info.GetAttributes() & ~ObjectInfoBits::LeafBit) == 0); // Only NoBit or LeafBit
    HeapInfo * heapInfo = this->GetHeapInfo<attributes>();
#if DBG || defined(RECYCLER_MEMORY_VERIFY) || defined(RECYCLER_PAGE_HEAP)
    // Either the mainThreadHandle is null (we're not thread bound)
    // or we should be calling this function on the main script thread
    Assert(this->mainThreadHandle == NULL ||
        ::GetCurrentThreadId() == ::GetThreadId(this->mainThreadHandle));
    HeapBlock* heapBlock = this->FindHeapBlock(buffer);
    Assert(heapBlock != nullptr);
#if DBG
    Assert(heapInfo == heapBlock->GetHeapInfo());
#endif
#ifdef RECYCLER_PAGE_HEAP
    if (this->IsPageHeapEnabled())
    {
#ifdef STACK_BACK_TRACE
        // Record where the free came from, for page-heap diagnostics.
        if (this->ShouldCapturePageHeapFreeStack())
        {
            if (heapBlock->IsLargeHeapBlock())
            {
                LargeHeapBlock* largeHeapBlock = (LargeHeapBlock*)heapBlock;
                if (largeHeapBlock->InPageHeapMode())
                {
                    largeHeapBlock->CapturePageHeapFreeStack();
                }
            }
        }
#endif
        // Don't do actual explicit free in page heap mode
        return false;
    }
#endif
    SetExplicitFreeBitOnSmallBlock<TBlockAttributes>(heapBlock, sizeCat, buffer, attributes);
#endif
    // Hand the object back to the appropriate size bucket's free list.
    if (TBlockAttributes::IsMediumBlock)
    {
        heapInfo->FreeMediumObject<attributes>(buffer, sizeCat);
    }
    else
    {
        heapInfo->FreeSmallObject<attributes>(buffer, sizeCat);
    }
    // Scrub the payload beyond the FreeObject header so stale pointers
    // cannot keep dead objects alive (or, under verify, so the fill
    // pattern can be checked later).
    if (size > sizeof(FreeObject) || TBlockAttributes::IsMediumBlock)
    {
        // Do this on the background somehow?
        byte expectedFill = 0;
        size_t fillSize = size - sizeof(FreeObject);
#ifdef RECYCLER_MEMORY_VERIFY
        if (this->VerifyEnabled())
        {
            expectedFill = Recycler::VerifyMemFill;
        }
#endif
        memset(((char*)buffer) + sizeof(FreeObject), expectedFill, fillSize);
    }
#ifdef PROFILE_RECYCLER_ALLOC
    // Retag the tracker entry so profiling shows this as an explicit-free slot.
    if (this->trackerDictionary != nullptr)
    {
        this->SetTrackerData(buffer, &TrackerData::ExplicitFreeListObjectData);
    }
#endif
    return true;
}
- /*------------------------------------------------------------------------------------------------
- * Allocation
- *------------------------------------------------------------------------------------------------*/
// Attempts one large-object allocation: first from the existing large block
// list, then (after possibly triggering a collection) from page heap or a
// freshly added large heap block.  Returns nullptr on failure when nothrow,
// otherwise raises OutOfMemory on size overflow.
char *
Recycler::TryLargeAlloc(HeapInfo * heap, size_t size, ObjectInfoBits attributes, bool nothrow)
{
    Assert((attributes & InternalObjectInfoBitMask) == attributes);
    Assert(size != 0);
    size_t sizeCat = HeapInfo::GetAlignedSizeNoCheck(size);
    if (sizeCat == 0)
    {
        // overflow scenario
        // if nothrow is false, throw out of memory
        // otherwise, return null
        if (nothrow == false)
        {
            this->OutOfMemory();
        }
        return nullptr;
    }
    char * memBlock;
    // Fast path: try the head of the existing large block list.
    if (heap->largeObjectBucket.largeBlockList != nullptr)
    {
        memBlock = heap->largeObjectBucket.largeBlockList->Alloc(sizeCat, attributes);
        if (memBlock != nullptr)
        {
#ifdef RECYCLER_ZERO_MEM_CHECK
            VerifyLargeAllocZeroFill(memBlock, sizeCat, attributes);
#endif
            return memBlock;
        }
    }
    // We don't care whether a GC happened here or not, because we are not reusing freed
    // large objects. We might try to allocate from existing block if we implement
    // large object reuse.
    if (!this->disableCollectOnAllocationHeuristics)
    {
        CollectNow<CollectOnAllocation>();
    }
#ifdef RECYCLER_PAGE_HEAP
    if (IsPageHeapEnabled())
    {
        if (heap->largeObjectBucket.IsPageHeapEnabled(attributes))
        {
            memBlock = heap->largeObjectBucket.PageHeapAlloc(this, sizeCat, size, (ObjectInfoBits)attributes, heap->pageHeapMode, nothrow);
            if (memBlock != nullptr)
            {
#ifdef RECYCLER_ZERO_MEM_CHECK
                VerifyLargeAllocZeroFill(memBlock, size, attributes);
#endif
                return memBlock;
            }
        }
    }
#endif
    // Slow path: grow the heap with a new large block sized for this request.
    LargeHeapBlock * heapBlock = heap->AddLargeHeapBlock(sizeCat);
    if (heapBlock == nullptr)
    {
        return nullptr;
    }
    memBlock = heapBlock->Alloc(sizeCat, attributes);
    Assert(memBlock != nullptr); // a fresh block must satisfy the size it was created for
#ifdef RECYCLER_ZERO_MEM_CHECK
    VerifyLargeAllocZeroFill(memBlock, sizeCat, attributes);
#endif
    return memBlock;
}
// Large-object allocation entry point.  Enforces the single-allocation size
// limit, retries once after a forced in-thread collection, and either throws
// (nothrow == false) or returns nullptr on ultimate failure.
template <bool nothrow>
char*
Recycler::LargeAlloc(HeapInfo* heap, size_t size, ObjectInfoBits attributes)
{
    Assert((attributes & InternalObjectInfoBitMask) == attributes);
#if ENABLE_DEBUG_CONFIG_OPTIONS
    size_t limit = (size_t)GetRecyclerFlagsTable().MaxSingleAllocSizeInMB * 1024 * 1024;
#else
    size_t limit = (size_t)CONFIG_FLAG(MaxSingleAllocSizeInMB) * 1024 * 1024;
#endif
    if (size >= limit)
    {
        if (nothrow == false)
        {
#if ENABLE_DEBUG_CONFIG_OPTIONS
            // Optionally turn the limit violation into an unrecoverable error
            // instead of a catchable OOM.
            if (GetRecyclerFlagsTable().EnableFatalErrorOnOOM)
            {
                if (this->IsMemProtectMode())
                {
                    MemGCSingleAllocationLimit_unrecoverable_error();
                }
                else
                {
                    RecyclerSingleAllocationLimit_unrecoverable_error();
                }
            }
#endif
            this->OutOfMemory();
        }
        else
        {
            return nullptr;
        }
    }
    char * addr = TryLargeAlloc(heap, size, attributes, nothrow);
    if (addr == nullptr)
    {
        // Force a collection and try to allocate again.
        this->CollectNow<CollectNowForceInThread>();
        addr = TryLargeAlloc(heap, size, attributes, nothrow);
        if (addr == nullptr)
        {
            if (nothrow == false)
            {
                // Still fails, we are out of memory
                // Since nothrow is false, it's okay to throw here
                this->OutOfMemory();
            }
            else
            {
                return nullptr;
            }
        }
    }
    // Feed the allocation heuristics.
    autoHeap.uncollectedAllocBytes += size;
    return addr;
}
// Explicitly instantiate both versions of LargeAlloc
template char* Recycler::LargeAlloc<true>(HeapInfo* heap, size_t size, ObjectInfoBits attributes);
template char* Recycler::LargeAlloc<false>(HeapInfo* heap, size_t size, ObjectInfoBits attributes);
- void
- Recycler::OutOfMemory()
- {
- outOfMemoryFunc();
- }
- void Recycler::GetNormalHeapBlockAllocatorInfoForNativeAllocation(void* recyclerAddr, size_t allocSize, void*& allocatorAddress, uint32& endAddressOffset, uint32& freeListOffset, bool allowBumpAllocation, bool isOOPJIT)
- {
- Assert(recyclerAddr);
- return ((Recycler*)recyclerAddr)->GetNormalHeapBlockAllocatorInfoForNativeAllocation(allocSize, allocatorAddress, endAddressOffset, freeListOffset, allowBumpAllocation, isOOPJIT);
- }
- void Recycler::GetNormalHeapBlockAllocatorInfoForNativeAllocation(size_t allocSize, void*& allocatorAddress, uint32& endAddressOffset, uint32& freeListOffset, bool allowBumpAllocation, bool isOOPJIT)
- {
- Assert(HeapInfo::IsAlignedSize(allocSize));
- Assert(HeapInfo::IsSmallObject(allocSize));
- allocatorAddress = (char*)this + offsetof(Recycler, autoHeap)
- + offsetof(HeapInfoManager, defaultHeap)
- + offsetof(HeapInfo, heapBuckets)
- + sizeof(HeapBucketGroup<SmallAllocationBlockAttributes>)*((uint)(allocSize >> HeapConstants::ObjectAllocationShift) - 1)
- + HeapBucketGroup<SmallAllocationBlockAttributes>::GetHeapBucketOffset()
- + HeapBucketT<SmallNormalHeapBlockT<SmallAllocationBlockAttributes>>::GetAllocatorHeadOffset();
- endAddressOffset = SmallHeapBlockAllocator<SmallNormalHeapBlockT<SmallAllocationBlockAttributes>>::GetEndAddressOffset();
- freeListOffset = SmallHeapBlockAllocator<SmallNormalHeapBlockT<SmallAllocationBlockAttributes>>::GetFreeObjectListOffset();;
- if (!isOOPJIT)
- {
- Assert(allocatorAddress == GetAddressOfAllocator<NoBit>(allocSize));
- Assert(endAddressOffset == GetEndAddressOffset<NoBit>(allocSize));
- Assert(freeListOffset == GetFreeObjectListOffset<NoBit>(allocSize));
- Assert(allowBumpAllocation == AllowNativeCodeBumpAllocation());
- }
- if (!allowBumpAllocation)
- {
- freeListOffset = endAddressOffset;
- }
- }
// Returns whether JIT-generated code may use inline pointer-bump allocation.
// Disabled whenever every allocation must go through the slow path so that
// tracking/verification hooks run.
bool Recycler::AllowNativeCodeBumpAllocation()
{
    // In debug builds, if we need to track allocation info, we pretend there is no pointer-bump-allocation space
    // on this page, so that we always fail the check in native code and go to helper, which does the tracking.
#ifdef PROFILE_RECYCLER_ALLOC
    if (this->trackerDictionary != nullptr)
    {
        return false;
    }
#endif
#ifdef RECYCLER_MEMORY_VERIFY
    // Memory verification needs the slow path to place its padding/fill.
    if (this->verifyEnabled)
    {
        return false;
    }
#endif
#ifdef RECYCLER_PAGE_HEAP
    // Don't allow bump allocation in the JIT when page heap is turned on
    if (this->IsPageHeapEnabled())
    {
        return false;
    }
#endif
    return true;
}
// Performance-counter/tracking bookkeeping for an object that JIT-generated
// code bump-allocated without going through the normal slow path.
void Recycler::TrackNativeAllocatedMemoryBlock(Recycler * recycler, void * memBlock, size_t sizeCat)
{
    Assert(HeapInfo::IsAlignedSize(sizeCat));
    Assert(HeapInfo::IsSmallObject(sizeCat));
#ifdef PROFILE_RECYCLER_ALLOC
    AssertMsg(!Recycler::DoProfileAllocTracker(), "Why did we register allocation tracking callback if all allocations are forced to slow path?");
#endif
    RecyclerMemoryTracking::ReportAllocation(recycler, memBlock, sizeCat);
    // Move the bytes from "free" to "live" in the perf counters.
    RECYCLER_PERF_COUNTER_INC(LiveObject);
    RECYCLER_PERF_COUNTER_ADD(LiveObjectSize, sizeCat);
    RECYCLER_PERF_COUNTER_SUB(FreeObjectSize, sizeCat);
#ifdef RECYCLER_MEMORY_VERIFY
    AssertMsg(!recycler->VerifyEnabled(), "Why did we register allocation tracking callback if all allocations are forced to slow path?");
#endif
}
- /*------------------------------------------------------------------------------------------------
- * FindRoots
- *------------------------------------------------------------------------------------------------*/
// xplat-todo: Unify these two variants of GetStackBase
// Returns the highest address (base) of the current thread's stack, used as
// the upper bound for the conservative stack scan.
#ifdef _WIN32
static void* GetStackBase()
{
    // The TEB records the stack base directly.
    return ((NT_TIB *)NtCurrentTeb())->StackBase;
}
#else
static void* GetStackBase()
{
    // Non-Win32 path; presumably served by the PAL's implementation of
    // GetCurrentThreadStackLimits — the high limit is the stack base.
    ULONG_PTR highLimit = 0;
    ULONG_PTR lowLimit = 0;
    ::GetCurrentThreadStackLimits(&lowLimit, &highLimit);
    return (void*) highLimit;
}
#endif
// SAVE_THREAD_CONTEXT spills the general-purpose registers into the saved
// thread context buffer so the stack scan can treat register contents as GC
// roots.  On x86, eax is pushed to the stack first (that slot is covered by
// the stack scan, since esp is captured afterwards) and then reused to hold
// the target buffer pointer — so the [eax+0x4] slot records the buffer
// address rather than the original eax value.  Note: no comments inside the
// macro bodies, as '//' would splice away the line continuations.
#if _M_IX86
// REVIEW: For x86, do we care about scanning esp/ebp?
// At GC time, they shouldn't be pointing to GC memory.
#define SAVE_THREAD_CONTEXT() \
    void** targetBuffer = this->savedThreadContext.GetRegisters(); \
    __asm { push eax } \
    __asm { mov eax, targetBuffer } \
    __asm { mov [eax], esp} \
    __asm { mov [eax+0x4], eax} \
    __asm { mov [eax+0x8], ebx} \
    __asm { mov [eax+0xc], ecx} \
    __asm { mov [eax+0x10], edx} \
    __asm { mov [eax+0x14], ebp} \
    __asm { mov [eax+0x18], esi} \
    __asm { mov [eax+0x1c], edi} \
    __asm { pop eax } \
    SAVE_THREAD_ASAN_FAKE_STACK()
#elif _M_ARM
#define SAVE_THREAD_CONTEXT() \
    arm_SAVE_REGISTERS(this->savedThreadContext.GetRegisters()); \
    SAVE_THREAD_ASAN_FAKE_STACK()
#elif _M_ARM64
#define SAVE_THREAD_CONTEXT() \
    arm64_SAVE_REGISTERS(this->savedThreadContext.GetRegisters()); \
    SAVE_THREAD_ASAN_FAKE_STACK()
#elif _M_AMD64
#define SAVE_THREAD_CONTEXT() \
    amd64_SAVE_REGISTERS(this->savedThreadContext.GetRegisters()); \
    SAVE_THREAD_ASAN_FAKE_STACK()
#else
#error Unexpected architecture
#endif
// Conservatively scans a guest arena's memory for GC roots.  Returns the
// number of root bytes scanned.  'background' selects which big-block list
// snapshot to walk and controls whether the block list lock is released.
size_t
Recycler::ScanArena(ArenaData * alloc, bool background)
{
#if DBG_DUMP
    if (GetRecyclerFlagsTable().Trace.IsEnabled(Js::MarkPhase)
        || GetRecyclerFlagsTable().Trace.IsEnabled(Js::FindRootPhase))
    {
        this->forceTraceMark = true;
        Output::Print(_u("Scanning Guest Arena %p: "), alloc);
    }
#endif
    size_t scanRootBytes = 0;
    BEGIN_DUMP_OBJECT_ADDRESS(_u("Guest Arena"), alloc);
#if ENABLE_PARTIAL_GC || ENABLE_CONCURRENT_GC
    // The new write watch batching logic broke the write watch handling here.
    // For now, just disable write watch for guest arenas.
    // TODO: Re-enable this in the future.
#if FALSE
    // Note, guest arenas are allocated out of the large block page allocator.
    bool writeWatch = alloc->GetPageAllocator() == &this->recyclerLargeBlockPageAllocator;
    // Only use write watch when we are doing rescan (Partial collect or finish concurrent)
    if (writeWatch && this->collectionState == CollectionStateRescanFindRoots)
    {
        scanRootBytes += TryMarkBigBlockListWithWriteWatch(alloc->GetBigBlocks(background));
        scanRootBytes += TryMarkBigBlockListWithWriteWatch(alloc->GetFullBlocks());
    }
    else
#endif
#endif
    {
        // Scan both the current big blocks and the already-full blocks.
        scanRootBytes += TryMarkBigBlockList(alloc->GetBigBlocks(background));
        scanRootBytes += TryMarkBigBlockList(alloc->GetFullBlocks());
    }
    scanRootBytes += TryMarkArenaMemoryBlockList(alloc->GetMemoryBlocks());
    END_DUMP_OBJECT(this);
#if DBG_DUMP
    if (GetRecyclerFlagsTable().Trace.IsEnabled(Js::MarkPhase)
        || GetRecyclerFlagsTable().Trace.IsEnabled(Js::FindRootPhase))
    {
        this->forceTraceMark = false;
        Output::Print(_u("\n"));
        Output::Flush();
    }
#endif
    // The arena has been scanned so the full blocks can be rearranged at this point
#if ENABLE_DEBUG_CONFIG_OPTIONS
    if (background || !GetRecyclerFlagsTable().RecyclerProtectPagesOnRescan)
#endif
    {
        alloc->SetLockBlockList(false);
    }
    return scanRootBytes;
}
#if DBG
// Debug-only: returns true when skipping the stack scan is an expected
// configuration (leak checking, object-graph dumping, mem-protect heap,
// or an external stack-skipping GC) rather than a bug.
bool
Recycler::ExpectStackSkip() const
{
    // Okay to skip the stack scan if we're in leak check mode
    bool expectStackSkip = false;
#ifdef LEAK_REPORT
    expectStackSkip = expectStackSkip || GetRecyclerFlagsTable().IsEnabled(Js::LeakReportFlag);
#endif
#ifdef CHECK_MEMORY_LEAK
    expectStackSkip = expectStackSkip || GetRecyclerFlagsTable().CheckMemoryLeak;
#endif
#ifdef RECYCLER_DUMP_OBJECT_GRAPH
    expectStackSkip = expectStackSkip || (this->objectGraphDumper != nullptr);
#endif
#if defined(INTERNAL_MEM_PROTECT_HEAP_ALLOC)
    expectStackSkip = expectStackSkip || GetRecyclerFlagsTable().MemProtectHeap;
#endif
    return expectStackSkip || isExternalStackSkippingGC;
}
#endif
#pragma warning(push)
#pragma warning(disable:4731) // 'pointer' : frame pointer register 'register' modified by inline assembly code
// disable address sanitizer, since it doesn't handle custom stack walks well
NO_SANITIZE_ADDRESS
// Conservatively scans the current thread's stack and saved registers for GC
// roots.  Returns the number of stack bytes scanned (0 when the scan is
// skipped).  Pointers are treated as interior when running in script or with
// a native GC host, since the stack may hold pointers into object payloads.
size_t
Recycler::ScanStack()
{
    if (this->skipStack)
    {
#ifdef RECYCLER_TRACE
        CUSTOM_PHASE_PRINT_VERBOSE_TRACE1(GetRecyclerFlagsTable(), Js::ScanStackPhase, _u("[%04X] Skipping the stack scan\n"), ::GetCurrentThreadId());
#endif
#if ENABLE_CONCURRENT_GC
        Assert(this->isFinishGCOnIdle || this->isConcurrentGCOnIdle || this->ExpectStackSkip());
#else
        Assert(this->ExpectStackSkip());
#endif
        return 0;
    }
#ifdef RECYCLER_STATS
    size_t lastMarkCount = this->collectionStats.markData.markCount;
#endif
    GCETW(GC_SCANSTACK_START, (this));
    RECYCLER_PROFILE_EXEC_BEGIN(this, Js::ScanStackPhase);
    // Spill the registers first so the captured esp/stack top covers them.
    SAVE_THREAD_CONTEXT();
    void * stackTop = this->savedThreadContext.GetStackTop();
    void * stackStart = GetStackBase();
    Assert(stackStart > stackTop); // stack grows down: base is the high address
    size_t stackScanned = (size_t)((char *)stackStart - (char *)stackTop);
#if DBG_DUMP
    if (GetRecyclerFlagsTable().Trace.IsEnabled(Js::MarkPhase)
        || GetRecyclerFlagsTable().Trace.IsEnabled(Js::ScanStackPhase))
    {
        this->forceTraceMark = true;
        Output::Print(_u("Scanning Stack %p(%8d): "), stackTop, (char *)stackStart - (char *)stackTop);
    }
#endif
    collectionWrapper->OnScanStackCallback((void**)stackTop, stackScanned, this->savedThreadContext.GetRegisters(), sizeof(void*) * SavedRegisterState::NumRegistersToSave);
    bool doSpecialMark = collectionWrapper->DoSpecialMarkOnScanStack();
    // First pass: the saved register buffer.
    BEGIN_DUMP_OBJECT(this, _u("Registers"));
    // We will not scan interior pointers on stack if we are not in script or we are in mem-protect mode.
    if (!this->HasNativeGCHost() && (!this->isInScript || this->IsMemProtectMode()))
    {
        if (doSpecialMark)
        {
            ScanMemoryInline<true>(
                this->savedThreadContext.GetRegisters(), sizeof(void*) * SavedRegisterState::NumRegistersToSave
                ADDRESS_SANITIZER_APPEND(RecyclerScanMemoryType::Stack));
        }
        else
        {
            ScanMemoryInline<false>(
                this->savedThreadContext.GetRegisters(), sizeof(void*) * SavedRegisterState::NumRegistersToSave
                ADDRESS_SANITIZER_APPEND(RecyclerScanMemoryType::Stack));
        }
    }
    else
    {
        // We may have interior pointers on the stack such as pointers in the middle of the character buffers backing a JavascriptString or SubString object.
        // To prevent UAFs of these buffers after the GC we will always do MarkInterior for the pointers on stack. This is necessary only when we are doing a
        // GC while running a script or when we have a host who allocates objects on the Chakra heap.
        if (doSpecialMark)
        {
            ScanMemoryInline<true, true /* forceInterior */>(this->savedThreadContext.GetRegisters(), sizeof(void*) * SavedRegisterState::NumRegistersToSave
                ADDRESS_SANITIZER_APPEND(RecyclerScanMemoryType::Stack));
        }
        else
        {
            ScanMemoryInline<false, true /* forceInterior */>(this->savedThreadContext.GetRegisters(), sizeof(void*) * SavedRegisterState::NumRegistersToSave
                ADDRESS_SANITIZER_APPEND(RecyclerScanMemoryType::Stack));
        }
    }
    END_DUMP_OBJECT(this);
    // Second pass: the stack itself, from the captured top to the base.
    BEGIN_DUMP_OBJECT(this, _u("Stack"));
    // We will not scan interior pointers on stack if we are not in script or we are in mem-protect mode.
    if (!this->HasNativeGCHost() && (!this->isInScript || this->IsMemProtectMode()))
    {
        if (doSpecialMark)
        {
            ScanMemoryInline<true>((void**) stackTop, stackScanned
                ADDRESS_SANITIZER_APPEND(RecyclerScanMemoryType::Stack));
        }
        else
        {
            ScanMemoryInline<false>((void**) stackTop, stackScanned
                ADDRESS_SANITIZER_APPEND(RecyclerScanMemoryType::Stack));
        }
    }
    else
    {
        // We may have interior pointers on the stack such as pointers in the middle of the character buffers backing a JavascriptString or SubString object.
        // To prevent UAFs of these buffers after the GC we will always do MarkInterior for the pointers on stack. This is necessary only when we are doing a
        // GC while running a script or when we have a host who allocates objects on the Chakra heap.
        if (doSpecialMark)
        {
            ScanMemoryInline<true, true /* forceInterior */>((void**)stackTop, stackScanned
                ADDRESS_SANITIZER_APPEND(RecyclerScanMemoryType::Stack));
        }
        else
        {
            ScanMemoryInline<false, true /* forceInterior */>((void**)stackTop, stackScanned
                ADDRESS_SANITIZER_APPEND(RecyclerScanMemoryType::Stack));
        }
    }
    END_DUMP_OBJECT(this);
#if DBG_DUMP
    if (GetRecyclerFlagsTable().Trace.IsEnabled(Js::MarkPhase)
        || GetRecyclerFlagsTable().Trace.IsEnabled(Js::ScanStackPhase))
    {
        this->forceTraceMark = false;
        Output::Print(_u("\n"));
        Output::Flush();
    }
#endif
    RECYCLER_PROFILE_EXEC_END(this, Js::ScanStackPhase);
    RECYCLER_STATS_ADD(this, stackCount, this->collectionStats.markData.markCount - lastMarkCount);
    GCETW(GC_SCANSTACK_STOP, (this));
    return stackScanned;
}
#pragma warning(pop)
// Marks the transient pinned object and every entry in the pinned object map
// as roots.  Entries whose pin count dropped to zero are removed (foreground
// only).  Returns the number of root bytes contributed by pinned entries.
template <bool background>
size_t Recycler::ScanPinnedObjects()
{
    size_t scanRootBytes = 0;
    BEGIN_DUMP_OBJECT(this, _u("Pinned"));
    {
        this->TryMarkNonInterior(transientPinnedObject, &transientPinnedObject /* parentReference */);
        if (this->scanPinnedObjectMap)
        {
            // We are scanning the pinned object map now, we don't need to rescan unless
            // we reset mark or we add stuff to the map in Recycler::AddRef
            this->scanPinnedObjectMap = false;
            pinnedObjectMap.MapAndRemoveIf([this, &scanRootBytes](void * obj, PinRecord const& refCount)
            {
                if (refCount == 0)
                {
#if defined(CHECK_MEMORY_LEAK) || defined(LEAK_REPORT)
#ifdef STACK_BACK_TRACE
                    Assert(refCount.stackBackTraces == nullptr);
#endif
#endif
                    // Only remove if we are not doing this in the background.
                    return !background;
                }
                this->TryMarkNonInterior(obj, static_cast<void*>(const_cast<PinRecord*>(&refCount)) /* parentReference */);
                scanRootBytes += sizeof(void *);
                return false;
            });
            if (!background)
            {
                // All zero-count entries were removed above.
                this->hasPendingUnpinnedObject = false;
            }
        }
    }
    END_DUMP_OBJECT(this);
    if (background)
    {
        // Re-enable resize now that we are done
        pinnedObjectMap.EnableResize();
    }
    return scanRootBytes;
}
// Adapter so external callers can feed memory ranges into the recycler's
// non-interior scan.
void
RecyclerScanMemoryCallback::operator()(void** obj, size_t byteCount)
{
    this->recycler->ScanMemoryInline<false>(obj, byteCount);
}
// Marks all non-stack roots: external weak references, host-registered
// external roots, pinned objects, guest arenas, and implicit roots.
// Returns the number of root bytes scanned.
size_t
Recycler::FindRoots()
{
    size_t scanRootBytes = 0;
#ifdef RECYCLER_STATS
    size_t lastMarkCount = this->collectionStats.markData.markCount;
#endif
    GCETW(GC_SCANROOTS_START, (this));
    RECYCLER_PROFILE_EXEC_BEGIN(this, Js::FindRootPhase);
#ifdef ENABLE_PROJECTION
    {
        AUTO_TIMESTAMP(externalWeakReferenceObjectResolve);
        BEGIN_DUMP_OBJECT(this, _u("External Weak Referenced Roots"));
        Assert(!this->IsInRefCountTrackingForProjection());
#if DBG
        AutoIsInRefCountTrackingForProjection autoIsInRefCountTrackingForProjection(this);
#endif
        collectionWrapper->MarkExternalWeakReferencedObjects(this->inPartialCollectMode);
        END_DUMP_OBJECT(this);
    }
#endif
    // go through ITracker* stuff. Don't need to do it if we are doing a partial collection
    // as we keep track and mark all trackable objects.
    // Do this first because the host might unpin stuff in the process
    if (externalRootMarker != NULL)
    {
#if ENABLE_PARTIAL_GC
        if (!this->inPartialCollectMode)
#endif
        {
            RECYCLER_PROFILE_EXEC_BEGIN(this, Js::FindRootExtPhase);
#if DBG_DUMP
            if (GetRecyclerFlagsTable().Trace.IsEnabled(Js::MarkPhase)
                || GetRecyclerFlagsTable().Trace.IsEnabled(Js::FindRootPhase))
            {
                this->forceTraceMark = true;
                Output::Print(_u("Scanning External Roots: "));
            }
#endif
            BEGIN_DUMP_OBJECT(this, _u("External Roots"));
            // PARTIALGC-TODO: How do we count external roots?
            externalRootMarker(externalRootMarkerContext);
            END_DUMP_OBJECT(this);
#if DBG_DUMP
            if (GetRecyclerFlagsTable().Trace.IsEnabled(Js::MarkPhase)
                || GetRecyclerFlagsTable().Trace.IsEnabled(Js::FindRootPhase))
            {
                this->forceTraceMark = false;
                Output::Print(_u("\n"));
                Output::Flush();
            }
#endif
            RECYCLER_PROFILE_EXEC_END(this, Js::FindRootExtPhase);
        }
    }
#if DBG_DUMP
    if (GetRecyclerFlagsTable().Trace.IsEnabled(Js::MarkPhase)
        || GetRecyclerFlagsTable().Trace.IsEnabled(Js::FindRootPhase))
    {
        this->forceTraceMark = true;
        Output::Print(_u("Scanning Pinned Objects: "));
    }
#endif
    scanRootBytes += this->ScanPinnedObjects</*background = */false>();
#if DBG_DUMP
    if (GetRecyclerFlagsTable().Trace.IsEnabled(Js::MarkPhase)
        || GetRecyclerFlagsTable().Trace.IsEnabled(Js::FindRootPhase))
    {
        this->forceTraceMark = false;
        Output::Print(_u("\n"));
        Output::Flush();
    }
#endif
#if ENABLE_CONCURRENT_GC
    Assert(!this->hasPendingConcurrentFindRoot);
#endif
    RECYCLER_PROFILE_EXEC_BEGIN(this, Js::FindRootArenaPhase);
    // Walk the guest arenas, removing those flagged for deletion.
    DListBase<GuestArenaAllocator>::EditingIterator guestArenaIter(&guestArenaList);
    while (guestArenaIter.Next())
    {
        GuestArenaAllocator& allocator = guestArenaIter.Data();
#if ENABLE_CONCURRENT_GC
        if (allocator.pendingDelete)
        {
            Assert(this->hasPendingDeleteGuestArena);
            allocator.SetLockBlockList(false);
            guestArenaIter.RemoveCurrent(&HeapAllocator::Instance);
        }
        else if (this->backgroundFinishMarkCount == 0)
#endif
        {
            // Only scan arena if we haven't finished mark in the background
            // (which is true if concurrent GC is disabled)
            scanRootBytes += ScanArena(&allocator, false);
        }
    }
    this->hasPendingDeleteGuestArena = false;
    DList<ArenaData *, HeapAllocator>::Iterator externalGuestArenaIter(&externalGuestArenaList);
    while (externalGuestArenaIter.Next())
    {
        scanRootBytes += ScanArena(externalGuestArenaIter.Data(), false);
    }
    RECYCLER_PROFILE_EXEC_END(this, Js::FindRootArenaPhase);
    this->ScanImplicitRoots();
    RECYCLER_PROFILE_EXEC_END(this, Js::FindRootPhase);
    GCETW(GC_SCANROOTS_STOP, (this));
    RECYCLER_STATS_ADD(this, rootCount, this->collectionStats.markData.markCount - lastMarkCount);
    return scanRootBytes;
}
- void
- Recycler::ScanImplicitRoots()
- {
- if (this->enableScanImplicitRoots)
- {
- RECYCLER_PROFILE_EXEC_BEGIN(this, Js::FindImplicitRootPhase);
- if (!this->hasScannedInitialImplicitRoots)
- {
- this->ScanInitialImplicitRoots();
- this->hasScannedInitialImplicitRoots = true;
- }
- else
- {
- this->ScanNewImplicitRoots();
- }
- RECYCLER_PROFILE_EXEC_END(this, Js::FindImplicitRootPhase);
- }
- }
- size_t
- Recycler::TryMarkArenaMemoryBlockList(ArenaMemoryBlock * memoryBlocks)
- {
- size_t scanRootBytes = 0;
- ArenaMemoryBlock *blockp = memoryBlocks;
- while (blockp != NULL)
- {
- void** base=(void**)blockp->GetBytes();
- size_t byteCount = blockp->nbytes;
- scanRootBytes += byteCount;
- this->ScanMemory<false>(base, byteCount);
- blockp = blockp->next;
- }
- return scanRootBytes;
- }
#if ENABLE_CONCURRENT_GC
#if FALSE
// Currently compiled out (see ScanArena): scans only the pages of a big
// block list that were written since the last reset, using GetWriteWatch.
size_t
Recycler::TryMarkBigBlockListWithWriteWatch(BigBlock * memoryBlocks)
{
    DWORD pageSize = AutoSystemInfo::PageSize;
    size_t scanRootBytes = 0;
    BigBlock *blockp = memoryBlocks;
    // Reset the write watch bit if we are scanning this in the background thread
    DWORD const writeWatchFlags = this->IsConcurrentFindRootState()? WRITE_WATCH_FLAG_RESET : 0;
    while (blockp != NULL)
    {
        char * currentAddress = (char *)blockp->GetBytes();
        char * endAddress = currentAddress + blockp->currentByte;
        char * currentPageStart = (char *)blockp->allocation;
        while (currentAddress < endAddress)
        {
            void * written;
            ULONG_PTR count = 1;
            // Scan the page if the write-watch query fails or reports a write.
            if (::GetWriteWatch(writeWatchFlags, currentPageStart, AutoSystemInfo::PageSize, &written, &count, &pageSize) != 0 || count == 1)
            {
                char * currentEnd = min(currentPageStart + pageSize, endAddress);
                size_t byteCount = (size_t)(currentEnd - currentAddress);
                scanRootBytes += byteCount;
                this->ScanMemory<false>((void **)currentAddress, byteCount);
            }
            currentPageStart += pageSize;
            currentAddress = currentPageStart;
        }
        blockp = blockp->nextBigBlock;
    }
    return scanRootBytes;
}
#endif
#endif
- size_t
- Recycler::TryMarkBigBlockList(BigBlock * memoryBlocks)
- {
- size_t scanRootBytes = 0;
- BigBlock *blockp = memoryBlocks;
- while (blockp != NULL)
- {
- void** base = (void**)blockp->GetBytes();
- size_t byteCount = blockp->currentByte;
- scanRootBytes += byteCount;
- this->ScanMemory<false>(base, byteCount);
- blockp = blockp->nextBigBlock;
- }
- return scanRootBytes;
- }
- void
- Recycler::ScanInitialImplicitRoots()
- {
- autoHeap.ScanInitialImplicitRoots();
- }
- void
- Recycler::ScanNewImplicitRoots()
- {
- autoHeap.ScanNewImplicitRoots();
- }
- /*------------------------------------------------------------------------------------------------
- * Mark
- *------------------------------------------------------------------------------------------------*/
// Clears all mark bits (heap block map and heap buckets) in preparation for
// a fresh mark pass and moves the collection state to ResetMarks.
// Must only be called when no collection is in progress.
void
Recycler::ResetMarks(ResetMarkFlags flags)
{
    Assert(!this->CollectionInProgress());
    this->SetCollectionState(CollectionStateResetMarks);
    RecyclerVerboseTrace(GetRecyclerFlagsTable(), _u("Reset marks\n"));
    GCETW(GC_RESETMARKS_START, (this));
    RECYCLER_PROFILE_EXEC_BEGIN(this, Js::ResetMarksPhase);
    Assert(IsMarkStackEmpty());
    // Re-arm per-collection scan flags: pinned objects need rescanning and
    // the initial implicit roots have not been scanned yet for this pass.
    this->scanPinnedObjectMap = true;
    this->hasScannedInitialImplicitRoots = false;
    heapBlockMap.ResetMarks();
    autoHeap.ResetMarks(flags);
    RECYCLER_PROFILE_EXEC_END(this, Js::ResetMarksPhase);
    GCETW(GC_RESETMARKS_STOP, (this));
#ifdef RECYCLER_MARK_TRACK
    this->ClearMarkMap();
#endif
}
#ifdef RECYCLER_MARK_TRACK
// Empties the mark-tracking map (diagnostic builds only).
void Recycler::ClearMarkMap()
{
    this->markMap->Clear();
}

// Dumps every (candidate -> parent) entry in the mark-tracking map.
void Recycler::PrintMarkMap()
{
    this->markMap->Map([](void* key, void* value)
    {
        Output::Print(_u("0x%P => 0x%P\n"), key, value);
    });
}
#endif
#if DBG
// Debug-only sanity check that an external mark during allocation is legal:
// must run on the main thread, in a mark state, and (when concurrent GC is
// enabled) not while the concurrent mark is running on the background thread.
void
Recycler::CheckAllocExternalMark() const
{
    Assert(!disableThreadAccessCheck);
    Assert(GetCurrentThreadContextId() == mainThreadId);
#if ENABLE_CONCURRENT_GC
#ifdef HEAP_ENUMERATION_VALIDATION
    // Post-enumeration heap validation is also allowed to mark.
    Assert((this->IsMarkState() || this->IsPostEnumHeapValidationInProgress()) && collectionState != CollectionStateConcurrentMark);
#else
    Assert(this->IsMarkState() && collectionState != CollectionStateConcurrentMark);
#endif
#else
    Assert(this->IsMarkState());
#endif
}
#endif
// Marks a candidate pointer (exact/non-interior) through the main mark
// context. Not valid during heap enumeration or parallel mark, since it
// implicitly uses the Recycler's main markContext.
void
Recycler::TryMarkNonInterior(void* candidate, void* parentReference)
{
#ifdef HEAP_ENUMERATION_VALIDATION
    Assert(!isHeapEnumInProgress || this->IsPostEnumHeapValidationInProgress());
#else
    Assert(!isHeapEnumInProgress);
#endif
    Assert(this->collectionState != CollectionStateParallelMark);
    markContext.Mark</*parallel */ false, /* interior */ false, /* doSpecialMark */ false>(candidate, parentReference);
}
// Marks a candidate pointer that may point into the middle of an object
// (interior pointer) through the main mark context. Same restrictions as
// TryMarkNonInterior: no heap enumeration, no parallel mark.
void
Recycler::TryMarkInterior(void* candidate, void* parentReference)
{
#ifdef HEAP_ENUMERATION_VALIDATION
    Assert(!isHeapEnumInProgress || this->IsPostEnumHeapValidationInProgress());
#else
    Assert(!isHeapEnumInProgress);
#endif
    Assert(this->collectionState != CollectionStateParallelMark);
    markContext.Mark</*parallel */ false, /* interior */ true, /* doSpecialMark */ false>(candidate, parentReference);
}
// Drains the given mark context. When tracked objects are queued (or partial
// collect ignores them), the context is copied to a stack local for better
// codegen and to avoid cross-thread cache-line sharing; otherwise it is
// processed in place so tracked objects can report references through the
// Recycler's main markContext.
template <bool parallel, bool interior>
void
Recycler::ProcessMarkContext(MarkContext * markContext)
{
#if ENABLE_CONCURRENT_GC
    // Copying the markContext onto the stack messes up tracked object handling, because
    // the tracked object will call TryMark[Non]Interior to report its references.
    // These functions implicitly use the main markContext on the Recycler, but this will
    // be overridden if we're processing the main markContext here.
    // So, don't do this if we are going to process tracked objects.
    // (This will be the case if we're not queuing and we're not in partial mode, which ignores tracked objects.)
    // In this case we shouldn't be parallel anyway, so we don't need to worry about cache behavior.
    // We should revisit how we manage markContexts in general in the future, and clean this up
    // by passing the MarkContext through to the tracked object's Mark method.
#if ENABLE_PARTIAL_GC
    if (this->inPartialCollectMode || DoQueueTrackedObject())
#else
    if (DoQueueTrackedObject())
#endif
    {
        // The markContext as passed is one of the markContexts that lives on the Recycler.
        // Copy it locally for processing.
        // This serves two purposes:
        // (1) Allow for better codegen because the markContext is local and we don't need to track the this pointer separately
        // (because all the key processing is inlined into this function).
        // (2) Ensure we don't have weird cache behavior because we're accidentally writing to the same cache line from
        // multiple threads during parallel marking.
        MarkContext localMarkContext = *markContext;
        // Do the actual marking.
        localMarkContext.ProcessMark<parallel, interior>();
        // Copy back to the original location.
        *markContext = localMarkContext;
        // Clear the local mark context.
        localMarkContext.Clear();
    }
    else
#endif
    {
        Assert(!parallel);
        markContext->ProcessMark<parallel, interior>();
    }
}
// Drains the main mark context on one thread (foreground or background),
// selecting the interior-pointer variant based on enableScanInteriorPointers
// and emitting the matching start/stop ETW events around the work.
void
Recycler::ProcessMark(bool background)
{
#if ENABLE_CONCURRENT_GC
    if (background)
    {
        GCETW(GC_BACKGROUNDMARK_START, (this, backgroundRescanCount));
    }
    else
#endif
    {
        GCETW(GC_MARK_START, (this));
    }
    RECYCLER_PROFILE_EXEC_THREAD_BEGIN(background, this, Js::MarkPhase);
    // Template selection happens here; the actual marking loop is in
    // ProcessMarkContext.
    if (this->enableScanInteriorPointers)
    {
        this->ProcessMarkContext</* parallel */ false, /* interior */ true>(&markContext);
    }
    else
    {
        this->ProcessMarkContext</* parallel */ false, /* interior */ false>(&markContext);
    }
    RECYCLER_PROFILE_EXEC_THREAD_END(background, this, Js::MarkPhase);
#if ENABLE_CONCURRENT_GC
    if (background)
    {
        GCETW(GC_BACKGROUNDMARK_STOP, (this, backgroundRescanCount));
    }
    else
#endif
    {
        GCETW(GC_MARK_STOP, (this));
    }
    DebugOnly(this->markContext.VerifyPostMarkState());
}
// Drains one of the (possibly split) mark contexts during a parallel mark.
// Unlike ProcessMark, the context to process is passed in, since each
// participating thread owns a different MarkContext.
void
Recycler::ProcessParallelMark(bool background, MarkContext * markContext)
{
#if ENABLE_CONCURRENT_GC
    if (background)
    {
        GCETW(GC_BACKGROUNDPARALLELMARK_START, (this, backgroundRescanCount));
    }
    else
#endif
    {
        GCETW(GC_PARALLELMARK_START, (this));
    }
    RECYCLER_PROFILE_EXEC_THREAD_BEGIN(background, this, Js::MarkPhase);
    if (this->enableScanInteriorPointers)
    {
        this->ProcessMarkContext</* parallel */ true, /* interior */ true>(markContext);
    }
    else
    {
        this->ProcessMarkContext</* parallel */ true, /* interior */ false>(markContext);
    }
    RECYCLER_PROFILE_EXEC_THREAD_END(background, this, Js::MarkPhase);
#if ENABLE_CONCURRENT_GC
    if (background)
    {
        GCETW(GC_BACKGROUNDPARALLELMARK_STOP, (this, backgroundRescanCount));
    }
    else
#endif
    {
        GCETW(GC_PARALLELMARK_STOP, (this));
    }
}
// In-thread mark entry point: resets mark bits, enters the find-roots state,
// then marks from the roots.
void
Recycler::Mark()
{
    // Marking in thread, we can just pre-mark them
    ResetMarks(this->enableScanImplicitRoots ? ResetMarkFlags_InThreadImplicitRoots : ResetMarkFlags_InThread);
    this->SetCollectionState(CollectionStateFindRoots);
    RootMark(CollectionStateMark);
}
#if ENABLE_CONCURRENT_GC
// Turns on queuing of tracked objects (instead of processing them inline
// during marking). Must start from a clean state: nothing queued, no client
// tracked objects, and not in partial collect mode.
void
Recycler::StartQueueTrackedObject()
{
    Assert(!this->queueTrackedObject);
    Assert(!this->HasPendingTrackObjects());
#if ENABLE_PARTIAL_GC
    Assert(this->clientTrackedObjectList.Empty());
    Assert(!this->inPartialCollectMode);
#endif
    this->queueTrackedObject = true;
}

// Returns whether tracked objects should currently be queued. The asserts
// check the flag's consistency with the collection state (concurrent mark
// and parallel mark require queuing unless partial collect ignores tracked
// objects).
bool
Recycler::DoQueueTrackedObject() const
{
    Assert(this->queueTrackedObject || !this->IsConcurrentMarkState());
    Assert(this->queueTrackedObject || this->isProcessingTrackedObjects || !this->HasPendingTrackObjects());
#if ENABLE_PARTIAL_GC
    Assert(this->queueTrackedObject || this->inPartialCollectMode || !(this->collectionState == CollectionStateParallelMark));
    Assert(!this->queueTrackedObject || (this->clientTrackedObjectList.Empty() && !this->inPartialCollectMode));
#else
    Assert(this->queueTrackedObject || !(this->collectionState == CollectionStateParallelMark));
#endif
    return this->queueTrackedObject;
}
#endif
// Returns the recycler to the not-collecting state: clears per-collection
// flags, flushes pending unroots, exits partial collect mode if active, and
// resyncs the finalize-count stat.
void
Recycler::ResetCollectionState()
{
    Assert(IsMarkStackEmpty());
    this->SetCollectionState(CollectionStateNotCollecting);
#if ENABLE_CONCURRENT_GC
    this->backgroundFinishMarkCount = 0;
#endif
    this->inExhaustiveCollection = false;
    this->inDecommitNowCollection = false;
#if ENABLE_CONCURRENT_GC
    CleanupPendingUnroot();
#endif
#if ENABLE_PARTIAL_GC
    if (inPartialCollectMode)
    {
        FinishPartialCollect();
    }
#endif
#if ENABLE_CONCURRENT_GC
    // Tracked-object queuing must not survive past the collection.
    Assert(!this->DoQueueTrackedObject());
#endif
#ifdef RECYCLER_FINALIZE_CHECK
    // Reset the collection stats.
    this->collectionStats.finalizeCount = this->autoHeap.GetFinalizeCount();
#endif
}
// Aborts an in-progress mark: discards queued mark/rescan work, cleans up
// the parallel mark contexts, unlocks guest arena block lists, and then
// resets the overall collection state.
void
Recycler::ResetMarkCollectionState()
{
    // If we aborted after doing a background Rescan, there will be entries in the markContext.
    // Abort these entries and reset the markContext state.
    markContext.Abort();
    // If we aborted after doing a background parallel Mark, we wouldn't have cleaned up the
    // parallel markContexts yet. Clean these up now.
    // Note parallelMarkContext1 is not used in background parallel (see DoBackgroundParallelMark)
    parallelMarkContext2.Cleanup();
    parallelMarkContext3.Cleanup();
    this->ClearNeedOOMRescan();
    DebugOnly(this->isProcessingRescan = false);
#if ENABLE_CONCURRENT_GC
    // If we're reseting the mark collection state, we need to unlock the block list
    DListBase<GuestArenaAllocator>::EditingIterator guestArenaIter(&guestArenaList);
    while (guestArenaIter.Next())
    {
        GuestArenaAllocator& allocator = guestArenaIter.Data();
        allocator.SetLockBlockList(false);
    }
    this->queueTrackedObject = false;
#endif
    ResetCollectionState();
}
// Snapshots and then zeroes the allocation-byte heuristics that drive
// collection triggering. Note: lastUncollectedAllocBytes must capture the
// current value before uncollectedAllocBytes is cleared.
void
Recycler::ResetHeuristicCounters()
{
    autoHeap.lastUncollectedAllocBytes = autoHeap.uncollectedAllocBytes;
    autoHeap.uncollectedAllocBytes = 0;
    autoHeap.uncollectedExternalBytes = 0;
    ResetPartialHeuristicCounters();
}
// Zeroes the partial-GC heuristic counter (new page count); a no-op when
// partial GC is compiled out.
void Recycler::ResetPartialHeuristicCounters()
{
#if ENABLE_PARTIAL_GC
    autoHeap.uncollectedNewPageCount = 0;
#endif
}
- void
- Recycler::ScheduleNextCollection()
- {
- this->tickCountNextCollection = ::GetTickCount() + RecyclerHeuristic::TickCountCollection;
- this->tickCountNextFinishCollection = ::GetTickCount() + RecyclerHeuristic::TickCountFinishCollection;
- }
#if ENABLE_CONCURRENT_GC
// Forwards sweep preparation to the owned heap (concurrent GC builds only).
void
Recycler::PrepareSweep()
{
    autoHeap.PrepareSweep();
}
#endif
// Rescans dirtied memory after a concurrent mark. Prefers a background
// "finish mark" (bounded by waitTime) when memory pressure is low and the
// retry budget has not been exhausted; otherwise falls back to an in-thread
// FinishMarkRescan. Returns the number of root bytes scanned, or
// InvalidScanRootBytes if the background finish mark timed out.
size_t
Recycler::RescanMark(DWORD waitTime)
{
    bool const onLowMemory = this->NeedOOMRescan();
    // REVIEW: Why are we asserting for DoQueueTrackedObject here?
    // Should we split this into different asserts depending on whether
    // concurrent or partial is enabled?
#if ENABLE_CONCURRENT_GC
#if ENABLE_PARTIAL_GC
    Assert(this->inPartialCollectMode || DoQueueTrackedObject());
#else
    Assert(DoQueueTrackedObject());
#endif
#endif
    {
        // We are about to do a rescan mark, which for consistency requires the runtime to stop any additional mutator threads
        AUTO_NO_EXCEPTION_REGION;
        collectionWrapper->PreRescanMarkCallback();
    }
    // Always called in-thread
    Assert(collectionState == CollectionStateRescanFindRoots);
#if ENABLE_CONCURRENT_GC
    if (!onLowMemory &&     // Don't do background finish mark if we are low on memory
        // Only do background finish mark if we have a time limit or it is forced
        (CUSTOM_PHASE_FORCE1(GetRecyclerFlagsTable(), Js::BackgroundFinishMarkPhase) || waitTime != INFINITE) &&
        // Don't do background finish mark if we failed to finish mark too many times
        (this->backgroundFinishMarkCount < RecyclerHeuristic::MaxBackgroundFinishMarkCount(this->GetRecyclerFlagsTable())))
    {
        this->PrepareBackgroundFindRoots();
        if (StartConcurrent(CollectionStateConcurrentFinishMark))
        {
            this->backgroundFinishMarkCount++;
            this->PrepareSweep();
            GCETW(GC_RESCANMARKWAIT_START, (this, waitTime));
            const BOOL waited = WaitForConcurrentThread(waitTime, RecyclerWaitReason::RescanMark);
            GCETW(GC_RESCANMARKWAIT_STOP, (this, !waited));
            if (!waited)
            {
                CUSTOM_PHASE_PRINT_TRACE1(GetRecyclerFlagsTable(), Js::BackgroundFinishMarkPhase, _u("Finish mark timed out\n"));
                {
                    // We timed out doing the finish mark, notify the runtime
                    AUTO_NO_EXCEPTION_REGION;
                    collectionWrapper->RescanMarkTimeoutCallback();
                }
                // Caller detects the timeout via this sentinel.
                return Recycler::InvalidScanRootBytes;
            }
            Assert(collectionState == CollectionStateRescanWait);
            this->SetCollectionState(CollectionStateRescanFindRoots);
#ifdef RECYCLER_WRITE_WATCH
            if (!CONFIG_FLAG(ForceSoftwareWriteBarrier))
            {
                // Background finish mark should have consumed all write watches.
                Assert(autoHeap.GetWriteWatchPageCount() == 0);
            }
#endif
            return this->backgroundRescanRootBytes;
        }
        // Failed to start the concurrent thread; undo the background
        // find-roots preparation and fall through to the in-thread path.
        this->RevertPrepareBackgroundFindRoots();
    }
#endif
#if ENABLE_CONCURRENT_GC
    this->backgroundFinishMarkCount = 0;
#endif
    // In-thread rescan; FinishMarkRescan returns pages, convert to bytes.
    return FinishMarkRescan(false) * AutoSystemInfo::PageSize;
}
// Completes the mark phase after a concurrent mark: performs the rescan
// (possibly in the background, bounded by waitTime), processes tracked
// objects (client-tracked in partial mode, queued tracked otherwise), and
// re-marks from the roots. Returns total root bytes scanned, or
// InvalidScanRootBytes if the rescan timed out.
size_t
Recycler::FinishMark(DWORD waitTime)
{
    size_t scannedRootBytes = RescanMark(waitTime);
    // An infinite wait can't time out.
    Assert(waitTime != INFINITE || scannedRootBytes != Recycler::InvalidScanRootBytes);
    if (scannedRootBytes != Recycler::InvalidScanRootBytes)
    {
#if DBG && ENABLE_PARTIAL_GC
        RecyclerVerboseTrace(GetRecyclerFlagsTable(), _u("CTO: %d\n"), this->clientTrackedObjectList.Count());
#endif
#if ENABLE_PARTIAL_GC
        if (this->inPartialCollectMode)
        {
            RecyclerVerboseTrace(GetRecyclerFlagsTable(), _u("Processing client tracked objects\n"));
            ProcessClientTrackedObjects();
        }
        else
#endif
#if ENABLE_CONCURRENT_GC
        if (DoQueueTrackedObject())
        {
            RecyclerVerboseTrace(GetRecyclerFlagsTable(), _u("Processing regular tracked objects\n"));
            ProcessTrackedObjects();
#ifdef RECYCLER_WRITE_WATCH
            if (!CONFIG_FLAG(ForceSoftwareWriteBarrier))
            {
                Assert(this->backgroundFinishMarkCount == 0 || autoHeap.GetWriteWatchPageCount() == 0);
            }
#endif
        }
#endif
        // Continue to mark from root one more time
        scannedRootBytes += RootMark(CollectionStateRescanMark);
    }
    return scannedRootBytes;
}
- #if ENABLE_CONCURRENT_GC
- void
- Recycler::DoParallelMark()
- {
- Assert(this->enableParallelMark);
- Assert(this->maxParallelism > 1 && this->maxParallelism <= 4);
- // Split the mark stack into [this->maxParallelism] equal pieces.
- // The actual # of splits is returned, in case the stack was too small to split that many ways.
- MarkContext * splitContexts[3] = { ¶llelMarkContext1, ¶llelMarkContext2, ¶llelMarkContext3 };
- uint actualSplitCount = markContext.Split(this->maxParallelism - 1, splitContexts);
- Assert(actualSplitCount <= 3);
- // If we failed to split at all, just mark in thread with no parallelism.
- if (actualSplitCount == 0)
- {
- this->ProcessMark(false);
- return;
- }
- // We need to queue tracked objects while we mark in parallel.
- // (Unless it's a partial collect, in which case we don't process tracked objects at all)
- #if ENABLE_PARTIAL_GC
- if (!this->inPartialCollectMode)
- #endif
- {
- StartQueueTrackedObject();
- }
- // Kick off marking on the background thread
- bool concurrentSuccess = StartConcurrent(CollectionStateParallelMark);
- // If there's enough work to split, then kick off marking on parallel threads too.
- // If the threads haven't been created yet, this will create them (or fail).
- bool parallelSuccess1 = false;
- bool parallelSuccess2 = false;
- if (concurrentSuccess && actualSplitCount >= 2)
- {
- parallelSuccess1 = parallelThread1.StartConcurrent();
- if (parallelSuccess1 && actualSplitCount == 3)
- {
- parallelSuccess2 = parallelThread2.StartConcurrent();
- }
- }
- // Process our portion of the split.
- this->ProcessParallelMark(false, ¶llelMarkContext1);
- // If we successfully launched parallel work, wait for it to complete.
- // If we failed, then process the work in-thread now.
- if (concurrentSuccess)
- {
- WaitForConcurrentThread(INFINITE, RecyclerWaitReason::DoParallelMark);
- }
- else
- {
- this->ProcessParallelMark(false, &markContext);
- }
- if (actualSplitCount >= 2)
- {
- if (parallelSuccess1)
- {
- parallelThread1.WaitForConcurrent();
- }
- else
- {
- this->ProcessParallelMark(false, ¶llelMarkContext2);
- }
- if (actualSplitCount == 3)
- {
- if (parallelSuccess2)
- {
- parallelThread2.WaitForConcurrent();
- }
- else
- {
- this->ProcessParallelMark(false, ¶llelMarkContext3);
- }
- }
- }
- this->SetCollectionState(CollectionStateMark);
- // Process tracked objects, if any, then do one final mark phase in case they marked any new objects.
- // (Unless it's a partial collect, in which case we don't process tracked objects at all)
- #if ENABLE_PARTIAL_GC
- if (!this->inPartialCollectMode)
- #endif
- {
- this->ProcessTrackedObjects();
- this->ProcessMark(false);
- }
- #if ENABLE_PARTIAL_GC
- else
- {
- Assert(!this->HasPendingTrackObjects());
- }
- #endif
- }
- void
- Recycler::DoBackgroundParallelMark()
- {
- // Split the mark stack into [this->maxParallelism - 1] equal pieces (thus, "- 2" below).
- // The actual # of splits is returned, in case the stack was too small to split that many ways.
- // The parallel threads are hardwired to use parallelMarkContext2/3, so we split using those.
- uint actualSplitCount = 0;
- MarkContext * splitContexts[2] = { ¶llelMarkContext2, ¶llelMarkContext3 };
- if (this->enableParallelMark)
- {
- Assert(this->maxParallelism > 1 && this->maxParallelism <= 4);
- if (this->maxParallelism > 2)
- {
- actualSplitCount = markContext.Split(this->maxParallelism - 2, splitContexts);
- }
- }
- Assert(actualSplitCount <= 2);
- // If we failed to split at all, just mark in thread with no parallelism.
- if (actualSplitCount == 0)
- {
- this->ProcessMark(true);
- return;
- }
- #if ENABLE_PARTIAL_GC
- // We should already be set up to queue tracked objects, unless this is a partial collect
- Assert(this->DoQueueTrackedObject() || this->inPartialCollectMode);
- #else
- Assert(this->DoQueueTrackedObject());
- #endif
- this->SetCollectionState(CollectionStateBackgroundParallelMark);
- // Kick off marking on parallel threads too, if there is work for them
- // If the threads haven't been created yet, this will create them (or fail).
- bool parallelSuccess1 = false;
- bool parallelSuccess2 = false;
- parallelSuccess1 = parallelThread1.StartConcurrent();
- if (parallelSuccess1 && actualSplitCount == 2)
- {
- parallelSuccess2 = parallelThread2.StartConcurrent();
- }
- // Process our portion of the split.
- this->ProcessParallelMark(true, &markContext);
- // If we successfully launched parallel work, wait for it to complete.
- // If we failed, then process the work in-thread now.
- if (parallelSuccess1)
- {
- parallelThread1.WaitForConcurrent();
- }
- else
- {
- this->ProcessParallelMark(true, ¶llelMarkContext2);
- }
- if (actualSplitCount == 2)
- {
- if (parallelSuccess2)
- {
- parallelThread2.WaitForConcurrent();
- }
- else
- {
- this->ProcessParallelMark(true, ¶llelMarkContext3);
- }
- }
- this->SetCollectionState(CollectionStateConcurrentMark);
- }
- #endif
// Finds and marks from all roots (runtime-provided roots, recycler roots,
// and — if the runtime didn't do it — the thread stacks), then transitions
// to markState and runs the mark (parallel if enabled). Returns the number
// of root bytes scanned, inflated past the partial-collect threshold when
// EndMark reports low memory.
size_t
Recycler::RootMark(CollectionState markState)
{
    size_t scannedRootBytes = 0;
    Assert(!this->NeedOOMRescan() || markState == CollectionStateRescanMark);
#if ENABLE_PARTIAL_GC
    RecyclerVerboseTrace(GetRecyclerFlagsTable(), _u("PreMark done, partial collect: %d\n"), this->inPartialCollectMode);
#else
    RecyclerVerboseTrace(GetRecyclerFlagsTable(), _u("PreMark done, partial collect not available\n"));
#endif
    Assert(collectionState == (markState == CollectionStateMark? CollectionStateFindRoots : CollectionStateRescanFindRoots));
    BOOL stacksScannedByRuntime = FALSE;
    {
        // We are about to scan roots in thread, notify the runtime first so it can stop threads if necessary and also provide additional roots
        AUTO_NO_EXCEPTION_REGION;
        RecyclerScanMemoryCallback scanMemory(this);
        scannedRootBytes += collectionWrapper->RootMarkCallback(scanMemory, &stacksScannedByRuntime);
    }
    scannedRootBytes += FindRoots();
    if (!stacksScannedByRuntime)
    {
        // The runtime did not scan the stack(s) for us, so we use the normal Recycler code.
        scannedRootBytes += ScanStack();
    }
    this->SetCollectionState(markState);
#if ENABLE_CONCURRENT_GC
    if (this->enableParallelMark)
    {
        this->DoParallelMark();
    }
    else
#endif
    {
        this->ProcessMark(false);
    }
    if (this->EndMark())
    {
        // REVIEW: This heuristic doesn't apply when partial is off so there's no need
        // to modify scannedRootBytes here, correct?
#if ENABLE_PARTIAL_GC
        // return large root scanned byte to not get into partial mode if we are low on memory
        scannedRootBytes = RecyclerSweepManager::MaxPartialCollectRescanRootBytes + 1;
#endif
    }
    return scannedRootBytes;
}
// If the mark phase ran out of mark-stack memory, reruns the mark via
// EndMarkOnLowMemory (or, when dumping the object graph, just records the
// OOM). Returns true when the low-memory remark path was taken.
bool
Recycler::EndMarkCheckOOMRescan()
{
    bool oomRescan = false;
    if (this->NeedOOMRescan())
    {
#ifdef RECYCLER_DUMP_OBJECT_GRAPH
        if (this->objectGraphDumper)
        {
            // Do not complete the mark if we are just dumping the object graph
            // Just report out of memory
            this->objectGraphDumper->isOutOfMemory = true;
            this->ClearNeedOOMRescan();
        }
        else
#endif
        {
            EndMarkOnLowMemory();
            oomRescan = true;
        }
    }
    // Done with the mark stack, it should be empty.
    // Release pages it is holding.
    Assert(!HasPendingMarkObjects());
    Assert(!HasPendingTrackObjects());
    return oomRescan;
}
// Finishes the mark phase: notifies the runtime, handles any OOM rescan
// (rechecking after before-collect callbacks, which may mark more objects),
// then releases and decommits the pages held by all mark contexts.
// Returns true if a low-memory rescan occurred.
bool
Recycler::EndMark()
{
#if ENABLE_CONCURRENT_GC
    Assert(!this->DoQueueTrackedObject());
#endif
#if ENABLE_PARTIAL_GC
    Assert(this->clientTrackedObjectList.Empty());
#endif
    {
        // We have finished marking
        AUTO_NO_EXCEPTION_REGION;
        collectionWrapper->EndMarkCallback();
    }
    bool oomRescan = EndMarkCheckOOMRescan();
    if (ProcessObjectBeforeCollectCallbacks())
    {
        // callbacks may trigger additional marking, need to check OOMRescan again
        oomRescan |= EndMarkCheckOOMRescan();
    }
    // GC-CONSIDER: Consider keeping some page around
    GCETW(GC_DECOMMIT_CONCURRENT_COLLECT_PAGE_ALLOCATOR_START, (this));
    // Clean up mark contexts, which will release held free pages
    // Do this for all contexts before we decommit, to make sure all pages are freed
    markContext.Cleanup();
    parallelMarkContext1.Cleanup();
    parallelMarkContext2.Cleanup();
    parallelMarkContext3.Cleanup();
    // Decommit all pages
    markContext.DecommitPages();
    parallelMarkContext1.DecommitPages();
    parallelMarkContext2.DecommitPages();
    parallelMarkContext3.DecommitPages();
    GCETW(GC_DECOMMIT_CONCURRENT_COLLECT_PAGE_ALLOCATOR_STOP, (this));
    return oomRescan;
}
// Recovery path for running out of mark-stack memory during marking:
// decommits as much as possible, then repeatedly rescans OOM-flagged blocks
// and re-marks until no block reports OOM. Fails fast if even a single
// block cannot be rescanned.
void
Recycler::EndMarkOnLowMemory()
{
    GCETW(GC_ENDMARKONLOWMEMORY_START, (this));
    Assert(this->NeedOOMRescan());
    this->inEndMarkOnLowMemory = true;
    // Treat this as a concurrent mark reset so that we don't invalidate the allocators
    RecyclerVerboseTrace(GetRecyclerFlagsTable(), _u("OOM during mark- rerunning mark\n"));
    // Try to release as much memory as possible
    autoHeap.DecommitNow();
#ifdef ENABLE_DEBUG_CONFIG_OPTIONS
    uint iterations = 0;
#endif
    do
    {
#if ENABLE_PARTIAL_GC
        Assert(this->clientTrackedObjectList.Empty());
#endif
#if ENABLE_CONCURRENT_GC
        // Always queue tracked objects during rescan, to avoid changes to mark state.
        // (Unless we're in a partial, in which case we ignore tracked objects)
        Assert(!this->DoQueueTrackedObject());
#if ENABLE_PARTIAL_GC
        if (!this->inPartialCollectMode)
#endif
        {
            this->StartQueueTrackedObject();
        }
#endif
        this->SetCollectionState(CollectionStateRescanFindRoots);
        this->ClearNeedOOMRescan();
#if DBG
        Assert(!this->isProcessingRescan);
        this->isProcessingRescan = true;
#endif
        if (!heapBlockMap.OOMRescan(this))
        {
            // Kill the process- we couldn't even rescan a single block
            // We are in pretty low memory state at this point
            // The fail-fast is present for two reasons:
            // 1) Defense-in-depth for cases we hadn't thought about
            // 2) Deal with cases like -MaxMarkStackPageCount:1 which can still hang without the fail-fast
            MarkStack_OOM_unrecoverable_error();
        }
        autoHeap.Rescan(RescanFlags_None);
        DebugOnly(this->isProcessingRescan = false);
        this->ProcessMark(false);
#if ENABLE_CONCURRENT_GC
        // Process any tracked objects we found
#if ENABLE_PARTIAL_GC
        if (!this->inPartialCollectMode)
#endif
        {
            ProcessTrackedObjects();
        }
#endif
        // Drain the mark stack
        ProcessMark(false);
#ifdef ENABLE_DEBUG_CONFIG_OPTIONS
        iterations++;
#endif
    }
    // Marking may itself hit OOM again; keep iterating until it doesn't.
    while (this->NeedOOMRescan());
    Assert(!markContext.GetPageAllocator()->DisableAllocationOutOfMemory());
    Assert(!parallelMarkContext1.GetPageAllocator()->DisableAllocationOutOfMemory());
    Assert(!parallelMarkContext2.GetPageAllocator()->DisableAllocationOutOfMemory());
    Assert(!parallelMarkContext3.GetPageAllocator()->DisableAllocationOutOfMemory());
    CUSTOM_PHASE_PRINT_TRACE1(GetRecyclerFlagsTable(), Js::RecyclerPhase, _u("EndMarkOnLowMemory iterations: %d\n"), iterations);
#if ENABLE_PARTIAL_GC
    Assert(this->clientTrackedObjectList.Empty());
#endif
#if ENABLE_CONCURRENT_GC
    Assert(!this->DoQueueTrackedObject());
#endif
    this->inEndMarkOnLowMemory = false;
#if ENABLE_PARTIAL_GC
    if (this->inPartialCollectMode)
    {
        this->FinishPartialCollect();
    }
#endif
    GCETW(GC_ENDMARKONLOWMEMORY_STOP, (this));
}
#if DBG
// Debug-only: true when the main mark context and all three parallel mark
// contexts have no pending work.
bool
Recycler::IsMarkStackEmpty()
{
    return (markContext.IsEmpty() && parallelMarkContext1.IsEmpty() && parallelMarkContext2.IsEmpty() && parallelMarkContext3.IsEmpty());
}
#endif
#ifdef HEAP_ENUMERATION_VALIDATION
// Runs a root scan + mark after heap enumeration, invoking the supplied
// callback (stored in pfPostHeapEnumScanCallback for the duration) on the
// scanned objects; both callback fields are cleared before returning.
void
Recycler::PostHeapEnumScan(PostHeapEnumScanCallback callback, void *data)
{
    this->pfPostHeapEnumScanCallback = callback;
    this->postHeapEnunScanData = data;
    FindRoots();
    ProcessMark(false);
    this->pfPostHeapEnumScanCallback = NULL;
    this->postHeapEnunScanData = NULL;
}
#endif
#if ENABLE_CONCURRENT_GC
// Adds a tracked (finalizable) object to the main mark context's tracked
// queue; returns whether the add succeeded.
bool
Recycler::QueueTrackedObject(FinalizableObject * trackableObject)
{
    return markContext.AddTrackedObject(trackableObject);
}
#endif
- bool
- Recycler::FindImplicitRootObject(void* candidate, RecyclerHeapObjectInfo& heapObject)
- {
- HeapBlock* heapBlock = FindHeapBlock(candidate);
- if (heapBlock == nullptr)
- {
- return false;
- }
- if (heapBlock->GetHeapBlockType() < HeapBlock::HeapBlockType::SmallAllocBlockTypeCount)
- {
- return ((SmallHeapBlock*)heapBlock)->FindImplicitRootObject(candidate, this, heapObject);
- }
- else if (!heapBlock->IsLargeHeapBlock())
- {
- return ((MediumHeapBlock*)heapBlock)->FindImplicitRootObject(candidate, this, heapObject);
- }
- else
- {
- return ((LargeHeapBlock*)heapBlock)->FindImplicitRootObject(candidate, this, heapObject);
- }
- }
- bool
- Recycler::FindHeapObject(void* candidate, FindHeapObjectFlags flags, RecyclerHeapObjectInfo& heapObject)
- {
- HeapBlock* heapBlock = FindHeapBlock(candidate);
- return heapBlock && heapBlock->FindHeapObject(candidate, this, flags, heapObject);
- }
- bool
- Recycler::FindHeapObjectWithClearedAllocators(void* candidate, RecyclerHeapObjectInfo& heapObject)
- {
- // Heap enum has some case where it allocates, so we can't assert
- Assert(autoHeap.AllocatorsAreEmpty() || this->isHeapEnumInProgress);
- return FindHeapObject(candidate, FindHeapObjectFlags_ClearedAllocators, heapObject);
- }
- void*
- Recycler::GetRealAddressFromInterior(void* candidate)
- {
- HeapBlock * heapBlock = heapBlockMap.GetHeapBlock(candidate);
- if (heapBlock == NULL)
- {
- return NULL;
- }
- return heapBlock->GetRealAddressFromInterior(candidate);
- }
- /*------------------------------------------------------------------------------------------------
- * Sweep
- *------------------------------------------------------------------------------------------------*/
#if ENABLE_PARTIAL_GC
// Runs the sweep phase. Returns true when a concurrent sweep was started
// (or forced in-thread after a failed spawn) and will complete later;
// false when the sweep fully completed here.
bool
Recycler::Sweep(size_t rescanRootBytes, bool concurrent, bool adjustPartialHeuristics)
#else
bool
Recycler::Sweep(bool concurrent)
#endif
{
#if ENABLE_PARTIAL_GC && ENABLE_CONCURRENT_GC
    Assert(!this->hasBackgroundFinishPartial);
#endif
#if ENABLE_CONCURRENT_GC
    if (!this->enableConcurrentSweep)
#endif
    {
        // Concurrent sweep disabled (or compiled out): force in-thread sweep.
        concurrent = false;
    }
    RECYCLER_PROFILE_EXEC_BEGIN(this, concurrent? Js::ConcurrentSweepPhase : Js::SweepPhase);
#if ENABLE_PARTIAL_GC
    recyclerSweepManagerInstance.BeginSweep(this, rescanRootBytes, adjustPartialHeuristics);
#else
    recyclerSweepManagerInstance.BeginSweep(this);
#endif
    this->SweepHeap(concurrent, *recyclerSweepManager);
#if ENABLE_CONCURRENT_GC
    if (concurrent)
    {
        // If we finished mark in the background, all the relevant write watches should already be reset
        // Only reset write watch if we didn't finish mark in the background
        if (this->backgroundFinishMarkCount == 0)
        {
#if ENABLE_PARTIAL_GC
            if (this->inPartialCollectMode)
            {
#ifdef RECYCLER_WRITE_WATCH
                if (!CONFIG_FLAG(ForceSoftwareWriteBarrier))
                {
                    RECYCLER_PROFILE_EXEC_BEGIN(this, Js::ResetWriteWatchPhase);
                    if (!autoHeap.ResetWriteWatch())
                    {
                        // Shouldn't happen
                        Assert(false);
                        // Disable partial collect
                        this->enablePartialCollect = false;
                        // We haven't done any partial collection yet, just get out of partial collect mode
                        this->inPartialCollectMode = false;
                    }
                    RECYCLER_PROFILE_EXEC_END(this, Js::ResetWriteWatchPhase);
                }
#endif
            }
#endif
        }
    }
    else
#endif
    {
        // In-thread: complete the sweep now.
        recyclerSweepManager->FinishSweep();
        recyclerSweepManager->EndSweep();
    }
    RECYCLER_PROFILE_EXEC_END(this, concurrent? Js::ConcurrentSweepPhase : Js::SweepPhase);
    this->SetCollectionState(CollectionStatePostSweepRedeferralCallback);
    // Note that PostSweepRedeferralCallback can't have exception escape.
    collectionWrapper->PostSweepRedeferralCallBack();
#if ENABLE_CONCURRENT_GC
    if (concurrent)
    {
        bool needForceForground = !StartConcurrent(CollectionStateConcurrentSweep);
        if(needForceForground)
        {
            // Failed to spawn the concurrent sweep.
            // Instead, force the concurrent sweep to happen right here in thread.
#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
            if (CONFIG_FLAG_RELEASE(EnableConcurrentSweepAlloc))
            {
                this->allowAllocationsDuringConcurrentSweepForCollection = false;
            }
#endif
            this->SetCollectionState(CollectionStateConcurrentSweep);
            DoBackgroundWork(true);
            // Continue as if the concurrent sweep were executing
            // Next time we check for completion, we will finish the sweep just as if it had happened out of thread.
        }
#ifdef ENABLE_JS_ETW
        collectionFinishReason = ETWEventGCActivationTrigger::ETWEvent_GC_Trigger_Status_StartedConcurrent;
#endif
        return true;
    }
#endif
#ifdef ENABLE_JS_ETW
    // The false below just means we don't need a concurrent sweep as we have completed a sweep above.
    collectionFinishReason = ETWEventGCActivationTrigger::ETWEvent_GC_Trigger_Status_Completed;
#endif
    return false;
}
#ifdef ENABLE_DEBUG_CONFIG_OPTIONS
// Prints recycler memory perf counters (live object count/size in DBG_DUMP
// builds, used page size otherwise) when perf counters are compiled in.
void Recycler::DisplayMemStats()
{
#ifdef PERF_COUNTERS
#if DBG_DUMP
    Output::Print(_u("Recycler Live Object Count  %u\n"), PerfCounter::RecyclerCounterSet::GetLiveObjectCounter().GetValue());
    Output::Print(_u("Recycler Live Object Size  %u\n"), PerfCounter::RecyclerCounterSet::GetLiveObjectSizeCounter().GetValue());
#endif
    Output::Print(_u("Recycler Used Page Size  %u\n"), PerfCounter::PageAllocatorCounterSet::GetUsedSizeCounter(PageAllocatorType::PageAllocatorType_Recycler).GetValue());
#endif
}
#endif
- CollectedRecyclerWeakRefHeapBlock CollectedRecyclerWeakRefHeapBlock::Instance;
// Sweeps the weak reference table and (when enabled) the weak reference
// regions: removes weak refs whose own storage is dead, nulls out strong
// refs whose targets are dead, and frees whole regions that are no longer
// referenced. Bumps weakReferenceCleanupId when anything was cleaned up.
void
Recycler::SweepWeakReference()
{
    RECYCLER_PROFILE_EXEC_BEGIN(this, Js::SweepWeakPhase);
    GCETW(GC_SWEEP_WEAKREF_START, (this));
    // REVIEW: Clean up the weak reference map concurrently?
    bool hasCleanup = false;
#if defined(GCETW) && defined(ENABLE_JS_ETW)
    uint scannedCount = weakReferenceMap.Count();
#endif
    weakReferenceMap.Map([&hasCleanup](RecyclerWeakReferenceBase * weakRef) -> bool
    {
        // The weak ref object itself was not marked: drop the entry.
        if (!weakRef->weakRefHeapBlock->TestObjectMarkedBit(weakRef))
        {
            hasCleanup = true;
            // Remove
            return false;
        }
        // The weak ref is live but its target was not marked: clear the
        // strong ref and drop the entry.
        if (!weakRef->strongRefHeapBlock->TestObjectMarkedBit(weakRef->strongRef))
        {
            hasCleanup = true;
            weakRef->strongRef = nullptr;
            // Put in a dummy heap block so that we can still do the isPendingConcurrentSweep check first.
            weakRef->strongRefHeapBlock = &CollectedRecyclerWeakRefHeapBlock::Instance;
            // Remove
            return false;
        }
        return true;
    });
#if defined(GCETW) && defined(ENABLE_JS_ETW)
    uint regionScannedCount = 0;
    uint regionClearedCount = 0;
#endif
#if ENABLE_WEAK_REFERENCE_REGIONS
    auto edIt = this->weakReferenceRegionList.GetEditingIterator();
    while (edIt.Next())
    {
        RecyclerWeakReferenceRegion region = edIt.Data();
        // We want to see if user code has any reference to the region, if not, we can free the whole thing
        if (!region.GetHeapBlock()->TestObjectMarkedBit(region.GetPtr()))
        {
            edIt.RemoveCurrent();
            hasCleanup = true;
#if defined(GCETW) && defined(ENABLE_JS_ETW)
            regionClearedCount += (uint)region.GetCount();
#endif
            continue;
        }
        // The region is referenced, clean up any stale weak references
        RecyclerWeakReferenceRegionItem<void*>* refs = region.GetPtr();
#if defined(GCETW) && defined(ENABLE_JS_ETW)
        regionScannedCount += (uint)region.GetCount();
#endif
        for (size_t i = 0; i < region.GetCount(); ++i)
        {
            RecyclerWeakReferenceRegionItem<void*> &ref = refs[i];
            if (ref.ptr == nullptr)
            {
                continue;
            }
            if (((uintptr_t)ref.heapBlock & 0x1) == 0x1)
            {
                // Background thread marked this ref. Unmark it, and keep it
                ref.heapBlock = (HeapBlock*)((uintptr_t)ref.heapBlock & ~0x1);
                continue;
            }
            if (ref.heapBlock == nullptr)
            {
                // Lazily resolve and cache the heap block for this entry.
                HeapBlock* block = this->FindHeapBlock(ref.ptr);
                if (block == nullptr)
                {
                    // This is not a real reference
                    AssertMsg(false, "WeakReferenceRegionItems should only contain recycler references");
                    continue;
                }
                else
                {
                    ref.heapBlock = block;
                }
            }
            if (!ref.heapBlock->TestObjectMarkedBit(ref))
            {
                // Target is dead: clear the entry.
                ref.ptr = nullptr;
                ref.heapBlock = nullptr;
                hasCleanup = true;
#if defined(GCETW) && defined(ENABLE_JS_ETW)
                regionClearedCount++;
#endif
            }
        }
    }
#endif
    // Bump the cleanup generation only if anything changed.
    this->weakReferenceCleanupId += hasCleanup;
#if defined(GCETW) && defined(ENABLE_JS_ETW)
    const uint keptCount = weakReferenceMap.Count();
    GCETW(GC_SWEEP_WEAKREF_STOP_EX, (this, scannedCount, (scannedCount - keptCount), regionScannedCount, regionClearedCount));
#endif
    RECYCLER_PROFILE_EXEC_END(this, Js::SweepWeakPhase);
}
// Finalize-and-sweep over the whole heap. With 'concurrent' set this only sets
// up the sweep (state CollectionStateSetupConcurrentSweep) so the background
// thread can carry it out; otherwise the sweep runs to completion in-thread
// (state CollectionStateSweep). Weak references are always swept in-thread
// first.
void
Recycler::SweepHeap(bool concurrent, RecyclerSweepManager& recyclerSweepManager)
{
    Assert(!this->hasPendingDeleteGuestArena);
    Assert(!this->isHeapEnumInProgress);
#if ENABLE_CONCURRENT_GC
    Assert(!this->DoQueueTrackedObject());
    if (concurrent)
    {
        SetCollectionState(CollectionStateSetupConcurrentSweep);
#if ENABLE_BACKGROUND_PAGE_ZEROING
        if (CONFIG_FLAG(EnableBGFreeZero))
        {
            // Queue freed pages for background zeroing for the duration of
            // the concurrent sweep setup (stopped below).
            autoHeap.StartQueueZeroPage();
        }
#endif
    }
    else
#endif
    {
        Assert(!concurrent);
        SetCollectionState(CollectionStateSweep);
    }

    // Weak references are swept in-thread regardless of the concurrent flag.
    this->SweepWeakReference();

#if ENABLE_CONCURRENT_GC
    if (concurrent)
    {
        GCETW(GC_SETUPBACKGROUNDSWEEP_START, (this));
    }
    else
#endif
    {
        GCETW(GC_SWEEP_START, (this));
    }

    autoHeap.FinalizeAndSweep(recyclerSweepManager, concurrent);

#if ENABLE_CONCURRENT_GC
    if (concurrent)
    {
#if ENABLE_BACKGROUND_PAGE_ZEROING
        if (CONFIG_FLAG(EnableBGFreeZero))
        {
            autoHeap.StopQueueZeroPage();
        }
#endif
        GCETW(GC_SETUPBACKGROUNDSWEEP_STOP, (this));
    }
    else
    {
#if ENABLE_BACKGROUND_PAGE_ZEROING
        if (CONFIG_FLAG(EnableBGFreeZero))
        {
            // In-thread sweep should not leave pages queued for zeroing.
            Assert(!autoHeap.HasZeroQueuedPages());
        }
#endif
        uint sweptBytes = 0;
#ifdef RECYCLER_STATS
        sweptBytes = (uint)collectionStats.objectSweptBytes;
#endif
        GCETW(GC_SWEEP_STOP, (this, sweptBytes));
    }
#endif
}
#if ENABLE_PARTIAL_GC && ENABLE_CONCURRENT_GC
// Finishes a partial collect from the background (concurrent) thread and
// exits partial-collect mode. Must be called with the background sweep
// manager (asserted below).
void
Recycler::BackgroundFinishPartialCollect(RecyclerSweepManager * recyclerSweepManager)
{
    Assert(this->inPartialCollectMode);
    Assert(recyclerSweepManager != nullptr && recyclerSweepManager->IsBackground());

    // Record that the finish happened on the background thread before handing
    // the work to the heap.
    this->hasBackgroundFinishPartial = true;
    this->autoHeap.FinishPartialCollect(recyclerSweepManager);
    this->inPartialCollectMode = false;
}
#endif
// Runs all pending finalizer/dispose work. Disposal can re-enter script, so
// this method (a) tracks re-entrancy via inDispose, (b) disables further
// dispose for its own duration via AutoRestoreValue, and (c) saves/restores
// the allocation-tracking state because finalizers may allocate mid-tracking.
void
Recycler::DisposeObjects()
{
    Assert(this->allowDispose && this->hasDisposableObject && !this->inDispose);
    Assert(!isHeapEnumInProgress);
    GCETW(GC_DISPOSE_START, (this));
    ASYNC_HOST_OPERATION_START(collectionWrapper);
    this->inDispose = true;
#ifdef PROFILE_RECYCLER_ALLOC
    // finalizer may allocate memory and dispose object can happen in the middle of allocation
    // save and restore the tracked object info
    TrackAllocData oldAllocData = { 0 };
    if (trackerDictionary != nullptr)
    {
        oldAllocData = nextAllocData;
        nextAllocData.Clear();
    }
#endif
#ifdef ENABLE_DEBUG_CONFIG_OPTIONS
    if (GetRecyclerFlagsTable().Trace.IsEnabled(Js::RecyclerPhase))
    {
        Output::Print(_u("Disposing objects\n"));
    }
#endif
    // Disable dispose within this method, restore it when we're done
    AutoRestoreValue<bool> disableDispose(&this->allowDispose, false);
#ifdef FAULT_INJECTION
    this->collectionWrapper->DisposeScriptContextByFaultInjectionCallBack();
#endif
    this->collectionWrapper->PreDisposeObjectsCallBack();
    // Scope timestamp to just dispose
    {
        AUTO_TIMESTAMP(dispose);
        autoHeap.DisposeObjects();
    }
#ifdef PROFILE_RECYCLER_ALLOC
    if (trackerDictionary != nullptr)
    {
        // Dispose must not have left any tracked allocation in flight.
        Assert(nextAllocData.IsEmpty());
        nextAllocData = oldAllocData;
    }
#endif
#ifdef ENABLE_PROJECTION
    {
        Assert(!this->inResolveExternalWeakReferences);
        Assert(!this->allowDispose);
#if DBG || defined RECYCLER_TRACE
        AutoRestoreValue<bool> inResolveExternalWeakReferencedObjects(&this->inResolveExternalWeakReferences, true);
#endif
        AUTO_TIMESTAMP(externalWeakReferenceObjectResolve);
        // This is where it is safe to resolve external weak references as they can lead to new script entry
        collectionWrapper->ResolveExternalWeakReferencedObjects();
    }
#endif
    Assert(!this->inResolveExternalWeakReferences);
    Assert(this->inDispose);
    this->inDispose = false;
    ASYNC_HOST_OPERATION_END(collectionWrapper);
    uint sweptBytes = 0;
#ifdef RECYCLER_STATS
    sweptBytes = (uint)collectionStats.objectSweptBytes;
#endif
    GCETW(GC_DISPOSE_STOP, (this, sweptBytes));
}
// Runs DisposeObjects if there is pending disposable work and dispose is
// currently allowed. Returns true when dispose work was performed. The
// collection trace parameters are saved/restored around the call because
// dispose can re-enter collection and clobber them.
bool
Recycler::FinishDisposeObjects()
{
    CUSTOM_PHASE_PRINT_TRACE1(GetRecyclerFlagsTable(), Js::DisposePhase, _u("[Dispose] AllowDispose in FinishDisposeObject: %d\n"), this->allowDispose);
    if (this->hasDisposableObject && this->allowDispose)
    {
        CUSTOM_PHASE_PRINT_TRACE1(GetRecyclerFlagsTable(), Js::DisposePhase, _u("[Dispose] FinishDisposeObject, calling Dispose: %d\n"), this->allowDispose);
#ifdef RECYCLER_TRACE
        CollectionParam savedCollectionParam = collectionParam;
#endif
        DisposeObjects();
#ifdef RECYCLER_TRACE
        collectionParam = savedCollectionParam;
#endif
        // FinishDisposeObjects is always called either during a collection,
        // or we will check the NeedExhaustiveRepeatCollect(), so no need to check it here
        return true;
    }
#ifdef RECYCLER_TRACE
    if (!this->inDispose && this->hasDisposableObject
        && GetRecyclerFlagsTable().Trace.IsEnabled(Js::RecyclerPhase))
    {
        Output::Print(_u("%04X> RC(%p): %s %d\n"), this->mainThreadId, this, _u("Dispose object delayed"), static_cast<CollectionState>(this->collectionState));
    }
#endif
    return false;
}
- template bool Recycler::FinishDisposeObjectsNow<FinishDispose>();
- template bool Recycler::FinishDisposeObjectsNow<FinishDisposeTimed>();
- template <CollectionFlags flags>
- bool
- Recycler::FinishDisposeObjectsNow()
- {
- if (inDisposeWrapper)
- {
- return false;
- }
- return FinishDisposeObjectsWrapped<flags>();
- }
// Core of the delayed-dispose path: if the flags allow dispose and dispose is
// needed (optionally gated by a time heuristic when script is active), runs
// the host's DisposeObjects and, since dispose may pump messages and request
// a GC, honors any resulting exhaustive-repeat request. Returns whether
// dispose work was performed.
template <CollectionFlags flags>
inline
bool
Recycler::FinishDisposeObjectsWrapped()
{
    const BOOL allowDisposeFlag = flags & CollectOverride_AllowDispose;
    if (allowDisposeFlag)
    {
        // Disposing objects can have reentrancy, make sure there is no reentrancy lock when calling Dispose
        DebugOnly(collectionWrapper->CheckJsReentrancyOnDispose());
        if (this->NeedDispose())
        {
            if ((flags & CollectHeuristic_TimeIfScriptActive) == CollectHeuristic_TimeIfScriptActive)
            {
                // Timed mode: only dispose when the time heuristic says so.
                if (!this->NeedDisposeTimed())
                {
                    return false;
                }
            }
            this->allowDispose = true;
            this->inDisposeWrapper = true;
#ifdef RECYCLER_TRACE
            if (GetRecyclerFlagsTable().Trace.IsEnabled(Js::RecyclerPhase))
            {
                Output::Print(_u("%04X> RC(%p): %s\n"), this->mainThreadId, this, _u("Process delayed dispose object"));
            }
#endif
            collectionWrapper->DisposeObjects(this);

            // Dispose may get into message loop and cause a reentrant GC. If those don't allow reentrant
            // it will get added to a pending collect request.
            // FinishDisposedObjectsWrapped/DisposeObjectsWrapped is called at a place that might not be during a collection
            // and won't check NeedExhaustiveRepeatCollect(), need to check it here to honor those requests
            if (!this->CollectionInProgress() && NeedExhaustiveRepeatCollect() && ((flags & CollectOverride_NoExhaustiveCollect) != CollectOverride_NoExhaustiveCollect))
            {
#ifdef RECYCLER_TRACE
                CaptureCollectionParam((CollectionFlags)(flags & ~CollectMode_Partial), true);
#endif
                DoCollectWrapped((CollectionFlags)(flags & ~CollectMode_Partial));
            }
            this->inDisposeWrapper = false;
            return true;
        }
    }
    return false;
}
/*------------------------------------------------------------------------------------------------
 * Collect
 *------------------------------------------------------------------------------------------------*/
// Fully blocking, in-thread collection on the allocator (script) thread:
// mark, sweep, finish. Always returns true (a collection was completed).
BOOL
Recycler::CollectOnAllocatorThread()
{
#if ENABLE_PARTIAL_GC
    Assert(!inPartialCollectMode);
#endif
#ifdef RECYCLER_TRACE
    PrintCollectTrace(Js::GarbageCollectPhase);
#endif
    this->CollectionBegin<Js::GarbageCollectPhase>();
    this->Mark();
    // Partial collect mode is not re-enabled after a non-partial in-thread GC because partial GC heuristics are not adjusted
    // after a full in-thread GC. Enabling partial collect mode causes partial GC heuristics to be reset before the next full
    // in-thread GC, thereby allowing partial GC to kick in more easily without being able to adjust heuristics after the full
    // GCs. Until we have a way of adjusting partial GC heuristics after a full in-thread GC, once partial collect mode is
    // turned off, it will remain off until a concurrent GC happens
    this->Sweep();
    this->CollectionEnd<Js::GarbageCollectPhase>();
    FinishCollection();
    return true;
}
// Explicitly instantiate all possible modes
template BOOL Recycler::CollectNow<CollectOnScriptIdle>();
template BOOL Recycler::CollectNow<CollectOnScriptExit>();
template BOOL Recycler::CollectNow<CollectOnAllocation>();
template BOOL Recycler::CollectNow<CollectOnTypedArrayAllocation>();
template BOOL Recycler::CollectNow<CollectOnScriptCloseNonPrimary>();
template BOOL Recycler::CollectNow<CollectExhaustiveCandidate>();
template BOOL Recycler::CollectNow<CollectNowConcurrent>();
template BOOL Recycler::CollectNow<CollectNowExhaustive>();
template BOOL Recycler::CollectNow<CollectNowDecommitNowExplicit>();
template BOOL Recycler::CollectNow<CollectNowPartial>();
template BOOL Recycler::CollectNow<CollectNowConcurrentPartial>();
template BOOL Recycler::CollectNow<CollectNowForceInThread>();
template BOOL Recycler::CollectNow<CollectNowForceInThreadExternal>();
template BOOL Recycler::CollectNow<CollectNowForceInThreadExternalNoStack>();
template BOOL Recycler::CollectNow<CollectNowForceInThreadExternalExhaustive>();
template BOOL Recycler::CollectNow<CollectNowForceInThreadExternalExhaustiveNoStack>();
template BOOL Recycler::CollectNow<CollectOnRecoverFromOutOfMemory>();
template BOOL Recycler::CollectNow<CollectNowDefault>();
template BOOL Recycler::CollectNow<CollectOnSuspendCleanup>();
template BOOL Recycler::CollectNow<CollectNowDefaultLSCleanup>();
#if defined(CHECK_MEMORY_LEAK) || defined(LEAK_REPORT)
template BOOL Recycler::CollectNow<CollectNowFinalGC>();
#endif
#ifdef ENABLE_DEBUG_CONFIG_OPTIONS
template BOOL Recycler::CollectNow<CollectNowExhaustiveSkipStack>();
#endif

// Public GC entry point, parameterized at compile time by collection flags.
// Rejects collection during the post-collection callback, when collection is
// disabled (debug config) unless explicitly requested, and during shutdown;
// otherwise routes to the exhaustive-candidate or normal path.
template <CollectionFlags flags>
BOOL
Recycler::CollectNow()
{
    // Force-in-thread cannot be concurrent or partial
    CompileAssert((flags & CollectOverride_ForceInThread) == 0 || (flags & (CollectMode_Concurrent | CollectMode_Partial)) == 0);

    // Collections not allowed when the recycler is currently executing the PostCollectionCallback
    if (this->IsAllocatableCallbackState())
    {
        return false;
    }
#if ENABLE_DEBUG_CONFIG_OPTIONS
    if ((disableCollection && (flags & CollectOverride_Explicit) == 0) || isShuttingDown)
#else
    if (isShuttingDown)
#endif
    {
        Assert(collectionState == CollectionStateNotCollecting
            || collectionState == CollectionStateExit
            || this->isShuttingDown);
        return false;
    }
    if (flags & CollectOverride_ExhaustiveCandidate)
    {
        return CollectWithExhaustiveCandidate<flags>();
    }
    return CollectInternal<flags>();
}
- template <CollectionFlags flags>
- BOOL
- Recycler::GetPartialFlag()
- {
- #if ENABLE_PARTIAL_GC
- #pragma prefast(suppress:6313, "flags is a template parameter and can be 0")
- return(flags & CollectMode_Partial) && inPartialCollectMode;
- #else
- return false;
- #endif
- }
// Registers an exhaustive-GC candidate notification and then (unless the
// flags say notification-only via CollectHeuristic_Never) continues into the
// normal heuristic-driven collection path. Note: hasExhaustiveCandidate is set
// before any early return so an in-progress exhaustive GC will repeat.
template <CollectionFlags flags>
BOOL
Recycler::CollectWithExhaustiveCandidate()
{
    Assert(flags & CollectOverride_ExhaustiveCandidate);
    // Currently we don't have any exhaustive candidate that has heuristic.
    Assert((flags & CollectHeuristic_Mask & ~CollectHeuristic_Never) == 0);
    this->hasExhaustiveCandidate = true;
    if (flags & CollectHeuristic_Never)
    {
        // This is just an exhaustive candidate notification. Don't trigger a GC.
        return false;
    }
    // Continue with the GC heuristic
    return CollectInternal<flags>();
}
// Routes a collection request: suppresses reentrant GC during dispose (unless
// explicitly allowed), tries to finish an in-flight concurrent GC when
// appropriate, applies heuristics when requested, and otherwise starts a
// collection immediately.
template <CollectionFlags flags>
BOOL
Recycler::CollectInternal()
{
    // CollectHeuristic_Never flag should only be used with exhaustive candidate
    Assert((flags & CollectHeuristic_Never) == 0);

    // If we're in a re-entrant state, we want to allow GC to be triggered only
    // from allocation (or trigger points with AllowReentrant). This is to minimize
    // the number of reentrant GCs
    if ((flags & CollectOverride_AllowReentrant) == 0 && this->inDispose)
    {
        return false;
    }
#ifdef RECYCLER_TRACE
    CaptureCollectionParam(flags);
#endif
#if ENABLE_CONCURRENT_GC
    const BOOL concurrent = flags & CollectMode_Concurrent;
    const BOOL finishConcurrent = flags & CollectOverride_FinishConcurrent;

    // If we priority boosted, we should try to finish it every chance we get
    // Otherwise, we should finishing it if we are not doing a concurrent GC,
    // or the flags tell us to always try to finish a concurrent GC (CollectOverride_FinishConcurrent)
    if ((!concurrent || finishConcurrent || priorityBoost) && this->CollectionInProgress())
    {
        return TryFinishConcurrentCollect<flags>();
    }
#endif
    if (flags & CollectHeuristic_Mask)
    {
        // Check some heuristics first before starting a collection
        return CollectWithHeuristic<flags>();
    }
    // Start a collection now.
    return Collect<flags>();
}
// Applies allocation-size and time heuristics before starting a collection.
// A pending script-context-close GC bypasses the heuristics entirely. When a
// heuristic says "not yet", falls back to FinishDisposeObjectsWrapped so
// delayed dispose work still gets a chance to run. Partial-GC requests use
// the new-page-count heuristic first.
template <CollectionFlags flags>
BOOL
Recycler::CollectWithHeuristic()
{
    // CollectHeuristic_Never flag should only be used with exhaustive candidate
    Assert((flags & CollectHeuristic_Never) == 0);

    BOOL isScriptContextCloseGCPending = FALSE;
    const BOOL allocSize = flags & CollectHeuristic_AllocSize;
    const BOOL timedIfScriptActive = flags & CollectHeuristic_TimeIfScriptActive;
    const BOOL timedIfInScript = flags & CollectHeuristic_TimeIfInScript;
    const BOOL timed = (timedIfScriptActive && isScriptActive) || (timedIfInScript && isInScript) || (flags & CollectHeuristic_Time);

    if ((flags & CollectOverride_CheckScriptContextClose) != 0)
    {
        isScriptContextCloseGCPending = this->collectionWrapper->GetIsScriptContextCloseGCPending();
    }

    // If there is a script context close GC pending, we need to do a GC regardless
    // Otherwise, we should check the heuristics to see if a GC is necessary
    if (!isScriptContextCloseGCPending)
    {
#if ENABLE_PARTIAL_GC
        if (GetPartialFlag<flags>())
        {
            Assert(enablePartialCollect);
            Assert(allocSize);
            Assert(this->uncollectedNewPageCountPartialCollect >= RecyclerSweepManager::MinPartialUncollectedNewPageCount
                && this->uncollectedNewPageCountPartialCollect <= RecyclerHeuristic::Instance.MaxPartialUncollectedNewPageCount);

            // PARTIAL-GC-REVIEW: For now, we have only alloc size heuristic
            // Maybe improve this heuristic by looking at how many free pages are in the page allocator.
            if (autoHeap.uncollectedNewPageCount > this->uncollectedNewPageCountPartialCollect)
            {
#ifdef ENABLE_JS_ETW
                if (IS_UNKNOWN_GC_TRIGGER(collectionStartReason))
                {
                    collectionStartReason = ETWEventGCActivationTrigger::ETWEvent_GC_Trigger_Partial_GC_AllocSize_Heuristic;
                }
#endif
                return Collect<flags>();
            }
        }
#endif
        // allocation byte count heuristic, collect every 1 MB allocated
        if (allocSize && (autoHeap.uncollectedAllocBytes < RecyclerHeuristic::UncollectedAllocBytesCollection()))
        {
            return FinishDisposeObjectsWrapped<flags>();
        }
        // time heuristic, allocate every 1000 clock tick, or 64 MB is allocated in a short time
        if (timed && (autoHeap.uncollectedAllocBytes < RecyclerHeuristic::Instance.MaxUncollectedAllocBytes))
        {
            uint currentTickCount = GetTickCount();
#ifdef RECYCLER_TRACE
            collectionParam.timeDiff = currentTickCount - tickCountNextCollection;
#endif
            // Signed difference handles GetTickCount wrap-around: positive
            // means the next-collection deadline has not been reached yet.
            if ((int)(tickCountNextCollection - currentTickCount) >= 0)
            {
                return FinishDisposeObjectsWrapped<flags>();
            }
        }
#ifdef RECYCLER_TRACE
        else
        {
            uint currentTickCount = GetTickCount();
            collectionParam.timeDiff = currentTickCount - tickCountNextCollection;
        }
#endif
    }
    // Passed all the heuristic, do some GC work, maybe
    return Collect<(CollectionFlags)(flags & ~CollectMode_Partial)>();
}
// Starts a new collection (or, with concurrent GC, tries to finish the one in
// progress). Latches post-collection behavior flags, strips the partial bits
// when partial mode is not active, and dispatches to DoCollectWrapped.
template <CollectionFlags flags>
BOOL
Recycler::Collect()
{
#if ENABLE_CONCURRENT_GC
    if (this->CollectionInProgress())
    {
        // If we are forced in thread, we can't be concurrent
        // If we are not concurrent we should have been handled before in CollectInternal and we shouldn't be here
        Assert((flags & CollectOverride_ForceInThread) == 0);
        Assert((flags & CollectMode_Concurrent) != 0);
        return TryFinishConcurrentCollect<flags>();
    }
#endif
    // We clear the flag indicating that there is a GC pending because
    // of script context close, since we're about to do a GC anyway,
    // since the current GC will suffice.
    this->collectionWrapper->ClearIsScriptContextCloseGCPending();

    SetupPostCollectionFlags<flags>();

    const BOOL partial = GetPartialFlag<flags>();
    CollectionFlags finalFlags = flags;
    if (!partial)
    {
        // Partial mode is not active, so strip any partial request bits.
        finalFlags = (CollectionFlags)(flags & ~CollectMode_Partial);
    }
    // ExecuteRecyclerCollectionFunction may cause an exception, in which case we may trigger the assert
    // in SetupPostCollectionFlags because we didn't reset the inExhaustiveCollection variable on
    // an exception. Use this flag to disable the assertion when an exception occurs.
    DebugOnly(this->hasIncompleteDoCollect = true);
    {
        RECORD_TIMESTAMP(initialCollectionStartTime);
#ifdef NTBUILD
        this->telemetryBlock->initialCollectionStartProcessUsedBytes = PageAllocator::GetProcessUsedBytes();
        this->telemetryBlock->exhaustiveRepeatedCount = 0;
#endif
        return DoCollectWrapped(finalFlags);
    }
}
- template <CollectionFlags flags>
- void Recycler::SetupPostCollectionFlags()
- {
- // If we are not in a collection (collection in progress or in dispose), inExhaustiveCollection should not be set
- // Otherwise, we have missed an exhaustive collection.
- Assert(this->hasIncompleteDoCollect ||
- this->CollectionInProgress() || this->inDispose || (!this->inExhaustiveCollection && !this->inDecommitNowCollection));
- // Record whether we want to start exhaustive detection or do decommit now after GC
- const BOOL exhaustive = flags & CollectMode_Exhaustive;
- const BOOL decommitNow = flags & CollectMode_DecommitNow;
- const BOOL cacheCleanup = flags & CollectMode_CacheCleanup;
- if (decommitNow)
- {
- this->inDecommitNowCollection = true;
- }
- if (exhaustive)
- {
- this->inExhaustiveCollection = true;
- }
- if (cacheCleanup)
- {
- this->inCacheCleanupCollection = true;
- }
- }
// Runs DoCollect through the host's collection wrapper (which may add
// exception handling / host callbacks), after latching per-collection options
// derived from the flags (skip stack scan, allow dispose).
BOOL
Recycler::DoCollectWrapped(CollectionFlags flags)
{
#if ENABLE_CONCURRENT_GC
    this->skipStack = ((flags & CollectOverride_SkipStack) != 0);
    DebugOnly(this->isConcurrentGCOnIdle = (flags == CollectOnScriptIdle));
#endif
    this->allowDispose = (flags & CollectOverride_AllowDispose) == CollectOverride_AllowDispose;
    BOOL collected = collectionWrapper->ExecuteRecyclerCollectionFunction(this, &Recycler::DoCollect, flags);
#if ENABLE_CONCURRENT_GC
    // On return the collection is either fully done, or parked in one of the
    // concurrent phases that the background thread will drive forward.
    Assert(IsConcurrentExecutingState() || IsConcurrentSweepState() || IsConcurrentFinishedState() || !CollectionInProgress());
#else
    Assert(!CollectionInProgress());
#endif
    return collected;
}
- bool
- Recycler::NeedExhaustiveRepeatCollect() const
- {
- return this->inExhaustiveCollection && this->hasExhaustiveCandidate;
- }
// Central collection driver. Flushes pending dispose work, then loops
// (repeating while an exhaustive candidate keeps appearing) choosing between:
// partial collect, concurrent mark, concurrent sweep, concurrent-thread full
// collect, or fully in-thread collect — falling back down that ladder as
// modes are unavailable or fail. Returns whether any collection completed
// (i.e. whether there may be freed objects to reuse).
BOOL
Recycler::DoCollect(CollectionFlags flags)
{
    // ExecuteRecyclerCollectionFunction may cause exception. In which case, we may trigger the assert
    // in SetupPostCollectionFlags because we didn't reset the inExhaustiveCollection variable if
    // an exception. We are not in DoCollect, there shouldn't be any more exception. Reset the flag
    DebugOnly(this->hasIncompleteDoCollect = false);

#ifdef RECYCLER_MEMORY_VERIFY
    this->Verify(Js::RecyclerPhase);
#endif
#ifdef RECYCLER_FINALIZE_CHECK
    this->VerifyFinalize();
#endif
#if ENABLE_PARTIAL_GC
    BOOL partial = flags & CollectMode_Partial;
#if DBG && defined(RECYCLER_DUMP_OBJECT_GRAPH)
    // Can't pass in RecyclerPartialStress and DumpObjectGraphOnCollect or call CollectGarbage with DumpObjectGraph
    if (GetRecyclerFlagsTable().RecyclerPartialStress) {
        Assert(!GetRecyclerFlagsTable().DumpObjectGraphOnCollect && !this->dumpObjectOnceOnCollect);
    } else if (GetRecyclerFlagsTable().DumpObjectGraphOnCollect || this->dumpObjectOnceOnCollect) {
        Assert(!GetRecyclerFlagsTable().RecyclerPartialStress);
    }
#endif
#ifdef RECYCLER_STRESS
    if (partial && GetRecyclerFlagsTable().RecyclerPartialStress)
    {
        // Stress mode: force partial collect mode on so the partial path runs.
        this->inPartialCollectMode = true;
        this->forcePartialScanStack = true;
    }
#endif
#endif
#ifdef RECYCLER_DUMP_OBJECT_GRAPH
    if (dumpObjectOnceOnCollect || GetRecyclerFlagsTable().DumpObjectGraphOnCollect)
    {
        DumpObjectGraph();
        dumpObjectOnceOnCollect = false;
#if ENABLE_PARTIAL_GC
        // Can't do a partial collect if DumpObjectGraph is set since it'll call FinishPartial
        // which will set inPartialCollectMode to false.
        partial = false;
#endif
    }
#endif
#if ENABLE_CONCURRENT_GC
    const bool concurrent = (flags & CollectMode_Concurrent) != 0;
    const BOOL forceInThread = flags & CollectOverride_ForceInThread;
#else
    const bool concurrent = false;
#endif
    // Flush the pending dispose objects first if dispose is allowed
    Assert(!this->CollectionInProgress());
#if ENABLE_CONCURRENT_GC
    Assert(this->backgroundFinishMarkCount == 0);
#endif
    bool collected = FinishDisposeObjects();
    do
    {
        INC_TIMESTAMP_FIELD(exhaustiveRepeatedCount);
        RECORD_TIMESTAMP(currentCollectionStartTime);
#ifdef NTBUILD
        this->telemetryBlock->currentCollectionStartProcessUsedBytes = PageAllocator::GetProcessUsedBytes();
#endif
#if ENABLE_CONCURRENT_GC
        // DisposeObject may call script again and start another GC, so we may still be in concurrent GC state
        if (this->CollectionInProgress())
        {
            Assert(this->IsConcurrentState());
            Assert(collected);
            if (forceInThread)
            {
                // Caller demanded in-thread completion: finish the concurrent GC now.
                return this->FinishConcurrentCollect(flags);
            }
            return true;
        }
        Assert(this->backgroundFinishMarkCount == 0);
#endif
#ifdef ENABLE_JS_ETW
        // Record why this collection was triggered, for ETW tracing.
        this->collectionStartFlags = flags;
        if (flags == CollectOnScriptIdle)
        {
            collectionStartReason = ETWEventGCActivationTrigger::ETWEvent_GC_Trigger_IdleCollect;
        }
        const BOOL timedIfScriptActive = flags & CollectHeuristic_TimeIfScriptActive;
        const BOOL timedIfInScript = flags & CollectHeuristic_TimeIfInScript;
        if (IS_UNKNOWN_GC_TRIGGER(collectionStartReason) && (flags & CollectHeuristic_Mask))
        {
            if (timedIfScriptActive)
            {
                collectionStartReason = ETWEventGCActivationTrigger::ETWEvent_GC_Trigger_TimeAndAllocSizeIfScriptActive_Heuristic;
            }
            else if (timedIfInScript)
            {
                collectionStartReason = ETWEventGCActivationTrigger::ETWEvent_GC_Trigger_TimeAndAllocSizeIfInScript_Heuristic;
            }
            else
            {
                collectionStartReason = ETWEventGCActivationTrigger::ETWEvent_GC_Trigger_TimeAndAllocSize_Heuristic;
            }
        }
        if (IS_UNKNOWN_GC_TRIGGER(collectionStartReason))
        {
            this->collectionStartReason = ETWEventGCActivationTrigger::ETWEvent_GC_Trigger_NoHeuristic;
        }
#endif
#if DBG || defined RECYCLER_TRACE
        collectionCount++;
#endif
        this->SetCollectionState(Collection_PreCollection);
        collectionWrapper->PreCollectionCallBack(flags);
        this->SetCollectionState(CollectionStateNotCollecting);

        hasExhaustiveCandidate = false; // reset the candidate detection
#ifdef RECYCLER_STATS
#if ENABLE_PARTIAL_GC
        RecyclerCollectionStats oldCollectionStats = collectionStats;
#endif
        memset(&collectionStats, 0, sizeof(RecyclerCollectionStats));
        this->collectionStats.startCollectAllocBytes = autoHeap.uncollectedAllocBytes;
#if ENABLE_PARTIAL_GC
        this->collectionStats.startCollectNewPageCount = autoHeap.uncollectedNewPageCount;
        this->collectionStats.uncollectedNewPageCountPartialCollect = this->uncollectedNewPageCountPartialCollect;
#endif
#endif
#if ENABLE_PARTIAL_GC
        if (partial)
        {
#if ENABLE_CONCURRENT_GC
            Assert(!forceInThread);
#endif
#ifdef RECYCLER_STATS
            // We are only doing a partial GC, copy some old stats
            collectionStats.finalizeCount = oldCollectionStats.finalizeCount;
            memcpy(collectionStats.heapBlockCount, oldCollectionStats.smallNonLeafHeapBlockPartialUnusedCount,
                sizeof(oldCollectionStats.smallNonLeafHeapBlockPartialUnusedCount));
            memcpy(collectionStats.heapBlockFreeByteCount, oldCollectionStats.smallNonLeafHeapBlockPartialUnusedBytes,
                sizeof(oldCollectionStats.smallNonLeafHeapBlockPartialUnusedBytes));
            memcpy(collectionStats.smallNonLeafHeapBlockPartialUnusedCount, oldCollectionStats.smallNonLeafHeapBlockPartialUnusedCount,
                sizeof(oldCollectionStats.smallNonLeafHeapBlockPartialUnusedCount));
            memcpy(collectionStats.smallNonLeafHeapBlockPartialUnusedBytes, oldCollectionStats.smallNonLeafHeapBlockPartialUnusedBytes,
                sizeof(oldCollectionStats.smallNonLeafHeapBlockPartialUnusedBytes));
#endif
            Assert(enablePartialCollect && inPartialCollectMode);
            if (!this->PartialCollect(concurrent))
            {
                // Handed off to the background thread; nothing more in-thread.
                return collected;
            }
            // This disable partial if we do a repeated exhaustive GC
            partial = false;
            collected = true;
            continue;
        }
        // Not doing partial collect, we should decommit on finish collect
        decommitOnFinish = true;
        if (inPartialCollectMode)
        {
            // finish the partial collect first
            FinishPartialCollect();
            // Old heap block with free object is made available, count that as being collected
            collected = true;
            // PARTIAL-GC-CONSIDER: should we just pretend we did a GC, since we have made the free listed object
            // available to be used, instead of starting off another GC?
        }
#endif
#if ENABLE_CONCURRENT_GC
        bool skipConcurrent = false;
#ifdef ENABLE_DEBUG_CONFIG_OPTIONS
        // If the below flag is passed in, skip doing a non-blocking concurrent collect. Instead,
        // we will do a blocking concurrent collect, which is basically an in-thread GC
        skipConcurrent = GetRecyclerFlagsTable().ForceBlockingConcurrentCollect;
#endif
        // We are about to start a collection. Reset our heuristic counters now, so that
        // any allocations that occur during concurrent collection count toward the next collection's threshold.
        // NOTE(review): this reset only happens in ENABLE_CONCURRENT_GC builds —
        // confirm whether non-concurrent builds reset counters elsewhere.
        ResetHeuristicCounters();
        if (concurrent && !skipConcurrent)
        {
            Assert(!forceInThread);
            if (enableConcurrentMark)
            {
                if (StartBackgroundMarkCollect())
                {
                    // Tell the caller whether we have finish a collection and there maybe free object to reuse
                    return collected;
                }
                // Either ResetWriteWatch failed or the thread service failed
                // So concurrent mark is disabled, at least for now
            }
            if (enableConcurrentSweep)
            {
                if (StartConcurrentSweepCollect())
                {
                    collected = true;
                    continue;
                }
                // out of memory during collection
                return collected;
            }
            // concurrent collection failed, default back to non-concurrent collection
        }
        if (!forceInThread && enableConcurrentMark)
        {
            // Blocking collect that still uses the concurrent thread for mark.
            if (!CollectOnConcurrentThread())
            {
                // time out or out of memory during collection
                return collected;
            }
        }
        else
#endif
        {
            if (!CollectOnAllocatorThread())
            {
                // out of memory during collection
                return collected;
            }
        }
        collected = true;
#ifdef RECYCLER_TRACE
        collectionParam.repeat = true;
#endif
    }
    while (this->NeedExhaustiveRepeatCollect());
#if ENABLE_CONCURRENT_GC
    // DisposeObject may call script again and start another GC, so we may still be in concurrent GC state
    if (this->CollectionInProgress())
    {
        Assert(this->IsConcurrentState());
        Assert(collected);
        return true;
    }
#endif
    EndCollection();
    // Tell the caller whether we have finish a collection and there maybe free object to reuse
    return collected;
}
// Wraps up a fully completed collection: clears the exhaustive flag and, when
// a decommit-now collection was requested (or forced via the
// ForceDecommitOnCollect config flag), decommits free pages immediately.
void
Recycler::EndCollection()
{
#if ENABLE_CONCURRENT_GC
    Assert(this->backgroundFinishMarkCount == 0);
#endif
    Assert(!this->CollectionInProgress());
    // no more collection is requested, we can turn exhaustive back off
    this->inExhaustiveCollection = false;
    if (this->inDecommitNowCollection || CUSTOM_CONFIG_FLAG(GetRecyclerFlagsTable(), ForceDecommitOnCollect))
    {
#ifdef RECYCLER_TRACE
        if (GetRecyclerFlagsTable().Trace.IsEnabled(Js::RecyclerPhase))
        {
            Output::Print(_u("%04X> RC(%p): %s\n"), this->mainThreadId, this, _u("Decommit now"));
        }
#endif
        autoHeap.DecommitNow();
        // The decommit request is consumed; clear it for the next collection.
        this->inDecommitNowCollection = false;
    }
    RECORD_TIMESTAMP(lastCollectionEndTime);
}
#if ENABLE_PARTIAL_GC
// Runs a partial collection (rescan-based, rather than a full mark).
// Returns true when the partial collect completed in-thread; returns false
// when the finish-mark was successfully handed off to the concurrent thread
// (in which case the caller must not finish the collection here).
bool
Recycler::PartialCollect(bool concurrent)
{
    Assert(IsMarkStackEmpty());
    Assert(this->inPartialCollectMode);
    Assert(collectionState == CollectionStateNotCollecting);

    // Rescan again
    this->SetCollectionState(CollectionStateRescanFindRoots);
#if ENABLE_CONCURRENT_GC
    if (concurrent && enableConcurrentMark && this->partialConcurrentNextCollection)
    {
        this->PrepareBackgroundFindRoots();
        if (StartConcurrent(CollectionStateConcurrentFinishMark))
        {
#ifdef RECYCLER_TRACE
            PrintCollectTrace(Js::ConcurrentPartialCollectPhase);
#endif
            // Handed off to the background thread.
            return false;
        }
        // Background start failed; undo the preparation and fall through to
        // the in-thread path.
        this->RevertPrepareBackgroundFindRoots();
    }
#endif
#ifdef RECYCLER_STRESS
    if (forcePartialScanStack)
    {
        // Mark the roots since they need not have been marked
        // in RecyclerPartialStress mode
        this->RootMark(collectionState);
    }
#endif
#ifdef RECYCLER_TRACE
    PrintCollectTrace(Js::PartialCollectPhase);
#endif
    bool needConcurrentSweep = false;
    this->CollectionBegin<Js::PartialCollectPhase>();
    size_t rescanRootBytes = FinishMark(INFINITE);
    Assert(rescanRootBytes != Recycler::InvalidScanRootBytes);
    needConcurrentSweep = this->Sweep(rescanRootBytes, concurrent, true);
    this->CollectionEnd<Js::PartialCollectPhase>();

    // Only reset the new page counter
    autoHeap.uncollectedNewPageCount = 0;

    // Finish collection
    FinishCollection(needConcurrentSweep);
    return true;
}
#endif
// During a partial collect, marks every object registered in the client
// tracked object list (each entry treated as a non-interior root), then
// releases the list back to its allocator.
void
Recycler::ProcessClientTrackedObjects()
{
    GCETW(GC_PROCESS_CLIENT_TRACKED_OBJECT_START, (this));
    Assert(this->inPartialCollectMode);
#if ENABLE_CONCURRENT_GC
    Assert(!this->DoQueueTrackedObject());
#endif
    if (!this->clientTrackedObjectList.Empty())
    {
        SListBase<void *>::Iterator iter(&this->clientTrackedObjectList);
        while (iter.Next())
        {
            auto& reference = iter.Data();
            this->TryMarkNonInterior(reference, &reference /* parentReference */); // Reference to inside the node
            RECYCLER_STATS_INC(this, clientTrackedObjectCount);
        }
        this->clientTrackedObjectList.Clear(&this->clientTrackedObjectAllocator);
    }
    GCETW(GC_PROCESS_CLIENT_TRACKED_OBJECT_STOP, (this));
}
- void
- Recycler::ClearPartialCollect()
- {
- #if ENABLE_CONCURRENT_GC
- Assert(!this->DoQueueTrackedObject());
- #endif
- this->autoHeap.unusedPartialCollectFreeBytes = 0;
- this->partialUncollectedAllocBytes = 0;
- this->clientTrackedObjectList.Clear(&this->clientTrackedObjectAllocator);
- this->uncollectedNewPageCountPartialCollect = (size_t)-1;
- }
// In-thread completion of a partial collect: returns the heap to normal
// (non-partial) operation and resets the partial-GC bookkeeping.
// recyclerSweepManager may be null; it must not be the background sweep
// manager here (BackgroundFinishPartialCollect handles that case).
void
Recycler::FinishPartialCollect(RecyclerSweepManager * recyclerSweepManager)
{
    Assert(recyclerSweepManager == nullptr || !recyclerSweepManager->IsBackground());
    RECYCLER_PROFILE_EXEC_BEGIN(this, Js::FinishPartialPhase);
    Assert(inPartialCollectMode);
#if ENABLE_CONCURRENT_GC
    Assert(!this->DoQueueTrackedObject());
#endif
    autoHeap.FinishPartialCollect(recyclerSweepManager);
    this->inPartialCollectMode = false;
    ClearPartialCollect();
    RECYCLER_PROFILE_EXEC_END(this, Js::FinishPartialPhase);
}
void
Recycler::EnsureNotCollecting()
{
    // Forces any in-progress (possibly concurrent) collection to completion so
    // that the caller can rely on no collection being active afterwards.
#if ENABLE_CONCURRENT_GC
    FinishConcurrent<ForceFinishCollection>();
#endif
    Assert(!this->CollectionInProgress());
}
void Recycler::EnumerateObjects(ObjectInfoBits infoBits, void (*CallBackFunction)(void * address, size_t size))
{
    // Delegates to autoHeap to enumerate heap objects matching infoBits, invoking
    // CallBackFunction for each. Requires a quiescent heap, so any in-progress
    // collection (and partial-collect mode) is finished first.
    // Make sure we are not collecting
    EnsureNotCollecting();
#if ENABLE_PARTIAL_GC
    // We are updating the free bit vector, messing up the partial collection state.
    // Just get out of partial collect mode
    // GC-CONSIDER: consider adding an option in FinishConcurrent to not get into partial collect mode during sweep.
    if (inPartialCollectMode)
    {
        FinishPartialCollect();
    }
#endif
    autoHeap.EnumerateObjects(infoBits, CallBackFunction);
    // GC-TODO: Explicit heap?
}
- BOOL
- Recycler::IsMarkState() const
- {
- return (collectionState & Collection_Mark);
- }
- BOOL
- Recycler::IsFindRootsState() const
- {
- return (collectionState & Collection_FindRoots);
- }
#if DBG
BOOL
Recycler::IsReentrantState() const
{
    // Debug-only: a state in which re-entering the recycler is acceptable —
    // either no collection is in progress, or (with concurrent GC) the collection
    // is in a concurrent state.
#if ENABLE_CONCURRENT_GC
    return !this->CollectionInProgress() || this->IsConcurrentState();
#else
    return !this->CollectionInProgress();
#endif
}
#endif
#if defined(ENABLE_JS_ETW) && defined(NTBUILD)
// Maps a collection phase to the ETW activation kind reported by CollectionBegin/End.
template <Js::Phase phase> static ETWEventGCActivationKind GetETWEventGCActivationKind();
template <> ETWEventGCActivationKind GetETWEventGCActivationKind<Js::GarbageCollectPhase>() { return ETWEvent_GarbageCollect; }
template <> ETWEventGCActivationKind GetETWEventGCActivationKind<Js::ThreadCollectPhase>() { return ETWEvent_ThreadCollect; }
template <> ETWEventGCActivationKind GetETWEventGCActivationKind<Js::ConcurrentCollectPhase>() { return ETWEvent_ConcurrentCollect; }
template <> ETWEventGCActivationKind GetETWEventGCActivationKind<Js::PartialCollectPhase>() { return ETWEvent_PartialCollect; }
#endif
template <Js::Phase phase>
void
Recycler::CollectionBegin()
{
    // Emits the profiling and ETW "collection started" events for the given phase.
    RECYCLER_PROFILE_EXEC_BEGIN2(this, Js::RecyclerPhase, phase);
    GCETW_INTERNAL(GC_START, (this, GetETWEventGCActivationKind<phase>()));
    GCETW_INTERNAL(GC_START2, (this, GetETWEventGCActivationKind<phase>(), this->collectionStartReason, this->collectionStartFlags));
}
template <Js::Phase phase>
void
Recycler::CollectionEnd()
{
    // Emits the ETW and profiling "collection finished" events for the given
    // phase; the finish reason comes from collectionFinishReason.
    GCETW_INTERNAL(GC_STOP, (this, GetETWEventGCActivationKind<phase>()));
    GCETW_INTERNAL(GC_STOP2, (this, GetETWEventGCActivationKind<phase>(), this->collectionFinishReason, this->collectionStartFlags));
    RECYCLER_PROFILE_EXEC_END2(this, phase, Js::RecyclerPhase);
}
- #if ENABLE_CONCURRENT_GC
size_t
Recycler::BackgroundRescan(RescanFlags rescanFlags)
{
    // Background-thread rescan of pages dirtied since marking began. Returns the
    // number of pages rescanned, or Recycler::InvalidScanRootBytes when an OOM
    // rescan is needed (caller must fall back to an in-thread rescan).
    Assert(!this->isProcessingRescan);
    DebugOnly(this->isProcessingRescan = true);
    GCETW(GC_BACKGROUNDRESCAN_START, (this, backgroundRescanCount));
    RECYCLER_PROFILE_EXEC_BACKGROUND_BEGIN(this, Js::BackgroundRescanPhase);
#if GLOBAL_ENABLE_WRITE_BARRIER
    if (CONFIG_FLAG(ForceSoftwareWriteBarrier))
    {
        // Replay pending software write-barrier notifications before rescanning.
        pendingWriteBarrierBlockMap.LockResize();
        pendingWriteBarrierBlockMap.Map([](void* address, size_t size)
        {
            RecyclerWriteBarrierManager::WriteBarrier(address, size);
        });
        pendingWriteBarrierBlockMap.UnlockResize();
    }
#endif
    size_t rescannedPageCount = heapBlockMap.Rescan(this, ((rescanFlags & RescanFlags_ResetWriteWatch) != 0));
    rescannedPageCount += autoHeap.Rescan(rescanFlags);
    RECYCLER_PROFILE_EXEC_BACKGROUND_END(this, Js::BackgroundRescanPhase);
    GCETW(GC_BACKGROUNDRESCAN_STOP, (this, backgroundRescanCount));
    this->backgroundRescanCount++;
    if (!this->NeedOOMRescan())
    {
        // NOTE(review): on success, isProcessingRescan is cleared only when write
        // watch was reset — presumably the rescan continues otherwise; confirm
        // against callers.
        if ((rescanFlags & RescanFlags_ResetWriteWatch) != 0)
        {
            DebugOnly(this->isProcessingRescan = false);
        }
        return rescannedPageCount;
    }
    DebugOnly(this->isProcessingRescan = false);
    return Recycler::InvalidScanRootBytes;
}
void
Recycler::BackgroundResetWriteWatchAll()
{
    // Resets dirty-page tracking across the entire heap block map (background thread).
    GCETW(GC_BACKGROUNDRESETWRITEWATCH_START, (this, -1));
    heapBlockMap.ResetDirtyPages(this);
    GCETW(GC_BACKGROUNDRESETWRITEWATCH_STOP, (this, -1));
}
- #endif
size_t
Recycler::FinishMarkRescan(bool background)
{
    // Rescans dirty pages to finish marking, either in-thread or on the
    // background thread, and returns the number of pages scanned. Without
    // concurrent GC there is nothing to rescan and 0 is returned.
#if !ENABLE_CONCURRENT_GC
    Assert(!background);
#endif
    if (background)
    {
        GCETW(GC_BACKGROUNDRESCAN_START, (this, 0));
    }
    else
    {
        GCETW(GC_RESCAN_START, (this));
    }
    RECYCLER_PROFILE_EXEC_THREAD_BEGIN(background, this, Js::RescanPhase);
#if ENABLE_CONCURRENT_GC
    // Only a background rescan also resets write watch.
    RescanFlags const flags = (background ? RescanFlags_ResetWriteWatch : RescanFlags_None);
#else
    Assert(!background);
    RescanFlags const flags = RescanFlags_None;
#endif
#if DBG
    Assert(!this->isProcessingRescan);
    this->isProcessingRescan = true;
#endif
#if ENABLE_CONCURRENT_GC
    size_t scannedPageCount = heapBlockMap.Rescan(this, ((flags & RescanFlags_ResetWriteWatch) != 0));
    scannedPageCount += autoHeap.Rescan(flags);
#else
    size_t scannedPageCount = 0;
#endif
    DebugOnly(this->isProcessingRescan = false);
    RECYCLER_PROFILE_EXEC_THREAD_END(background, this, Js::RescanPhase);
    if (background)
    {
        GCETW(GC_BACKGROUNDRESCAN_STOP, (this, 0));
    }
    else
    {
        GCETW(GC_RESCAN_STOP, (this));
    }
    return scannedPageCount;
}
- #if ENABLE_CONCURRENT_GC
void
Recycler::ProcessTrackedObjects()
{
    // Drains queued tracked objects from the main mark context and from all
    // parallel mark contexts. Must not run in partial-collect mode, and only
    // while tracked-object queueing is active.
    GCETW(GC_PROCESS_TRACKED_OBJECT_START, (this));
#if ENABLE_PARTIAL_GC
    Assert(this->clientTrackedObjectList.Empty());
    Assert(!this->inPartialCollectMode);
#endif
    Assert(this->DoQueueTrackedObject());
    // Stop queueing before draining the queues.
    this->queueTrackedObject = false;
    DebugOnly(this->isProcessingTrackedObjects = true);
    markContext.ProcessTracked();
    // If we did a parallel mark, we need to process any queued tracked objects from the parallel mark stack as well.
    // If we didn't, this will do nothing.
    parallelMarkContext1.ProcessTracked();
    parallelMarkContext2.ProcessTracked();
    parallelMarkContext3.ProcessTracked();
    DebugOnly(this->isProcessingTrackedObjects = false);
    GCETW(GC_PROCESS_TRACKED_OBJECT_STOP, (this));
}
- #endif
BOOL
Recycler::RequestConcurrentWrapperCallback()
{
    // Asks the background thread to run the concurrent wrapper callback and
    // blocks until it completes, restoring the previous collection state.
    // Returns false if the background work could not be started (or concurrent
    // GC is compiled out).
#if ENABLE_CONCURRENT_GC
    Assert(!IsConcurrentExecutingState() && !IsConcurrentSweepState());
    // Save the original collection state
    CollectionState oldState = this->collectionState;
    // Get the background thread to start the callback
    if (StartConcurrent(CollectionStateConcurrentWrapperCallback))
    {
        // Wait for the callback to complete
        WaitForConcurrentThread(INFINITE, RecyclerWaitReason::RequestConcurrentCallbackWrapper);
        // The state must not change back until we restore the original state
        Assert(collectionState == CollectionStateConcurrentWrapperCallback);
        this->SetCollectionState(oldState);
        return true;
    }
#endif
    return false;
}
- #if ENABLE_CONCURRENT_GC
- /*------------------------------------------------------------------------------------------------
- * Concurrent
- *------------------------------------------------------------------------------------------------*/
BOOL
Recycler::CollectOnConcurrentThread()
{
    // Runs a collection using the concurrent thread synchronously: starts a
    // synchronous background mark, waits (bounded by a heuristic timeout), then
    // finishes mark, sweep and collection in-thread. Returns false if the
    // background mark could not start or the wait timed out.
#if ENABLE_PARTIAL_GC
    Assert(!inPartialCollectMode);
#endif
#ifdef RECYCLER_TRACE
    PrintCollectTrace(Js::ThreadCollectPhase);
#endif
    this->CollectionBegin<Js::ThreadCollectPhase>();
    // Synchronous concurrent mark
    if (!StartSynchronousBackgroundMark())
    {
        this->CollectionEnd<Js::ThreadCollectPhase>();
        return false;
    }
    const DWORD waitTime = RecyclerHeuristic::FinishConcurrentCollectWaitTime(this->GetRecyclerFlagsTable());
    GCETW(GC_SYNCHRONOUSMARKWAIT_START, (this, waitTime));
    const BOOL waited = WaitForConcurrentThread(waitTime, RecyclerWaitReason::CollectOnConcurrentThread);
    GCETW(GC_SYNCHRONOUSMARKWAIT_STOP, (this, !waited));
    if (!waited)
    {
#ifdef RECYCLER_TRACE
        if (GetRecyclerFlagsTable().Trace.IsEnabled(Js::RecyclerPhase)
            || GetRecyclerFlagsTable().Trace.IsEnabled(Js::ThreadCollectPhase))
        {
            Output::Print(_u("%04X> RC(%p): %s: %s\n"), this->mainThreadId, this, Js::PhaseNames[Js::ThreadCollectPhase], _u("Timeout"));
        }
#endif
#ifdef ENABLE_JS_ETW
        collectionFinishReason = ETWEventGCActivationTrigger::ETWEvent_GC_Trigger_Status_FailedTimeout;
#endif
        this->CollectionEnd<Js::ThreadCollectPhase>();
        return false;
    }
    // If the concurrent thread was done within the time limit, there shouldn't be
    // any object needs to be rescanned
    // CONCURRENT-TODO: Optimize it so we don't rescan in the background if we are still waiting
    // GC-TODO: Unfortunately we can't assert this, as the background code gen thread may still
    // touch GC memory (e.g. FunctionBody), causing write watch and rescan
    // in the background.
    // Assert(markContext.Empty());
    DebugOnly(this->isProcessingRescan = false);
    this->SetCollectionState(CollectionStateMark);
    this->ProcessTrackedObjects();
    this->ProcessMark(false);
    this->EndMark();
    // Partial collect mode is not re-enabled after a non-partial in-thread GC because partial GC heuristics are not adjusted
    // after a full in-thread GC. Enabling partial collect mode causes partial GC heuristics to be reset before the next full
    // in-thread GC, thereby allowing partial GC to kick in more easily without being able to adjust heuristics after the full
    // GCs. Until we have a way of adjusting partial GC heuristics after a full in-thread GC, once partial collect mode is
    // turned off, it will remain off until a concurrent GC happens
    this->Sweep();
    this->CollectionEnd<Js::ThreadCollectPhase>();
    FinishCollection();
    return true;
}
- // explicit instantiation
- template BOOL Recycler::FinishConcurrent<FinishConcurrentOnIdle>();
- template BOOL Recycler::FinishConcurrent<FinishConcurrentOnIdleAtRoot>();
- template BOOL Recycler::FinishConcurrent<FinishConcurrentDefault>();
- template BOOL Recycler::FinishConcurrent<ForceFinishCollection>();
template <CollectionFlags flags>
BOOL
Recycler::FinishConcurrent()
{
    // Finishes an in-progress concurrent collection in-thread when it is safe:
    // either the background thread is not actively executing, or
    // CollectOverride_ForceFinish was requested. Returns false when there is no
    // collection in progress or the background thread is still executing
    // (without force).
    CompileAssert((flags & ~(CollectOverride_AllowDispose | CollectOverride_ForceFinish | CollectOverride_ForceInThread
        | CollectMode_Concurrent | CollectOverride_DisableIdleFinish | CollectOverride_BackgroundFinishMark
        | CollectOverride_SkipStack | CollectOverride_FinishConcurrentTimeout)) == 0);
    if (this->CollectionInProgress())
    {
        Assert(this->IsConcurrentEnabled());
        Assert(IsConcurrentState());
        const BOOL forceFinish = flags & CollectOverride_ForceFinish;
        if (forceFinish || !IsConcurrentExecutingState())
        {
#if ENABLE_BACKGROUND_PAGE_FREEING
            if (CONFIG_FLAG(EnableBGFreeZero))
            {
                if (this->IsConcurrentSweepState())
                {
                    // Help with the background thread to zero and flush zero pages
                    // if we are going to wait anyways.
                    autoHeap.ZeroQueuedPages();
                    autoHeap.FlushBackgroundPages();
                }
            }
#endif
#ifdef RECYCLER_TRACE
            collectionParam.finishOnly = true;
            collectionParam.flags = flags;
#endif
#if ENABLE_CONCURRENT_GC
            // If SkipStack is provided, and we're not forcing the finish (i.e we're not in concurrent executing state)
            // then, it's fine to set the skipStack flag to true, so that during the in-thread find-roots, we'll skip
            // the stack scan
            this->skipStack = ((flags & CollectOverride_SkipStack) != 0) && !forceFinish;
#if DBG
            this->isFinishGCOnIdle = (flags == FinishConcurrentOnIdleAtRoot);
#endif
#endif
            return FinishConcurrentCollectWrapped(flags);
        }
    }
    return false;
}
- template <CollectionFlags flags>
- BOOL
- Recycler::TryFinishConcurrentCollect()
- {
- Assert(this->CollectionInProgress());
- RECYCLER_STATS_INC(this, finishCollectTryCount);
- SetupPostCollectionFlags<flags>();
- const BOOL concurrent = flags & CollectMode_Concurrent;
- const BOOL forceInThread = flags & CollectOverride_ForceInThread;
- Assert(this->IsConcurrentEnabled());
- Assert(IsConcurrentState() || IsCollectionDisabled());
- Assert(!concurrent || !forceInThread);
- if (concurrent && concurrentThread != NULL)
- {
- if (IsConcurrentExecutingState())
- {
- if (!this->priorityBoost)
- {
- uint tickCount = GetTickCount();
- if ((autoHeap.uncollectedAllocBytes > RecyclerHeuristic::Instance.UncollectedAllocBytesConcurrentPriorityBoost)
- || (tickCount - this->tickCountStartConcurrent > RecyclerHeuristic::PriorityBoostTimeout(this->GetRecyclerFlagsTable())))
- {
- #ifdef RECYCLER_TRACE
- if (GetRecyclerFlagsTable().Trace.IsEnabled(Js::RecyclerPhase))
- {
- Output::Print(_u("%04X> RC(%p): %s: "), this->mainThreadId, this, _u("Set priority normal"));
- if (autoHeap.uncollectedAllocBytes > RecyclerHeuristic::Instance.UncollectedAllocBytesConcurrentPriorityBoost)
- {
- Output::Print(_u("AllocBytes=%d (Time=%d)\n"), autoHeap.uncollectedAllocBytes, tickCount - this->tickCountStartConcurrent);
- }
- else
- {
- Output::Print(_u("Time=%d (AllocBytes=%d\n"), tickCount - this->tickCountStartConcurrent, autoHeap.uncollectedAllocBytes);
- }
- }
- #endif
- // Set it to a large number so we don't set the thread priority again
- this->priorityBoost = true;
- // The recycler thread hasn't come back in 5 seconds
- // It either has a large object graph, or it is starving.
- // Set the priority back to normal
- SetThreadPriority(this->concurrentThread, THREAD_PRIORITY_NORMAL);
- }
- }
- return FinishDisposeObjectsWrapped<flags>();
- }
- else if ((flags & CollectOverride_FinishConcurrentTimeout) != 0)
- {
- uint tickCount = GetTickCount();
- // If we haven't gone past the time to call finish collection,
- // simply call FinishDisposeObjects and return
- // Otherwise, actually go ahead and call FinishConcurrentCollectWrapped
- // We do this only if this is a collection that allows finish concurrent to timeout
- // If not, by default, we finish the collection
- if (tickCount <= this->tickCountNextFinishCollection)
- {
- return FinishDisposeObjectsWrapped<flags>();
- }
- }
- }
- return FinishConcurrentCollectWrapped(flags);
- }
- BOOL
- Recycler::IsConcurrentMarkState() const
- {
- return (collectionState & Collection_ConcurrentMark) == Collection_ConcurrentMark;
- }
- BOOL
- Recycler::IsConcurrentMarkExecutingState() const
- {
- return (collectionState & (Collection_ConcurrentMark | Collection_ExecutingConcurrent)) == (Collection_ConcurrentMark | Collection_ExecutingConcurrent);
- }
- BOOL
- Recycler::IsConcurrentResetMarksState() const
- {
- return collectionState == CollectionStateConcurrentResetMarks;
- }
- BOOL
- Recycler::IsInThreadFindRootsState() const
- {
- CollectionState currentCollectionState = collectionState;
- return (currentCollectionState & Collection_FindRoots) && (currentCollectionState != CollectionStateConcurrentFindRoots);
- }
- BOOL
- Recycler::IsConcurrentFindRootState() const
- {
- return collectionState == CollectionStateConcurrentFindRoots;
- }
- BOOL
- Recycler::IsConcurrentExecutingState() const
- {
- return (collectionState & Collection_ExecutingConcurrent);
- }
- BOOL
- Recycler::IsConcurrentSweepExecutingState() const
- {
- return (collectionState & (Collection_ConcurrentSweep | Collection_ExecutingConcurrent)) == (Collection_ConcurrentSweep | Collection_ExecutingConcurrent);
- }
- BOOL
- Recycler::IsConcurrentSweepSetupState() const
- {
- return (collectionState & CollectionStateSetupConcurrentSweep) == CollectionStateSetupConcurrentSweep;
- }
BOOL
Recycler::IsConcurrentSweepState() const
{
    // When allocations during concurrent sweep are enabled, the sweep runs in two
    // passes (each with a wait state); otherwise there is a single sweep state.
#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
    if (CONFIG_FLAG_RELEASE(EnableConcurrentSweepAlloc))
    {
        return this->collectionState == CollectionStateConcurrentSweepPass1 ||
            this->collectionState == CollectionStateConcurrentSweepPass1Wait ||
            this->collectionState == CollectionStateConcurrentSweepPass2 ||
            this->collectionState == CollectionStateConcurrentSweepPass2Wait;
    }
    else
#endif
    {
        return this->collectionState == CollectionStateConcurrentSweep;
    }
}
- BOOL
- Recycler::IsConcurrentState() const
- {
- return (collectionState & Collection_Concurrent);
- }
#if DBG
BOOL
Recycler::IsConcurrentFinishedState() const
{
    // Debug-only: nonzero when the state carries the finish-concurrent bit.
    CollectionState const currentState = this->collectionState;
    return (currentState & Collection_FinishConcurrent);
}
#endif
- bool
- Recycler::InitializeConcurrent(JsUtil::ThreadService *threadService)
- {
- try
- {
- AUTO_NESTED_HANDLED_EXCEPTION_TYPE(ExceptionType_OutOfMemory);
- concurrentWorkDoneEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
- if (concurrentWorkDoneEvent == nullptr)
- {
- throw Js::OutOfMemoryException();
- }
- #if DBG_DUMP
- markContext.GetPageAllocator()->debugName = _u("ConcurrentCollect");
- #endif
- if (!threadService->HasCallback())
- {
- #ifdef IDLE_DECOMMIT_ENABLED
- concurrentIdleDecommitEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
- if (concurrentIdleDecommitEvent == nullptr)
- {
- throw Js::OutOfMemoryException();
- }
- #endif
- concurrentWorkReadyEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
- if (concurrentWorkReadyEvent == nullptr)
- {
- throw Js::OutOfMemoryException();
- }
- }
- }
- catch (Js::OutOfMemoryException)
- {
- Assert(concurrentWorkReadyEvent == nullptr);
- if (concurrentWorkDoneEvent)
- {
- CloseHandle(concurrentWorkDoneEvent);
- concurrentWorkDoneEvent = nullptr;
- }
- #ifdef IDLE_DECOMMIT_ENABLED
- if (concurrentIdleDecommitEvent)
- {
- CloseHandle(concurrentIdleDecommitEvent);
- concurrentIdleDecommitEvent = nullptr;
- }
- #endif
- return false;
- }
- return true;
- }
#pragma prefast(suppress:6262, "Where this function is call should have ample of stack space")
bool Recycler::AbortConcurrent(bool restoreState)
{
    // Stops the concurrent collection (if any): sets the abort flag, waits for
    // the background work (or the thread's demise), and — when restoreState is
    // set — drives the interrupted state machine to CollectionStateNotCollecting.
    // Returns true when the work-done event was the object signaled.
    Assert(!this->CollectionInProgress() || this->IsConcurrentState());
    // In case the thread already died, wait for that too
    HANDLE handle[2] = { concurrentWorkDoneEvent, concurrentThread };
    // Note, concurrentThread will be null if we have a threadService.
    Assert(concurrentThread != NULL || threadService->HasCallback());
    DWORD handleCount = (concurrentThread == NULL ? 1 : 2);
    DWORD ret = WAIT_OBJECT_0;
    if (this->IsConcurrentState())
    {
        this->isAborting = true;
        // Restore normal priority so the background thread can finish promptly.
        if (this->concurrentThread != NULL)
        {
            SetThreadPriority(this->concurrentThread, THREAD_PRIORITY_NORMAL);
        }
        ret = WaitForMultipleObjectsEx(handleCount, handle, FALSE, INFINITE, FALSE);
        this->isAborting = false;
        Assert(this->IsConcurrentFinishedState() || ret == WAIT_OBJECT_0 + 1);
        if (ret == WAIT_OBJECT_0 && restoreState)
        {
            if (collectionState == CollectionStateRescanWait)
            {
                this->ResetMarkCollectionState();
            }
#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
            else if (collectionState == CollectionStateConcurrentSweepPass1Wait)
            {
                // Make sure we don't do another GC after finishing this one.
                this->inExhaustiveCollection = false;
                // Finish the remaining sweep passes in-thread so the heap ends in
                // a consistent, fully-swept state.
                this->FinishSweepPrep();
                this->FinishConcurrentSweepPass1();
                this->SetCollectionState(CollectionStateConcurrentSweepPass2);
                this->recyclerSweepManager->FinishSweep();
                this->FinishConcurrentSweep();
                this->recyclerSweepManager->EndBackground();
                uint sweptBytes = 0;
#ifdef RECYCLER_STATS
                sweptBytes = (uint)collectionStats.objectSweptBytes;
#endif
                GCETW(GC_BACKGROUNDSWEEP_STOP, (this, sweptBytes));
                this->SetCollectionState(CollectionStateTransferSweptWait);
                RECYCLER_PROFILE_EXEC_BACKGROUND_END(this, Js::ConcurrentSweepPhase);
                // AbortConcurrent already consumed the event from the concurrent thread, just signal it so
                // FinishConcurrentCollect can wait for it again.
                SetEvent(this->concurrentWorkDoneEvent);
                EnsureNotCollecting();
            }
#endif
            else if (collectionState == CollectionStateTransferSweptWait)
            {
                // Make sure we don't do another GC after finishing this one.
                this->inExhaustiveCollection = false;
                // Let's just finish the sweep so that GC is in a consistent state, but don't run dispose
                // AbortConcurrent already consumed the event from the concurrent thread, just signal it so
                // FinishConcurrentCollect can wait for it again.
                SetEvent(this->concurrentWorkDoneEvent);
                EnsureNotCollecting();
            }
            else
            {
                Assert(UNREACHED);
            }
            Assert(collectionState == CollectionStateNotCollecting);
            Assert(this->isProcessingRescan == false);
        }
        else
        {
            // If we are shutting down and the wait for concurrent thread failed we fail fast
            // to avoid any use-after-free of the objects in the HeapAllocator's private heap.
            if (!restoreState)
            {
                AssertOrFailFastMsg(ret != WAIT_FAILED, "Wait for concurrent thread failed in AbortConcurrent.");
            }
            // Even if we weren't asked to restore states, we need to clean up the pending guest arena
            CleanupPendingUnroot();
            // Also need to release any pages held by the mark stack, if we abandoned it
            markContext.Abort();
        }
    }
    Assert(!this->hasPendingDeleteGuestArena);
    return ret == WAIT_OBJECT_0;
}
void
Recycler::CleanupPendingUnroot()
{
    // Processes deferred unpin and guest-arena-delete requests: removes pin
    // records whose refcount dropped to zero, and deletes guest arenas flagged
    // pendingDelete.
    Assert(!this->hasPendingConcurrentFindRoot);
    if (hasPendingUnpinnedObject)
    {
        pinnedObjectMap.MapAndRemoveIf([](void * obj, PinRecord const &refCount)
        {
#if defined(CHECK_MEMORY_LEAK) || defined(LEAK_REPORT)
#ifdef STACK_BACK_TRACE
            // A zero refcount must not still hold captured stack back traces.
            Assert(refCount != 0 || refCount.stackBackTraces == nullptr);
#endif
#endif
            return refCount == 0;
        });
        hasPendingUnpinnedObject = false;
    }
    if (hasPendingDeleteGuestArena)
    {
        DebugOnly(bool foundPendingDelete = false);
        DListBase<GuestArenaAllocator>::EditingIterator guestArenaIter(&guestArenaList);
        while (guestArenaIter.Next())
        {
            GuestArenaAllocator& allocator = guestArenaIter.Data();
            if (allocator.pendingDelete)
            {
                allocator.SetLockBlockList(false);
                guestArenaIter.RemoveCurrent(&HeapAllocator::Instance);
                DebugOnly(foundPendingDelete = true);
            }
        }
        hasPendingDeleteGuestArena = false;
        // The flag implies at least one arena was actually pending deletion.
        Assert(foundPendingDelete);
    }
#if DBG
    else
    {
        // Flag not set: verify no arena is marked pendingDelete.
        DListBase<GuestArenaAllocator>::Iterator guestArenaIter(&guestArenaList);
        while (guestArenaIter.Next())
        {
            GuestArenaAllocator& allocator = guestArenaIter.Data();
            Assert(!allocator.pendingDelete);
        }
    }
#endif
}
void
Recycler::FinalizeConcurrent(bool restoreState)
{
    // Tears down concurrency: aborts any concurrent collection, asks the
    // background thread to exit (CollectionStateExit), shuts down the parallel
    // mark threads, and closes the events and thread handle. When a clean exit
    // state is needed (restoreState, object-graph dump, or leak checks),
    // concurrent mark/parallel mark/concurrent sweep are disabled so later
    // passes run in-thread.
    bool needCleanExitState = restoreState;
#if defined(RECYCLER_DUMP_OBJECT_GRAPH)
    needCleanExitState = needCleanExitState || GetRecyclerFlagsTable().DumpObjectGraphOnExit;
#endif
#ifdef LEAK_REPORT
    needCleanExitState = needCleanExitState || GetRecyclerFlagsTable().IsEnabled(Js::LeakReportFlag);
#endif
#ifdef CHECK_MEMORY_LEAK
    needCleanExitState = needCleanExitState || GetRecyclerFlagsTable().CheckMemoryLeak;
#endif
    bool aborted = AbortConcurrent(needCleanExitState);
    SetCollectionState(CollectionStateExit);
    if (aborted && this->concurrentThread != NULL)
    {
        // Wake the dedicated thread so it observes the exit state.
        // In case the thread already died, wait for that too
        HANDLE handle[2] = { concurrentWorkDoneEvent, concurrentThread };
        SetEvent(concurrentWorkReadyEvent);
        SetThreadPriority(this->concurrentThread, THREAD_PRIORITY_NORMAL);
        // In case the thread already died, wait for that too
        DWORD fRet = WaitForMultipleObjectsEx(2, handle, FALSE, INFINITE, FALSE);
        AssertOrFailFastMsg(fRet != WAIT_FAILED, "Wait for concurrent thread failed. Check handles passed to WaitForMultipleObjectsEx.");
    }
    // Shut down the parallel mark helper threads.
    parallelThread1.Shutdown();
    parallelThread2.Shutdown();
#ifdef IDLE_DECOMMIT_ENABLED
    if (concurrentIdleDecommitEvent != nullptr)
    {
        CloseHandle(concurrentIdleDecommitEvent);
        concurrentIdleDecommitEvent = nullptr;
    }
#endif
    CloseHandle(concurrentWorkDoneEvent);
    concurrentWorkDoneEvent = nullptr;
    if (concurrentWorkReadyEvent != NULL)
    {
        CloseHandle(concurrentWorkReadyEvent);
        concurrentWorkReadyEvent = nullptr;
    }
    if (needCleanExitState)
    {
        // We may do another marking pass to look for memory leaks;
        // Since we have shut down the concurrent thread, don't do a parallel mark.
        this->enableConcurrentMark = false;
        this->enableParallelMark = false;
        this->enableConcurrentSweep = false;
    }
    this->threadService = nullptr;
    if (concurrentThread != NULL)
    {
        CloseHandle(concurrentThread);
        this->concurrentThread = nullptr;
    }
}
bool
Recycler::EnableConcurrent(JsUtil::ThreadService *threadService, bool startAllThreads)
{
    // Turns on concurrent mark/sweep support. Uses the thread service's callback
    // when it has one; otherwise creates a dedicated background thread (and,
    // when startAllThreads is set, the parallel mark helper threads up front).
    // On any failure, every partially created resource is released and false is
    // returned with all concurrency flags cleared.
    if (this->disableConcurrent)
    {
        return false;
    }
    if (!this->InitializeConcurrent(threadService))
    {
        return false;
    }
#if ENABLE_DEBUG_CONFIG_OPTIONS
    this->enableConcurrentMark = !CUSTOM_PHASE_OFF1(GetRecyclerFlagsTable(), Js::ConcurrentMarkPhase);
    this->enableParallelMark = !CUSTOM_PHASE_OFF1(GetRecyclerFlagsTable(), Js::ParallelMarkPhase);
    this->enableConcurrentSweep = !CUSTOM_PHASE_OFF1(GetRecyclerFlagsTable(), Js::ConcurrentSweepPhase);
#else
    this->enableConcurrentMark = true;
    this->enableParallelMark = true;
    this->enableConcurrentSweep = true;
#endif
    if (this->enableParallelMark && this->maxParallelism == 1)
    {
        // Disable parallel mark if only 1 CPU
        this->enableParallelMark = false;
    }
    if (threadService->HasCallback())
    {
        // The thread service supplies the background thread; nothing to create.
        this->threadService = threadService;
        return true;
    }
    else
    {
        bool startConcurrentThread = true;
        bool startedParallelThread1 = false;
        bool startedParallelThread2 = false;
        if (startAllThreads)
        {
            // Parallel helpers are only started when parallelism allows them;
            // a failure here also vetoes starting the concurrent thread.
            if (this->enableParallelMark && this->maxParallelism > 2)
            {
                if (!parallelThread1.EnableConcurrent(true))
                {
                    startConcurrentThread = false;
                }
                else
                {
                    startedParallelThread1 = true;
                    if (this->maxParallelism > 3)
                    {
                        if (!parallelThread2.EnableConcurrent(true))
                        {
                            startConcurrentThread = false;
                        }
                        else
                        {
                            startedParallelThread2 = true;
                        }
                    }
                }
            }
        }
        if (startConcurrentThread)
        {
            auto concurrentThread = PlatformAgnostic::Thread::Create(Recycler::ConcurrentThreadStackSize,
                &Recycler::StaticThreadProc, this,
                PlatformAgnostic::Thread::ThreadInitStackSizeParamIsAReservation,
                _u("Chakra Background Recycler"));
            if (concurrentThread != PlatformAgnostic::Thread::InvalidHandle)
            {
                HANDLE concurrentThreadWin32Handle = reinterpret_cast<HANDLE>(concurrentThread);
                // Wait for recycler thread to initialize
                HANDLE handle[2] = { this->concurrentWorkDoneEvent, concurrentThreadWin32Handle };
                DWORD ret = WaitForMultipleObjectsEx(2, handle, FALSE, INFINITE, FALSE);
                if (ret == WAIT_OBJECT_0)
                {
                    this->threadService = threadService;
                    this->concurrentThread = concurrentThreadWin32Handle;
                    return true;
                }
                // The thread exited before signaling readiness.
                CloseHandle(concurrentThreadWin32Handle);
            }
        }
        // Roll back any parallel helper threads we managed to start.
        if (startedParallelThread1)
        {
            parallelThread1.Shutdown();
            if (startedParallelThread2)
            {
                parallelThread2.Shutdown();
            }
        }
    }
    // We failed to start a concurrent thread so we set these back to false and clean up
    this->enableConcurrentMark = false;
    this->enableParallelMark = false;
    this->enableConcurrentSweep = false;
    if (concurrentWorkReadyEvent)
    {
        CloseHandle(concurrentWorkReadyEvent);
        concurrentWorkReadyEvent = nullptr;
    }
    if (concurrentWorkDoneEvent)
    {
        CloseHandle(concurrentWorkDoneEvent);
        concurrentWorkDoneEvent = nullptr;
    }
#ifdef IDLE_DECOMMIT_ENABLED
    if (concurrentIdleDecommitEvent)
    {
        CloseHandle(concurrentIdleDecommitEvent);
        concurrentIdleDecommitEvent = nullptr;
    }
#endif
    return false;
}
- void
- Recycler::ShutdownThread()
- {
- if (this->IsConcurrentEnabled())
- {
- Assert(concurrentThread != NULL || threadService->HasCallback());
- FinalizeConcurrent(false);
- }
- }
- void
- Recycler::DisableConcurrent()
- {
- if (this->IsConcurrentEnabled())
- {
- Assert(concurrentThread != NULL || threadService->HasCallback());
- FinalizeConcurrent(true);
- this->SetCollectionState(CollectionStateNotCollecting);
- }
- }
- bool
- Recycler::StartConcurrent(CollectionState const state)
- {
- // Reset the tick count to detect if the concurrent thread is taking too long
- tickCountStartConcurrent = GetTickCount();
- CollectionState oldState = this->collectionState;
- this->SetCollectionState(state);
- if (threadService->HasCallback())
- {
- Assert(concurrentThread == NULL);
- Assert(concurrentWorkReadyEvent == NULL);
- if (!threadService->Invoke(Recycler::StaticBackgroundWorkCallback, this))
- {
- this->SetCollectionState(oldState);
- return false;
- }
- return true;
- }
- else
- {
- Assert(concurrentThread != NULL);
- Assert(concurrentWorkReadyEvent != NULL);
- SetEvent(concurrentWorkReadyEvent);
- return true;
- }
- }
BOOL
Recycler::StartBackgroundMarkCollect()
{
    // Kicks off an asynchronous concurrent-mark collection; returns whether the
    // background mark was successfully started.
#ifdef RECYCLER_TRACE
    PrintCollectTrace(Js::ConcurrentMarkPhase);
#endif
    this->CollectionBegin<Js::ConcurrentCollectPhase>();
    // Asynchronous concurrent mark
    BOOL success = StartAsynchronousBackgroundMark();
    this->CollectionEnd<Js::ConcurrentCollectPhase>();
    return success;
}
BOOL
Recycler::StartBackgroundMark(bool foregroundResetMark, bool foregroundFindRoots)
{
    // Starts a concurrent mark. Reset-marks and/or find-roots can optionally run
    // in the foreground first; whatever remains is handed to the background
    // thread via StartConcurrent with the appropriate starting state.
    Assert(!this->CollectionInProgress());
    CollectionState backgroundState = CollectionStateConcurrentResetMarks;
    bool doBackgroundFindRoots = true;
    if (foregroundResetMark || foregroundFindRoots)
    {
        // REVIEW: SWB, if there's only write barrier page change, we don't scan and mark?
#ifdef RECYCLER_WRITE_WATCH
        if (!CONFIG_FLAG(ForceSoftwareWriteBarrier))
        {
            RECYCLER_PROFILE_EXEC_BEGIN(this, Js::ResetWriteWatchPhase);
            bool hasWriteWatch = autoHeap.ResetWriteWatch();
            RECYCLER_PROFILE_EXEC_END(this, Js::ResetWriteWatchPhase);
            if (!hasWriteWatch)
            {
                // Disable concurrent mark
                this->enableConcurrentMark = false;
#ifdef ENABLE_JS_ETW
                collectionFinishReason = ETWEventGCActivationTrigger::ETWEvent_GC_Trigger_Status_Failed;
#endif
                return false;
            }
        }
#endif
        // In-thread synchronized GC on the concurrent thread
        ResetMarks(this->enableScanImplicitRoots ? ResetMarkFlags_SynchronizedImplicitRoots : ResetMarkFlags_Synchronized);
        if (foregroundFindRoots)
        {
            this->SetCollectionState(CollectionStateFindRoots);
            FindRoots();
            ScanStack();
            Assert(collectionState == CollectionStateFindRoots);
            backgroundState = CollectionStateConcurrentMark;
            doBackgroundFindRoots = false;
        }
        else
        {
            // Do find roots in the background
            backgroundState = CollectionStateConcurrentFindRoots;
        }
    }
    if (doBackgroundFindRoots)
    {
        this->PrepareBackgroundFindRoots();
    }
    if (!StartConcurrent(backgroundState))
    {
        if (doBackgroundFindRoots)
        {
            this->RevertPrepareBackgroundFindRoots();
        }
        // NOTE(review): assigns collectionState directly rather than calling
        // SetCollectionState as surrounding code does — confirm intentional.
        this->collectionState = CollectionStateNotCollecting;
#ifdef ENABLE_JS_ETW
        collectionFinishReason = ETWEventGCActivationTrigger::ETWEvent_GC_Trigger_Status_Failed;
#endif
        return false;
    }
#ifdef ENABLE_JS_ETW
    collectionFinishReason = ETWEventGCActivationTrigger::ETWEvent_GC_Trigger_Status_StartedConcurrent;
#endif
    return true;
}
- BOOL
- Recycler::StartAsynchronousBackgroundMark()
- {
- // Debug flags to turn off background reset mark or background find roots, default to doing every concurrently
- return StartBackgroundMark(CUSTOM_PHASE_OFF1(GetRecyclerFlagsTable(), Js::BackgroundResetMarksPhase), CUSTOM_PHASE_OFF1(GetRecyclerFlagsTable(), Js::BackgroundFindRootsPhase));
- }
- BOOL
- Recycler::StartSynchronousBackgroundMark()
- {
- return StartBackgroundMark(true, true);
- }
BOOL
Recycler::StartConcurrentSweepCollect()
{
    // In-thread mark followed by a sweep that may continue concurrently.
    Assert(collectionState == CollectionStateNotCollecting);
#ifdef RECYCLER_TRACE
    PrintCollectTrace(Js::ConcurrentSweepPhase);
#endif
    this->CollectionBegin<Js::ConcurrentCollectPhase>();
    this->Mark();
    // We don't have rescan data if we disabled concurrent mark, assume the worst
    // (which means it is harder to get into partial collect mode)
#if ENABLE_PARTIAL_GC
    bool needConcurrentSweep = this->Sweep(RecyclerSweepManager::MaxPartialCollectRescanRootBytes, true, true);
#else
    bool needConcurrentSweep = this->Sweep(true);
#endif
    this->CollectionEnd<Js::ConcurrentCollectPhase>();
    FinishCollection(needConcurrentSweep);
    return true;
}
size_t
Recycler::BackgroundRepeatMark()
{
    // One background repeat-mark iteration: rescan dirty pages, rescan the
    // stack, and drain the mark stack in parallel. Returns the rescanned page
    // count, or Recycler::InvalidScanRootBytes on OOM/abort so the caller can
    // fall back to an in-thread finish.
    RECYCLER_PROFILE_EXEC_BACKGROUND_BEGIN(this, Js::BackgroundRepeatMarkPhase);
    Assert(this->backgroundRescanCount <= RecyclerHeuristic::MaxBackgroundRepeatMarkCount - 1);
    size_t rescannedPageCount = this->BackgroundRescan(RescanFlags_ResetWriteWatch);
    if (this->NeedOOMRescan() || this->isAborting)
    {
        // OOM'ed. Let's not continue
        RECYCLER_PROFILE_EXEC_BACKGROUND_END(this, Js::BackgroundRepeatMarkPhase);
        return Recycler::InvalidScanRootBytes;
    }
    // Rescan the stack
    this->BackgroundScanStack();
    // Process mark stack
    this->DoBackgroundParallelMark();
    if (this->NeedOOMRescan())
    {
        RECYCLER_PROFILE_EXEC_BACKGROUND_END(this, Js::BackgroundRepeatMarkPhase);
        return Recycler::InvalidScanRootBytes;
    }
#ifdef RECYCLER_STATS
    // Record mark stats for this repeat-mark iteration (1-based rescan count).
    Assert(this->backgroundRescanCount >= 1 && this->backgroundRescanCount <= RecyclerHeuristic::MaxBackgroundRepeatMarkCount);
    this->collectionStats.backgroundMarkData[this->backgroundRescanCount - 1] = this->collectionStats.markData;
#endif
    RECYCLER_PROFILE_EXEC_BACKGROUND_END(this, Js::BackgroundRepeatMarkPhase);
    return rescannedPageCount;
}
// Returns the script (main) thread's saved stack top. The value comes from the
// thread context captured earlier (see SAVE_THREAD_CONTEXT in
// PrepareBackgroundFindRoots), not from the live thread.
char* Recycler::GetScriptThreadStackTop()
{
    // We should have already checked if the recycler is thread bound or not
    Assert(mainThreadHandle != NULL);
    return (char*) savedThreadContext.GetStackTop();
}
// Scans the main thread's stack from the saved stack top up to stackBase on
// the background thread. Returns the number of bytes scanned (0 when the scan
// is skipped or not possible).
size_t
Recycler::BackgroundScanStack()
{
    if (this->skipStack)
    {
#ifdef RECYCLER_TRACE
        CUSTOM_PHASE_PRINT_VERBOSE_TRACE1(GetRecyclerFlagsTable(), Js::ScanStackPhase, _u("[%04X] Skipping the stack scan\n"), ::GetCurrentThreadId());
#endif
        return 0;
    }
    if (!this->isInScript || mainThreadHandle == nullptr)
    {
        // No point in scanning the main thread's stack if we are not in script
        // We also can't scan the main thread's stack if we are not thread bounded, and didn't create the main thread's handle
        return 0;
    }
    char* stackTop = this->GetScriptThreadStackTop();
    if (stackTop != nullptr)
    {
        // Stack grows downward: the live region is [stackTop, stackBase).
        size_t size = (char *)stackBase - stackTop;
        ScanMemoryInline<false>((void **)stackTop, size
                ADDRESS_SANITIZER_APPEND(RecyclerScanMemoryType::Stack));
        return size;
    }
    return 0;
}
// Drives the background mark: an initial parallel mark, then one mandatory
// repeat-mark pass, and possibly a second pass if the first one rescanned
// enough pages to make it worthwhile. Bails out early on OOM rescan or abort.
void
Recycler::BackgroundMark()
{
    Assert(this->DoQueueTrackedObject());
    this->backgroundRescanCount = 0;
    this->DoBackgroundParallelMark();
    if (this->NeedOOMRescan() || this->isAborting)
    {
        return;
    }
#ifdef RECYCLER_STATS
    // Slot 0 holds the initial (pre-repeat) mark data.
    this->collectionStats.backgroundMarkData[0] = this->collectionStats.markData;
#endif
    if (PHASE_OFF1(Js::BackgroundRepeatMarkPhase))
    {
        return;
    }
    // We always do one repeat mark pass.
    size_t rescannedPageCount = this->BackgroundRepeatMark();
    if (this->NeedOOMRescan() || this->isAborting)
    {
        // OOM'ed. Let's not continue
        return;
    }
    Assert(rescannedPageCount != Recycler::InvalidScanRootBytes);
    // If we rescanned enough pages in the previous repeat mark pass, then do one more
    // to try to reduce the amount of work we need to do in-thread
    if (rescannedPageCount >= RecyclerHeuristic::BackgroundSecondRepeatMarkThreshold)
    {
        this->BackgroundRepeatMark();
        if (this->NeedOOMRescan() || this->isAborting)
        {
            // OOM'ed. Let's not continue
            return;
        }
    }
}
// Walks the weak-reference regions and, for every live item whose referent is
// marked, tags the item's heapBlock pointer by setting its low bit. The low
// bit thus doubles as a "weak ref already marked" flag for later passes.
void
Recycler::BackgroundMarkWeakRefs()
{
#if ENABLE_WEAK_REFERENCE_REGIONS
    auto iterator = this->weakReferenceRegionList.GetIterator();
    while (iterator.Next())
    {
        RecyclerWeakReferenceRegion region = iterator.Data();
        RecyclerWeakReferenceRegionItem<void*> *items = region.GetPtr();
        size_t count = region.GetCount();
        for (size_t index = 0; index < count; ++index)
        {
            RecyclerWeakReferenceRegionItem<void*> &item = items[index];
            if (item.ptr == nullptr)
            {
                // Empty slot; nothing to mark.
                continue;
            }
            if (((uintptr_t)item.heapBlock & 0x1) == 0x1)
            {
                // This weak reference is already marked
                continue;
            }
            if (item.heapBlock == nullptr)
            {
                // Lazily resolve and cache the heap block for this referent.
                item.heapBlock = this->FindHeapBlock(item.ptr);
                if (item.heapBlock == nullptr)
                {
                    // This isn't a real weak reference, ignore it
                    continue;
                }
            }
            if (item.heapBlock->TestObjectMarkedBit(item.ptr))
            {
                // Referent is marked: tag the cached heapBlock pointer's low
                // bit to record that this weak ref survived.
                item.heapBlock = (HeapBlock*) ((uintptr_t)item.heapBlock | 0x1);
            }
        }
    }
#endif
}
// Clears all mark bits (heap block map and heap buckets) on the background
// thread, in preparation for a fresh concurrent mark pass.
void
Recycler::BackgroundResetMarks()
{
    RECYCLER_PROFILE_EXEC_BACKGROUND_BEGIN(this, Js::BackgroundResetMarksPhase);
    GCETW(GC_BACKGROUNDRESETMARKS_START, (this));
    Assert(IsMarkStackEmpty());
    // Re-arm root scanning state for the new mark pass.
    this->scanPinnedObjectMap = true;
    this->hasScannedInitialImplicitRoots = false;
    heapBlockMap.ResetMarks();
    autoHeap.ResetMarks(this->enableScanImplicitRoots ? ResetMarkFlags_InBackgroundThreadImplicitRoots : ResetMarkFlags_InBackgroundThread);
    GCETW(GC_BACKGROUNDRESETMARKS_STOP, (this));
    RECYCLER_PROFILE_EXEC_BACKGROUND_END(this, Js::BackgroundResetMarksPhase);
}
// Main-thread preparation for a background find-roots pass: saves the thread
// context, freezes pinned-map resizing, and refreshes/cleans the guest arena
// list so the background thread can scan it safely.
// Paired with RevertPrepareBackgroundFindRoots on failure to start.
void
Recycler::PrepareBackgroundFindRoots()
{
    Assert(!this->hasPendingConcurrentFindRoot);
    this->hasPendingConcurrentFindRoot = true;
    // Save the thread context here. The background thread
    // will use this saved context for the marking instead of
    // trying to get the live thread context of the thread
    SAVE_THREAD_CONTEXT();
    // Temporarily disable resize so the background can scan without
    // the memory being freed from under it
    pinnedObjectMap.DisableResize();
    // Update the cached info for big blocks in the guest arena
    DListBase<GuestArenaAllocator>::EditingIterator guestArenaIter(&guestArenaList);
    while (guestArenaIter.Next())
    {
        GuestArenaAllocator& allocator = guestArenaIter.Data();
        allocator.SetLockBlockList(true);
        if (allocator.pendingDelete)
        {
            // Arena was queued for deletion; drop it now, before the
            // background thread starts iterating the list.
            Assert(this->hasPendingDeleteGuestArena);
            allocator.SetLockBlockList(false);
            guestArenaIter.RemoveCurrent(&HeapAllocator::Instance);
        }
        else if (this->backgroundFinishMarkCount == 0)
        {
            // Update the cached info for big block
            allocator.GetBigBlocks(false);
        }
    }
    // All pending-delete arenas have been removed above.
    this->hasPendingDeleteGuestArena = false;
}
// Undoes PrepareBackgroundFindRoots when the background work could not be
// started: clears the pending flag and re-enables pinned-map resizing.
void
Recycler::RevertPrepareBackgroundFindRoots()
{
    Assert(this->hasPendingConcurrentFindRoot);
    this->hasPendingConcurrentFindRoot = false;
    pinnedObjectMap.EnableResize();
}
// Background root scan: marks pinned objects, guest arenas and implicit roots
// while the main thread is held in the concurrent-find-root state (so neither
// the pinned map nor the arena list is mutated underneath us).
// Transitions the state to CollectionStateConcurrentMark and returns the
// number of root bytes scanned.
size_t
Recycler::BackgroundFindRoots()
{
#ifdef RECYCLER_STATS
    size_t lastMarkCount = this->collectionStats.markData.markCount;
#endif
    size_t scanRootBytes = 0;
    Assert(this->IsConcurrentFindRootState());
    Assert(this->hasPendingConcurrentFindRoot);
#if ENABLE_PARTIAL_GC
    Assert(this->inPartialCollectMode || this->DoQueueTrackedObject());
#else
    Assert(this->DoQueueTrackedObject());
#endif
    // Only mark pinned object and guest arenas, which is where most of the roots are.
    // When we go back to the main thread to rescan, we will scan the rest of the root.
    // NOTE: purposefully not marking the transientPinnedObject there. as it is transient :)
    // background mark the pinned object. Since we are in concurrent find root state
    // the main thread won't delete any entries from the map, so concurrent read
    // to the map safe.
    GCETW(GC_BACKGROUNDSCANROOTS_START, (this));
    RECYCLER_PROFILE_EXEC_BACKGROUND_BEGIN(this, Js::BackgroundFindRootsPhase);
    scanRootBytes += this->ScanPinnedObjects</*background = */true>();
    RECYCLER_PROFILE_EXEC_BACKGROUND_BEGIN(this, Js::FindRootArenaPhase);
    // background mark the guest arenas. Since we are in concurrent find root state
    // the main thread won't delete any arena, so concurrent reads to them are ok.
    DListBase<GuestArenaAllocator>::EditingIterator guestArenaIter(&guestArenaList);
    while (guestArenaIter.Next())
    {
        GuestArenaAllocator& allocator = guestArenaIter.Data();
        if (allocator.pendingDelete)
        {
            // Skip guest arena that are already marked for delete
            Assert(this->hasPendingDeleteGuestArena);
            continue;
        }
        scanRootBytes += ScanArena(&allocator, true);
    }
    RECYCLER_PROFILE_EXEC_BACKGROUND_END(this, Js::FindRootArenaPhase);
    this->ScanImplicitRoots();
    RECYCLER_PROFILE_EXEC_BACKGROUND_END(this, Js::BackgroundFindRootsPhase);
    // Root finding is done; release the main thread's find-root hold and move
    // into the concurrent mark state.
    this->hasPendingConcurrentFindRoot = false;
    this->SetCollectionState(CollectionStateConcurrentMark);
    GCETW(GC_BACKGROUNDSCANROOTS_STOP, (this));
    RECYCLER_STATS_ADD(this, rootCount, this->collectionStats.markData.markCount - lastMarkCount);
    return scanRootBytes;
}
// Background finish-mark: rescans dirty pages, re-runs find-roots (temporarily
// flipping the state to ConcurrentFindRoots and back), then processes the mark
// stack. Returns the total rescanned root bytes.
size_t
Recycler::BackgroundFinishMark()
{
#if ENABLE_PARTIAL_GC
    Assert(this->inPartialCollectMode || this->DoQueueTrackedObject());
#else
    Assert(this->DoQueueTrackedObject());
#endif
    Assert(collectionState == CollectionStateConcurrentFinishMark);
    // FinishMarkRescan reports pages; convert to bytes.
    size_t rescannedRootBytes = FinishMarkRescan(true) * AutoSystemInfo::PageSize;
    // BackgroundFindRoots requires (and asserts) the find-roots state, and
    // leaves the state at ConcurrentMark, so restore FinishMark afterwards.
    this->SetCollectionState(CollectionStateConcurrentFindRoots);
    rescannedRootBytes += this->BackgroundFindRoots();
    this->SetCollectionState(CollectionStateConcurrentFinishMark);
    RECYCLER_PROFILE_EXEC_BACKGROUND_BEGIN(this, Js::MarkPhase);
    ProcessMark(true);
    RECYCLER_PROFILE_EXEC_BACKGROUND_END(this, Js::MarkPhase);
    return rescannedRootBytes;
}
// Delegates sweeping of pending objects to the heap.
void
Recycler::SweepPendingObjects(RecyclerSweepManager& recyclerSweepManager)
{
    autoHeap.SweepPendingObjects(recyclerSweepManager);
}
// In-thread transfer of concurrently swept objects back to the heap. Must run
// on the main thread (sweep manager not in background mode) while the
// collection state carries the Collection_TransferSwept bit.
void
Recycler::ConcurrentTransferSweptObjects(RecyclerSweepManager& recyclerSweepManager)
{
    Assert(!recyclerSweepManager.IsBackground());
    Assert((this->collectionState & Collection_TransferSwept) == Collection_TransferSwept);
#if ENABLE_PARTIAL_GC
    if (this->hasBackgroundFinishPartial)
    {
        // The background thread finished the partial collect; clear the
        // partial-collect bookkeeping before transferring.
        this->hasBackgroundFinishPartial = false;
        this->ClearPartialCollect();
    }
#endif
    autoHeap.ConcurrentTransferSweptObjects(recyclerSweepManager);
}
#if ENABLE_PARTIAL_GC
// Partial-collect variant of the swept-object transfer; also main-thread only.
void
Recycler::ConcurrentPartialTransferSweptObjects(RecyclerSweepManager& recyclerSweepManager)
{
    Assert(!recyclerSweepManager.IsBackground());
    Assert(!this->hasBackgroundFinishPartial);
    autoHeap.ConcurrentPartialTransferSweptObjects(recyclerSweepManager);
}
#endif
// Records per-collection overrides from the flags (dispose allowed, skip stack
// scan) and then runs FinishConcurrentCollect through the collection wrapper.
BOOL
Recycler::FinishConcurrentCollectWrapped(CollectionFlags flags)
{
    this->allowDispose = (flags & CollectOverride_AllowDispose) == CollectOverride_AllowDispose;
#if ENABLE_CONCURRENT_GC
    this->skipStack = ((flags & CollectOverride_SkipStack) != 0);
    DebugOnly(this->isConcurrentGCOnIdle = (flags == CollectOnScriptIdle));
#endif
    BOOL collected = collectionWrapper->ExecuteRecyclerCollectionFunction(this, &Recycler::FinishConcurrentCollect, flags);
    return collected;
}
- /**
- * Compute ft1 - ft2, return result as a uint64
- */
- uint64 DiffFileTimes(LPFILETIME ft1, LPFILETIME ft2)
- {
- ULARGE_INTEGER ul1;
- ULARGE_INTEGER ul2;
- ul1.HighPart = ft1->dwHighDateTime;
- ul1.LowPart = ft1->dwLowDateTime;
- ul2.HighPart = ft2->dwHighDateTime;
- ul2.LowPart = ft2->dwLowDateTime;
- ULONGLONG result = ul1.QuadPart - ul2.QuadPart;
- return result;
- }
// Waits (up to waitTime ms) for the background thread to signal
// concurrentWorkDoneEvent. Temporarily restores the background thread to
// normal priority for the wait; on timeout the boost is kept (priorityBoost),
// otherwise priority drops back to below-normal. Optionally records how long
// the script thread was blocked, for telemetry.
// Returns TRUE iff the background work completed within the wait.
BOOL
Recycler::WaitForConcurrentThread(DWORD waitTime, RecyclerWaitReason caller)
{
    Assert(this->IsConcurrentState() || this->collectionState == CollectionStateParallelMark);
    RECYCLER_PROFILE_EXEC_BEGIN(this, Js::ConcurrentWaitPhase);
    if (concurrentThread != NULL)
    {
        // Set the priority back to normal before we wait to ensure it doesn't starve
        SetThreadPriority(this->concurrentThread, THREAD_PRIORITY_NORMAL);
    }
#ifdef ENABLE_BASIC_TELEMETRY
    bool isBlockingMainThread = false;
    Js::Tick start;
    FILETIME kernelTime1;
    FILETIME userTime1;
    HANDLE hProcess = GetCurrentProcess();
    if (this->telemetryStats.ShouldStartTelemetryCapture())
    {
        isBlockingMainThread = this->telemetryStats.IsOnScriptThread();
        if (isBlockingMainThread)
        {
            // Snapshot wall clock and process CPU times before blocking.
            start = Js::Tick::Now();
            FILETIME creationTime;
            FILETIME exitTime;
            GetProcessTimes(hProcess, &creationTime, &exitTime, &kernelTime1, &userTime1);
        }
    }
#endif
    DWORD ret = WaitForSingleObject(concurrentWorkDoneEvent, waitTime);
#ifdef ENABLE_BASIC_TELEMETRY
    if (isBlockingMainThread)
    {
        Js::Tick end = Js::Tick::Now();
        Js::TickDelta elapsed = end - start;
        FILETIME creationTime;
        FILETIME exitTime;
        FILETIME kernelTime2;
        FILETIME userTime2;
        GetProcessTimes(hProcess, &creationTime, &exitTime, &kernelTime2, &userTime2);
        uint64 kernelTime = DiffFileTimes(&kernelTime2 , &kernelTime1);
        uint64 userTime = DiffFileTimes(&userTime2, &userTime1);
        // userTime & kernelTime reported from GetProcessTimes is the number of 100-nanosecond ticks
        // for consistency convert to microseconds.
        kernelTime = kernelTime / 10;
        userTime = userTime / 10;
        this->telemetryStats.IncrementUserThreadBlockedCount(elapsed.ToMicroseconds(), caller);
        this->telemetryStats.IncrementUserThreadBlockedCpuTimeUser(userTime, caller);
        this->telemetryStats.IncrementUserThreadBlockedCpuTimeKernel(kernelTime, caller);
    }
#endif
    if (concurrentThread != NULL)
    {
        if (ret == WAIT_TIMEOUT)
        {
            // Keep the priority boost.
            priorityBoost = true;
        }
        else
        {
            Assert(ret == WAIT_OBJECT_0);
            // Back to below normal
            SetThreadPriority(this->concurrentThread, THREAD_PRIORITY_BELOW_NORMAL);
            priorityBoost = false;
        }
    }
    RECYCLER_PROFILE_EXEC_END(this, Js::ConcurrentWaitPhase);
    return (ret == WAIT_OBJECT_0);
}
- #ifdef ENABLE_DEBUG_CONFIG_OPTIONS
// RAII guard (debug-config only) that makes all heap-block-map pages read-only
// for the duration of a rescan, to catch stray writes. Pages are restored on
// destruction, or earlier via Unprotect().
AutoProtectPages::AutoProtectPages(Recycler* recycler, bool protectEnabled) :
    isReadOnly(false),
    recycler(recycler)
{
    if (protectEnabled)
    {
        recycler->heapBlockMap.MakeAllPagesReadOnly(recycler);
        isReadOnly = true;
    }
}

AutoProtectPages::~AutoProtectPages()
{
    // Safe even if Unprotect() was already called explicitly.
    Unprotect();
}

// Restores read-write protection; idempotent.
void AutoProtectPages::Unprotect()
{
    if (isReadOnly)
    {
        recycler->heapBlockMap.MakeAllPagesReadWrite(recycler);
        isReadOnly = false;
    }
}
- #endif
// Completes (or advances) a concurrent collection from the main thread. Waits
// for the background thread, then dispatches on the resulting state:
//  - RescanWait: rescan/finish mark in-thread, verify, then sweep;
//  - ConcurrentSweepPass1Wait: finish sweep in-thread or hand Pass2 back to
//    the background thread;
//  - TransferSweptWait: transfer swept objects back to the heap.
// Returns FALSE if the wait timed out or the in-thread mark could not finish;
// TRUE once this round of collection work has been driven to completion.
BOOL
Recycler::FinishConcurrentCollect(CollectionFlags flags)
{
    if (!this->IsConcurrentState())
    {
        Assert(false);
        return false;
    }
#ifdef PROFILE_EXEC
    Js::Phase concurrentPhase = Js::ConcurrentCollectPhase;
    // TODO: Remove this workaround for unreferenced local after enabled -profile for GC
    static_cast<Js::Phase>(concurrentPhase);
#endif
#if ENABLE_PARTIAL_GC
    RECYCLER_PROFILE_EXEC_BEGIN2(this, Js::RecyclerPhase,
        (concurrentPhase = ((this->inPartialCollectMode && this->IsConcurrentMarkState())?
            Js::ConcurrentPartialCollectPhase : Js::ConcurrentCollectPhase)));
#else
    RECYCLER_PROFILE_EXEC_BEGIN2(this, Js::RecyclerPhase,
        (concurrentPhase = Js::ConcurrentCollectPhase));
#endif
    // Don't do concurrent sweep if we have priority boosted.
    const BOOL forceInThread = flags & CollectOverride_ForceInThread;
    bool concurrent = (flags & CollectMode_Concurrent) != 0;
    concurrent = concurrent && (!priorityBoost || this->backgroundRescanCount != 1);
#ifdef RECYCLER_TRACE
    collectionParam.priorityBoostConcurrentSweepOverride = priorityBoost;
#endif
    const DWORD waitTime = forceInThread? INFINITE : RecyclerHeuristic::FinishConcurrentCollectWaitTime(this->GetRecyclerFlagsTable());
    GCETW(GC_FINISHCONCURRENTWAIT_START, (this, waitTime));
    const BOOL waited = WaitForConcurrentThread(waitTime, RecyclerWaitReason::FinishConcurrentCollect);
    GCETW(GC_FINISHCONCURRENTWAIT_STOP, (this, !waited));
    if (!waited)
    {
        // Background thread is still busy; caller may retry later.
        RECYCLER_PROFILE_EXEC_END2(this, concurrentPhase, Js::RecyclerPhase);
        return false;
    }
    bool needConcurrentSweep = false;
    if (collectionState == CollectionStateRescanWait)
    {
        GCETW_INTERNAL(GC_START, (this, ETWEvent_ConcurrentRescan));
        GCETW_INTERNAL(GC_START2, (this, ETWEvent_ConcurrentRescan, this->collectionStartReason, this->collectionStartFlags));
#ifdef RECYCLER_TRACE
#if ENABLE_PARTIAL_GC
        PrintCollectTrace(this->inPartialCollectMode ? Js::ConcurrentPartialCollectPhase : Js::ConcurrentMarkPhase, true);
#else
        PrintCollectTrace(Js::ConcurrentMarkPhase, true);
#endif
#endif
        SetCollectionState(CollectionStateRescanFindRoots);
#ifdef ENABLE_DEBUG_CONFIG_OPTIONS
        // TODO: Change this behavior
        // ProtectPagesOnRescan is not supported in PageHeap mode because the page protection is changed
        // outside the PageAllocator in PageHeap mode and so pages are not in the state that the
        // PageAllocator expects when it goes to change the page protection
        // One viable fix is to move the guard page protection logic outside of the heap blocks
        // and into the page allocator
        AssertMsg(!(IsPageHeapEnabled() && GetRecyclerFlagsTable().RecyclerProtectPagesOnRescan), "ProtectPagesOnRescan not supported in page heap mode");
        AutoProtectPages protectPages(this, GetRecyclerFlagsTable().RecyclerProtectPagesOnRescan);
#endif
        const bool backgroundFinishMark = !forceInThread && concurrent && ((flags & CollectOverride_BackgroundFinishMark) != 0);
        const DWORD finishMarkWaitTime = RecyclerHeuristic::BackgroundFinishMarkWaitTime(backgroundFinishMark, GetRecyclerFlagsTable());
        size_t rescanRootBytes = FinishMark(finishMarkWaitTime);
        if (rescanRootBytes == Recycler::InvalidScanRootBytes)
        {
            Assert(this->IsMarkState());
            RECYCLER_PROFILE_EXEC_END2(this, concurrentPhase, Js::RecyclerPhase);
            GCETW_INTERNAL(GC_STOP, (this, ETWEvent_ConcurrentRescan));
            GCETW_INTERNAL(GC_STOP2, (this, ETWEvent_ConcurrentRescan, this->collectionStartReason, this->collectionStartFlags));
            // we timeout trying to mark.
            return false;
        }
#ifdef RECYCLER_STATS
        collectionStats.continueCollectAllocBytes = autoHeap.uncollectedAllocBytes;
#endif
#ifdef RECYCLER_VERIFY_MARK
        if (GetRecyclerFlagsTable().RecyclerVerifyMark)
        {
            this->VerifyMark();
        }
#endif
#ifdef ENABLE_DEBUG_CONFIG_OPTIONS
        // Pages must be writable again before sweeping.
        protectPages.Unprotect();
#endif
#if ENABLE_PARTIAL_GC
        needConcurrentSweep = this->Sweep(rescanRootBytes, concurrent, true);
#else
        needConcurrentSweep = this->Sweep(concurrent);
#endif
        GCETW_INTERNAL(GC_STOP, (this, ETWEvent_ConcurrentRescan));
        GCETW_INTERNAL(GC_STOP2, (this, ETWEvent_ConcurrentRescan, this->collectionStartReason, this->collectionStartFlags));
    }
#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
    else if (collectionState == CollectionStateConcurrentSweepPass1Wait)
    {
        this->FinishSweepPrep();
        if (forceInThread)
        {
            // Finish both remaining sweep passes right here on this thread.
            this->FinishConcurrentSweepPass1();
            this->SetCollectionState(CollectionStateConcurrentSweepPass2);
#ifdef RECYCLER_TRACE
            if (this->GetRecyclerFlagsTable().Trace.IsEnabled(Js::ConcurrentSweepPhase) && CONFIG_FLAG_RELEASE(Verbose))
            {
                Output::Print(_u("[GC #%d] Finishing Sweep Pass2 in-thread. \n"), this->collectionCount);
            }
#endif
            this->recyclerSweepManager->FinishSweep();
            this->FinishConcurrentSweep();
            this->recyclerSweepManager->EndBackground();
            uint sweptBytes = 0;
#ifdef RECYCLER_STATS
            sweptBytes = (uint)collectionStats.objectSweptBytes;
#endif
            GCETW(GC_BACKGROUNDSWEEP_STOP, (this, sweptBytes));
            this->SetCollectionState(CollectionStateTransferSweptWait);
            RECYCLER_PROFILE_EXEC_BACKGROUND_END(this, Js::ConcurrentSweepPhase);
            FinishTransferSwept(flags);
        }
        else
        {
            needConcurrentSweep = true;
            // Signal the background thread to finish concurrent sweep Pass2 for all the buckets.
            StartConcurrent(CollectionStateConcurrentSweepPass2);
        }
    }
#endif
    else
    {
        AssertMsg(this->collectionState == CollectionStateTransferSweptWait, "Do we need to handle this state?");
        FinishTransferSwept(flags);
    }
    RECYCLER_PROFILE_EXEC_END2(this, concurrentPhase, Js::RecyclerPhase);
    FinishCollection(needConcurrentSweep);
    if (!this->CollectionInProgress())
    {
        if (NeedExhaustiveRepeatCollect())
        {
            // Exhaustive collect requested: go around again, without partial mode.
            DoCollect((CollectionFlags)(flags & ~CollectMode_Partial));
        }
        else
        {
            EndCollection();
        }
    }
    return true;
}
// Main-thread epilogue of a concurrent sweep: flushes background-zeroed pages
// and transfers swept objects back to the heap (partial or full variant),
// then ends the sweep.
void
Recycler::FinishTransferSwept(CollectionFlags flags)
{
    GCETW_INTERNAL(GC_START, (this, ETWEvent_ConcurrentTransferSwept));
    GCETW_INTERNAL(GC_START2, (this, ETWEvent_ConcurrentTransferSwept, this->collectionStartReason, this->collectionStartFlags));
    GCETW(GC_FLUSHZEROPAGE_START, (this));
    Assert(collectionState == CollectionStateTransferSweptWait);
#ifdef RECYCLER_TRACE
    PrintCollectTrace(Js::ConcurrentSweepPhase, true);
#endif
    SetCollectionState(CollectionStateTransferSwept);
#if ENABLE_BACKGROUND_PAGE_FREEING
    if (CONFIG_FLAG(EnableBGFreeZero))
    {
        // We should have zeroed all the pages in the background thread
        Assert(!autoHeap.HasZeroQueuedPages());
        autoHeap.FlushBackgroundPages();
    }
#endif
    GCETW(GC_FLUSHZEROPAGE_STOP, (this));
    GCETW(GC_TRANSFERSWEPTOBJECTS_START, (this));
    Assert(this->recyclerSweepManager != nullptr);
    Assert(!this->recyclerSweepManager->IsBackground());
#if ENABLE_PARTIAL_GC
    if (this->inPartialCollectMode)
    {
        ConcurrentPartialTransferSweptObjects(*this->recyclerSweepManager);
    }
    else
#endif
    {
        ConcurrentTransferSweptObjects(*this->recyclerSweepManager);
    }
    recyclerSweepManager->EndSweep();
    GCETW(GC_TRANSFERSWEPTOBJECTS_STOP, (this));
    GCETW_INTERNAL(GC_STOP, (this, ETWEvent_ConcurrentTransferSwept));
    GCETW_INTERNAL(GC_STOP2, (this, ETWEvent_ConcurrentTransferSwept, this->collectionStartReason, this->collectionStartFlags));
}
- #if !DISABLE_SEH
// SEH filter for the background thread: lets assertion failures propagate,
// optionally generates a crash dump, logs diagnostic context on x86 debug
// builds, and always continues the search (never swallows the exception).
int
Recycler::ExceptFilter(LPEXCEPTION_POINTERS pEP)
{
#if DBG
    // Assert exception code
    if (pEP->ExceptionRecord->ExceptionCode == STATUS_ASSERTION_FAILURE)
    {
        return EXCEPTION_CONTINUE_SEARCH;
    }
#endif
#ifdef GENERATE_DUMP
    if (Js::Configuration::Global.flags.IsEnabled(Js::DumpOnCrashFlag))
    {
        Js::Throw::GenerateDump(pEP, Js::Configuration::Global.flags.DumpOnCrash);
    }
#endif
#if DBG && _M_IX86
    // Walk one frame up via EBP to recover approximate return addresses.
    int callerEBP = *((int*)pEP->ContextRecord->Ebp);
    Output::Print(_u("Recycler Concurrent Thread: Uncaught exception: EIP: 0x%X  ExceptionCode: 0x%X  EBP: 0x%X  ReturnAddress: 0x%X ReturnAddress2: 0x%X\n"),
        pEP->ExceptionRecord->ExceptionAddress, pEP->ExceptionRecord->ExceptionCode, pEP->ContextRecord->Eip,
        pEP->ContextRecord->Ebp, *((int*)pEP->ContextRecord->Ebp + 1), *((int*) callerEBP + 1));
#endif
    Output::Flush();
    return EXCEPTION_CONTINUE_SEARCH;
}
- #endif
// Thread entry point for the background GC thread. Wraps ThreadProc in SEH
// (where available) so uncaught exceptions are logged/dumped by ExceptFilter.
unsigned int
Recycler::StaticThreadProc(LPVOID lpParameter)
{
    DWORD ret = (DWORD)-1;
#if !DISABLE_SEH
    __try
    {
#endif
        Recycler * recycler = (Recycler *)lpParameter;
#if DBG
        recycler->concurrentThreadExited = false;
#endif
        ret = recycler->ThreadProc();
#if !DISABLE_SEH
    }
    __except(Recycler::ExceptFilter(GetExceptionInformation()))
    {
        Assert(false);
    }
#endif
    return ret;
}
- void
- Recycler::StaticBackgroundWorkCallback(void * callbackData)
- {
- Recycler * recycler = (Recycler *) callbackData;
- recycler->DoBackgroundWork(true);
- }
#if defined(ENABLE_JS_ETW) && defined(NTBUILD)
// Maps the current collection state to the ETW activation kind used for the
// background-mark GC_START/GC_STOP events.
static ETWEventGCActivationKind
BackgroundMarkETWEventGCActivationKind(CollectionState collectionState)
{
    if (collectionState == CollectionStateConcurrentFinishMark)
    {
        return ETWEvent_ConcurrentFinishMark;
    }
    return ETWEvent_ConcurrentMark;
}
#endif
// Body of one unit of background GC work, dispatched on collectionState:
// wrapper callback, parallel mark assist, the concurrent mark pipeline
// (reset marks -> find roots -> mark -> weak refs, via switch fall-through),
// or concurrent sweep (optionally split into Pass1/Pass2 when allocations
// during concurrent sweep are enabled). Signals concurrentWorkDoneEvent when
// the unit completes. forceForeground suppresses the two-pass sweep split.
void
Recycler::DoBackgroundWork(bool forceForeground)
{
    if (this->collectionState == CollectionStateConcurrentWrapperCallback)
    {
        this->collectionWrapper->ConcurrentCallback();
    }
    else if (this->collectionState == CollectionStateParallelMark)
    {
        this->ProcessParallelMark(false, &this->markContext);
    }
    else if (this->IsConcurrentMarkState())
    {
        RECYCLER_PROFILE_EXEC_BACKGROUND_BEGIN(this, this->collectionState == CollectionStateConcurrentFinishMark?
            Js::BackgroundFinishMarkPhase : Js::ConcurrentMarkPhase);
        GCETW_INTERNAL(GC_START, (this, BackgroundMarkETWEventGCActivationKind(this->collectionState)));
        GCETW_INTERNAL(GC_START2, (this, BackgroundMarkETWEventGCActivationKind(this->collectionState), this->collectionStartReason, this->collectionStartFlags));
        DebugOnly(this->markContext.GetPageAllocator()->SetConcurrentThreadId(::GetCurrentThreadId()));
        Assert(this->enableConcurrentMark);
        if (this->collectionState != CollectionStateConcurrentFinishMark)
        {
            this->StartQueueTrackedObject();
        }
        // Deliberate fall-through chain: entering at an earlier state runs all
        // subsequent mark stages in order.
        switch (this->collectionState)
        {
        case CollectionStateConcurrentResetMarks:
            this->BackgroundResetMarks();
            this->BackgroundResetWriteWatchAll();
            this->SetCollectionState(CollectionStateConcurrentFindRoots);
            // fall-through
        case CollectionStateConcurrentFindRoots:
            this->BackgroundFindRoots();
            this->BackgroundScanStack();
            this->SetCollectionState(CollectionStateConcurrentMark);
            // fall-through
        case CollectionStateConcurrentMark:
            this->BackgroundMark();
            this->collectionState = CollectionStateConcurrentMarkWeakRef;
            // fall-through
        case CollectionStateConcurrentMarkWeakRef:
            this->BackgroundMarkWeakRefs();
            Assert(this->collectionState == CollectionStateConcurrentMarkWeakRef);
            RECORD_TIMESTAMP(concurrentMarkFinishTime);
            break;
        case CollectionStateConcurrentFinishMark:
            this->backgroundRescanRootBytes = this->BackgroundFinishMark();
            Assert(!HasPendingMarkObjects());
            break;
        default:
            Assert(false);
            break;
        };
        // NOTE(review): collectionState may have changed inside the switch, so
        // the GC_STOP activation kind can differ from the GC_START one -- looks
        // intentional (FinishMark keeps its state), but verify.
        GCETW_INTERNAL(GC_STOP, (this, BackgroundMarkETWEventGCActivationKind(this->collectionState)));
        GCETW_INTERNAL(GC_STOP2, (this, BackgroundMarkETWEventGCActivationKind(this->collectionState), this->collectionStartReason, this->collectionStartFlags));
        RECYCLER_PROFILE_EXEC_BACKGROUND_END(this, this->collectionState == CollectionStateConcurrentFinishMark?
            Js::BackgroundFinishMarkPhase : Js::ConcurrentMarkPhase);
        // Hand control back to the main thread for rescan.
        this->SetCollectionState(CollectionStateRescanWait);
        DebugOnly(this->markContext.GetPageAllocator()->ClearConcurrentThreadId());
    }
    else
    {
        Assert(this->enableConcurrentSweep);
#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
        if (CONFIG_FLAG_RELEASE(EnableConcurrentSweepAlloc) && !forceForeground)
        {
            if (this->collectionState == CollectionStateConcurrentSweep)
            {
                // Decide whether this sweep can run as two passes with
                // allocations allowed in between.
                this->DoTwoPassConcurrentSweepPreCheck();
                if (this->AllowAllocationsDuringConcurrentSweep())
                {
                    this->SetCollectionState(CollectionStateConcurrentSweepPass1);
                }
            }
            Assert((!this->AllowAllocationsDuringConcurrentSweep() && this->collectionState == CollectionStateConcurrentSweep) || this->collectionState == CollectionStateConcurrentSweepPass1 || this->collectionState == CollectionStateConcurrentSweepPass2);
        }
        else
#endif
        {
            Assert(this->collectionState == CollectionStateConcurrentSweep);
        }
#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
        if (this->collectionState == CollectionStateConcurrentSweepPass1 ||
            ((!CONFIG_FLAG_RELEASE(EnableConcurrentSweepAlloc) ||!this->AllowAllocationsDuringConcurrentSweep()) && this->collectionState == CollectionStateConcurrentSweep))
#endif
        {
            RECYCLER_PROFILE_EXEC_BACKGROUND_BEGIN(this, Js::ConcurrentSweepPhase);
#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
            if (this->collectionState == CollectionStateConcurrentSweepPass1)
            {
                GCETW_INTERNAL(GC_START, (this, ETWEvent_ConcurrentSweep_Pass1));
                GCETW_INTERNAL(GC_START2, (this, ETWEvent_ConcurrentSweep_Pass1, this->collectionStartReason, this->collectionStartFlags));
            }
            else
#endif
            {
                GCETW_INTERNAL(GC_START, (this, ETWEvent_ConcurrentSweep));
                GCETW_INTERNAL(GC_START2, (this, ETWEvent_ConcurrentSweep, this->collectionStartReason, this->collectionStartFlags));
            }
            GCETW(GC_BACKGROUNDZEROPAGE_START, (this));
#if ENABLE_BACKGROUND_PAGE_ZEROING
            if (CONFIG_FLAG(EnableBGFreeZero))
            {
                // Zero the queued pages first so they are available to be allocated
                autoHeap.BackgroundZeroQueuedPages();
            }
#endif
            GCETW(GC_BACKGROUNDZEROPAGE_STOP, (this));
            GCETW(GC_BACKGROUNDSWEEP_START, (this));
            Assert(this->recyclerSweepManager != nullptr);
            this->recyclerSweepManager->BackgroundSweep();
#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
            if (this->collectionState == CollectionStateConcurrentSweepPass1)
            {
                GCETW_INTERNAL(GC_STOP, (this, ETWEvent_ConcurrentSweep_Pass1));
                GCETW_INTERNAL(GC_STOP2, (this, ETWEvent_ConcurrentSweep_Pass1, this->collectionStartReason, this->collectionStartFlags));
            }
#endif
            // If allocations were allowed during concurrent sweep then the allocableHeapBlock lists still needs to be swept so we
            // will remain in CollectionStateConcurrentSweepPass1Wait state.
#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
            if (CONFIG_FLAG_RELEASE(EnableConcurrentSweepAlloc) && this->AllowAllocationsDuringConcurrentSweep())
            {
                this->SetCollectionState(CollectionStateConcurrentSweepPass1Wait);
            }
#endif
        }
#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
        if (CONFIG_FLAG_RELEASE(EnableConcurrentSweepAlloc))
        {
            if (this->collectionState == CollectionStateConcurrentSweepPass2)
            {
#ifdef RECYCLER_TRACE
                if (this->GetRecyclerFlagsTable().Trace.IsEnabled(Js::ConcurrentSweepPhase) && CONFIG_FLAG_RELEASE(Verbose))
                {
                    Output::Print(_u("[GC #%d] Finishing Sweep Pass2 on background thread. \n"), this->collectionCount);
                }
#endif
#if ENABLE_BACKGROUND_PAGE_ZEROING
                if (CONFIG_FLAG(EnableBGFreeZero))
                {
                    // Drain the zero queue again as we might have free more during sweep
                    // in the background
                    GCETW(GC_BACKGROUNDZEROPAGE_START, (this));
                    autoHeap.BackgroundZeroQueuedPages();
                    GCETW(GC_BACKGROUNDZEROPAGE_STOP, (this));
                }
#endif
                this->FinishConcurrentSweepPass1();
                this->recyclerSweepManager->FinishSweep();
                this->FinishConcurrentSweep();
                this->recyclerSweepManager->EndBackground();
                this->SetCollectionState(CollectionStateConcurrentSweepPass2Wait);
            }
        }
#endif
#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
        if (this->collectionState == CollectionStateConcurrentSweepPass2Wait ||
            (!CONFIG_FLAG_RELEASE(EnableConcurrentSweepAlloc) || !this->AllowAllocationsDuringConcurrentSweep()))
#endif
        {
            uint sweptBytes = 0;
#ifdef RECYCLER_STATS
            sweptBytes = (uint)collectionStats.objectSweptBytes;
#endif
            GCETW(GC_BACKGROUNDSWEEP_STOP, (this, sweptBytes));
#if ENABLE_BACKGROUND_PAGE_ZEROING
            if (CONFIG_FLAG(EnableBGFreeZero))
            {
                // Drain the zero queue again as we might have free more during sweep
                // in the background
                GCETW(GC_BACKGROUNDZEROPAGE_START, (this));
                autoHeap.BackgroundZeroQueuedPages();
                GCETW(GC_BACKGROUNDZEROPAGE_STOP, (this));
            }
#endif
#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
            if (CONFIG_FLAG_RELEASE(EnableConcurrentSweepAlloc) && this->AllowAllocationsDuringConcurrentSweep())
            {
                Assert(this->collectionState == CollectionStateConcurrentSweepPass2Wait);
            }
            else
#endif
            {
                Assert(this->collectionState == CollectionStateConcurrentSweep);
                GCETW_INTERNAL(GC_STOP, (this, ETWEvent_ConcurrentSweep));
                GCETW_INTERNAL(GC_STOP2, (this, ETWEvent_ConcurrentSweep, this->collectionStartReason, this->collectionStartFlags));
            }
            // Sweep done; main thread will transfer the swept objects.
            this->SetCollectionState(CollectionStateTransferSweptWait);
        }
        RECYCLER_PROFILE_EXEC_BACKGROUND_END(this, Js::ConcurrentSweepPhase);
    }
    SetEvent(this->concurrentWorkDoneEvent);
    collectionWrapper->WaitCollectionCallBack();
}
// Main loop of the background GC thread: optionally pins the owning DLL (to
// avoid a loader-lock deadlock at shutdown), signals readiness, then waits for
// work-ready / idle-decommit events until told to exit. Each wakeup runs
// either an idle decommit cycle or DoBackgroundWork.
DWORD
Recycler::ThreadProc()
{
    Assert(this->IsConcurrentEnabled());
#if !defined(_UCRT)
    // We do this before we set the concurrentWorkDoneEvent because GetModuleHandleEx requires
    // getting the loader lock. We could have the following case:
    // Thread A => Initialize Concurrent Thread (C)
    //      C signals Signal Done
    //      C yields since its lower priority
    // Thread A starts running- and is told to shut down.
    // Thread A grabs loader lock as part of the shutdown sequence
    // Thread A waits for C to be done
    // C wakes up now- and tries to grab loader lock.
    // To prevent this deadlock, we call GetModuleHandleEx first and then set the concurrentWorkDoneEvent
    HMODULE dllHandle = NULL;
    if (!GetModuleHandleEx(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS, (LPCTSTR)&Recycler::StaticThreadProc, &dllHandle))
    {
        dllHandle = NULL;
    }
#endif
#if defined(ENABLE_JS_ETW) && ! defined(ENABLE_JS_LTTNG)
    // LTTng has no concept of EventActivityIdControl
    // Create an ETW ActivityId for this thread, to help tools correlate ETW events we generate
    GUID activityId = { 0 };
    auto eventActivityIdControlResult = EventActivityIdControl(EVENT_ACTIVITY_CTRL_CREATE_SET_ID, &activityId);
    Assert(eventActivityIdControlResult == ERROR_SUCCESS);
#endif
    // Signal that the thread has started
    SetEvent(this->concurrentWorkDoneEvent);
    SetThreadPriority(::GetCurrentThread(), THREAD_PRIORITY_BELOW_NORMAL);
#if defined(DBG) && defined(PROFILE_EXEC)
    this->backgroundProfilerPageAllocator.SetConcurrentThreadId(::GetCurrentThreadId());
#endif
#ifdef IDLE_DECOMMIT_ENABLED
    DWORD handleCount = this->concurrentIdleDecommitEvent? 2 : 1;
    HANDLE handles[2] = { this->concurrentWorkReadyEvent, this->concurrentIdleDecommitEvent };
#endif
    do
    {
#ifdef IDLE_DECOMMIT_ENABLED
        needIdleDecommitSignal = IdleDecommitSignal_None;
        // IdleDecommit returns how long to wait before the next decommit tick
        // (INFINITE when there is nothing to decommit).
        DWORD waitTime = autoHeap.IdleDecommit();
        if (waitTime == INFINITE)
        {
            // Race with the main thread: if it already requested a timer while
            // we were deciding to sleep, loop around and decommit again.
            DWORD ret = ::InterlockedCompareExchange(&needIdleDecommitSignal, IdleDecommitSignal_NeedSignal, IdleDecommitSignal_None);
            if (ret == IdleDecommitSignal_NeedTimer)
            {
#if DBG
                if (GetRecyclerFlagsTable().Trace.IsEnabled(Js::IdleDecommitPhase))
                {
                    Output::Print(_u("Recycler Thread IdleDecommit Need Timer\n"));
                    Output::Flush();
                }
#endif
                continue;
            }
        }
#if DBG
        else
        {
            if (GetRecyclerFlagsTable().Trace.IsEnabled(Js::IdleDecommitPhase))
            {
                Output::Print(_u("Recycler Thread IdleDecommit Wait %d\n"), waitTime);
                Output::Flush();
            }
        }
#endif
        DWORD result = WaitForMultipleObjectsEx(handleCount, handles, FALSE, waitTime, FALSE);
        if (result != WAIT_OBJECT_0)
        {
            // Either the idle-decommit event fired or the timer elapsed;
            // in both cases just run the decommit pass again.
            Assert((handleCount == 2 && result == WAIT_OBJECT_0 + 1) || (waitTime != INFINITE && result == WAIT_TIMEOUT));
#if DBG
            if (GetRecyclerFlagsTable().Trace.IsEnabled(Js::IdleDecommitPhase))
            {
                if (result == WAIT_TIMEOUT)
                {
                    Output::Print(_u("Recycler Thread IdleDecommit Timeout: %d\n"), waitTime);
                }
                else
                {
                    Output::Print(_u("Recycler Thread IdleDecommit Signaled\n"));
                }
                Output::Flush();
            }
#endif
            continue;
        }
#else
        DWORD result = WaitForSingleObject(this->concurrentWorkReadyEvent, INFINITE);
        Assert(result == WAIT_OBJECT_0);
#endif
        if (this->collectionState == CollectionStateExit)
        {
#if DBG
            this->concurrentThreadExited = true;
#endif
            break;
        }
        DoBackgroundWork();
    }
    while (true);
    // Final signal so a shutting-down main thread stops waiting on us.
    SetEvent(this->concurrentWorkDoneEvent);
#if !defined(_UCRT)
    if (dllHandle)
    {
        // Drop the DLL pin and terminate the thread atomically.
        FreeLibraryAndExitThread(dllHandle, 0);
    }
    else
#endif
    {
        return 0;
    }
}
- #endif //ENABLE_CONCURRENT_GC
- #if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
- void
- Recycler::DoTwoPassConcurrentSweepPreCheck()
- {
- if (CONFIG_FLAG_RELEASE(EnableConcurrentSweepAlloc))
- {
- // We will do two pass sweep only when BOTH of the following conditions are met:
- // 1. GC was triggered while we are in script, as this is the only case when we will make use of the blocks in the
- // SLIST during concurrent sweep.
- // 2. We are not in a Partial GC.
- // 3. At-least one heap bucket exceeds the RecyclerHeuristic::AllocDuringConcurrentSweepHeapBlockThreshold.
- this->allowAllocationsDuringConcurrentSweepForCollection = this->isInScript && !this->recyclerSweepManager->InPartialCollect();
- // Do the actual 2-pass check only if the first 2 checks pass.
- if (this->allowAllocationsDuringConcurrentSweepForCollection)
- {
- // We fire the ETW event only when the actual 2-pass check is performed. This is to avoid messing up ETL processing of test runs when in partial collect.
- GCETW_INTERNAL(GC_START, (this, ETWEvent_ConcurrentSweep_TwoPassSweepPreCheck));
- GCETW_INTERNAL(GC_START2, (this, ETWEvent_ConcurrentSweep_TwoPassSweepPreCheck, this->collectionStartReason, this->collectionStartFlags));
- this->allowAllocationsDuringConcurrentSweepForCollection = this->autoHeap.DoTwoPassConcurrentSweepPreCheck();
- GCETW_INTERNAL(GC_STOP, (this, ETWEvent_ConcurrentSweep_TwoPassSweepPreCheck));
- GCETW_INTERNAL(GC_STOP2, (this, ETWEvent_ConcurrentSweep_TwoPassSweepPreCheck, this->collectionStartReason, this->collectionStartFlags));
- }
- }
- }
- void
- Recycler::FinishConcurrentSweepPass1()
- {
- GCETW_INTERNAL(GC_START, (this, ETWEvent_ConcurrentSweep_FinishPass1));
- GCETW_INTERNAL(GC_START2, (this, ETWEvent_ConcurrentSweep_FinishPass1, this->collectionStartReason, this->collectionStartFlags));
- if (CONFIG_FLAG_RELEASE(EnableConcurrentSweepAlloc))
- {
- AssertMsg(this->allowAllocationsDuringConcurrentSweepForCollection, "Two pass concurrent sweep must be turned on.");
- this->autoHeap.FinishConcurrentSweepPass1(this->recyclerSweepManagerInstance);
- }
- GCETW_INTERNAL(GC_STOP, (this, ETWEvent_ConcurrentSweep_FinishPass1));
- GCETW_INTERNAL(GC_STOP2, (this, ETWEvent_ConcurrentSweep_FinishPass1, this->collectionStartReason, this->collectionStartFlags));
- }
- void
- Recycler::FinishSweepPrep()
- {
- GCETW_INTERNAL(GC_START, (this, ETWEvent_ConcurrentSweep_FinishSweepPrep));
- GCETW_INTERNAL(GC_START2, (this, ETWEvent_ConcurrentSweep_FinishSweepPrep, this->collectionStartReason, this->collectionStartFlags));
- if (CONFIG_FLAG_RELEASE(EnableConcurrentSweepAlloc))
- {
- AssertMsg(this->allowAllocationsDuringConcurrentSweepForCollection, "Two pass concurrent sweep must be turned on.");
- this->autoHeap.FinishSweepPrep(this->recyclerSweepManagerInstance);
- }
- GCETW_INTERNAL(GC_STOP, (this, ETWEvent_ConcurrentSweep_FinishSweepPrep));
- GCETW_INTERNAL(GC_STOP2, (this, ETWEvent_ConcurrentSweep_FinishSweepPrep, this->collectionStartReason, this->collectionStartFlags));
- }
- void
- Recycler::FinishConcurrentSweep()
- {
- #if SUPPORT_WIN32_SLIST
- GCETW_INTERNAL(GC_START, (this, ETWEvent_ConcurrentSweep_FinishTwoPassSweep));
- GCETW_INTERNAL(GC_START2, (this, ETWEvent_ConcurrentSweep_FinishTwoPassSweep, this->collectionStartReason, this->collectionStartFlags));
- if (CONFIG_FLAG_RELEASE(EnableConcurrentSweepAlloc))
- {
- AssertMsg(this->allowAllocationsDuringConcurrentSweepForCollection, "Two pass concurrent sweep must be turned on.");
- this->autoHeap.FinishConcurrentSweep();
- }
- GCETW_INTERNAL(GC_STOP, (this, ETWEvent_ConcurrentSweep_FinishTwoPassSweep));
- GCETW_INTERNAL(GC_STOP2, (this, ETWEvent_ConcurrentSweep_FinishTwoPassSweep, this->collectionStartReason, this->collectionStartFlags));
- #endif
- }
- #endif
- void
- Recycler::FinishCollection(bool needConcurrentSweep)
- {
- #if ENABLE_CONCURRENT_GC
- Assert(!!this->InConcurrentSweep() == needConcurrentSweep);
- #else
- Assert(!needConcurrentSweep);
- #endif
- if (!needConcurrentSweep)
- {
- FinishCollection();
- }
- else
- {
- FinishDisposeObjects();
- }
- }
- void
- Recycler::FinishCollection()
- {
- #if ENABLE_PARTIAL_GC && ENABLE_CONCURRENT_GC
- Assert(!this->hasBackgroundFinishPartial);
- #endif
- Assert(!this->hasPendingDeleteGuestArena);
- // Reset the time heuristics
- ScheduleNextCollection();
- {
- AutoSwitchCollectionStates collectionState(this,
- /* entry state */ CollectionStatePostCollectionCallback,
- /* exit state */ CollectionStateNotCollecting);
- collectionWrapper->PostCollectionCallBack();
- }
- #if ENABLE_CONCURRENT_GC
- this->backgroundFinishMarkCount = 0;
- #endif
- // Do a partial page decommit now
- if (decommitOnFinish)
- {
- autoHeap.DecommitNow(false);
- this->decommitOnFinish = false;
- }
- RECYCLER_SLOW_CHECK(autoHeap.Check());
- #ifdef RECYCLER_MEMORY_VERIFY
- this->Verify(Js::RecyclerPhase);
- #endif
- #ifdef RECYCLER_FINALIZE_CHECK
- this->VerifyFinalize();
- #endif
- #ifdef ENABLE_JS_ETW
- FlushFreeRecord();
- #endif
- FinishDisposeObjects();
- #ifdef RECYCLER_FINALIZE_CHECK
- if (!this->IsMarkState())
- {
- this->VerifyFinalize();
- }
- #endif
- #ifdef RECYCLER_STATS
- if (CUSTOM_PHASE_STATS1(this->GetRecyclerFlagsTable(), Js::RecyclerPhase))
- {
- PrintCollectStats();
- }
- #endif
- #ifdef PROFILE_RECYCLER_ALLOC
- if (MemoryProfiler::IsTraceEnabled(true))
- {
- PrintAllocStats();
- }
- #endif
- #if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
- this->allowAllocationsDuringConcurrentSweepForCollection = false;
- #endif
- #if ENABLE_MEM_STATS
- autoHeap.ReportMemStats(this);
- #endif
- #ifdef ENABLE_JS_ETW
- this->collectionStartReason = ETWEventGCActivationTrigger::ETWEvent_GC_Trigger_Unknown;
- this->collectionFinishReason = ETWEventGCActivationTrigger::ETWEvent_GC_Trigger_Unknown;
- #endif
- RECORD_TIMESTAMP(currentCollectionEndTime);
- }
- void
- Recycler::SetExternalRootMarker(ExternalRootMarker fn, void * context)
- {
- externalRootMarker = fn;
- externalRootMarkerContext = context;
- }
- void
- Recycler::SetCollectionWrapper(RecyclerCollectionWrapper * wrapper)
- {
- this->collectionWrapper = wrapper;
- #if LARGEHEAPBLOCK_ENCODING
- this->Cookie = wrapper->GetRandomNumber();
- #else
- this->Cookie = 0;
- #endif
- }
- // TODO: (leish) remove following function? seems not make sense to re-allocate in recycler
- char *
- Recycler::Realloc(void* buffer, DECLSPEC_GUARD_OVERFLOW size_t existingBytes, DECLSPEC_GUARD_OVERFLOW size_t requestedBytes, bool truncate)
- {
- Assert(requestedBytes > 0);
- if (existingBytes == 0)
- {
- Assert(buffer == nullptr);
- return Alloc(requestedBytes);
- }
- Assert(buffer != nullptr);
- size_t nbytes = AllocSizeMath::Align(requestedBytes, HeapConstants::ObjectGranularity);
- // Since we successfully allocated, we shouldn't have integer overflow here
- size_t nbytesExisting = AllocSizeMath::Align(existingBytes, HeapConstants::ObjectGranularity);
- Assert(nbytesExisting >= existingBytes);
- if (nbytes == nbytesExisting)
- {
- return (char *)buffer;
- }
- char* replacementBuf = this->Alloc(requestedBytes);
- if (replacementBuf != nullptr)
- {
- // Truncate
- if (existingBytes > requestedBytes && truncate)
- {
- js_memcpy_s(replacementBuf, requestedBytes, buffer, requestedBytes);
- }
- else
- {
- js_memcpy_s(replacementBuf, requestedBytes, buffer, existingBytes);
- }
- }
- if (nbytesExisting > 0)
- {
- this->Free(buffer, nbytesExisting);
- }
- return replacementBuf;
- }
- bool
- Recycler::ForceSweepObject()
- {
- #ifdef RECYCLER_TEST_SUPPORT
- if (BinaryFeatureControl::RecyclerTest())
- {
- if (checkFn != nullptr)
- {
- return true;
- }
- }
- #endif
- #ifdef PROFILE_RECYCLER_ALLOC
- if (trackerDictionary != nullptr)
- {
- // Need to sweep object if we are tracing recycler allocs
- return true;
- }
- #endif
- #ifdef RECYCLER_STATS
- if (CUSTOM_PHASE_STATS1(this->GetRecyclerFlagsTable(), Js::RecyclerPhase))
- {
- return true;
- }
- #endif
- #if DBG
- // Force sweeping the object so we can assert that we are not sweeping objects that are still implicit roots
- if (this->enableScanImplicitRoots)
- {
- return true;
- }
- #endif
- return false;
- }
- bool
- Recycler::ShouldIdleCollectOnExit()
- {
- // Always reset partial heuristics even if we are not doing idle collecting
- // So we don't carry the heuristics to the next script activation
- this->ResetPartialHeuristicCounters();
- if (this->CollectionInProgress())
- {
- #ifdef RECYCLER_TRACE
- CUSTOM_PHASE_PRINT_VERBOSE_TRACE1(GetRecyclerFlagsTable(), Js::IdleCollectPhase, _u("%04X> Skipping scheduling Idle Collect. Reason: Collection in progress\n"), ::GetCurrentThreadId());
- #endif
- // Don't schedule an idle collect if there is a collection going on already
- // IDLE-GC-TODO: Fix ResetHeuristics in the GC so we can detect memory allocation during
- // the concurrent collect and still schedule an idle collect
- return false;
- }
- if (CUSTOM_PHASE_FORCE1(GetRecyclerFlagsTable(), Js::IdleCollectPhase))
- {
- return true;
- }
- uint32 nextTime = tickCountNextCollection - tickDiffToNextCollect;
- // We will try to start a concurrent collect if we are within .9 ms to next scheduled collection, AND,
- // the size of allocation is larger than 32M. This is similar to CollectionAllocation logic, just
- // earlier in both time heuristic and size heuristic, so we can do some concurrent GC while we are
- // not in script.
- if (autoHeap.uncollectedAllocBytes >= RecyclerHeuristic::Instance.MaxUncollectedAllocBytesOnExit
- && GetTickCount() > nextTime)
- {
- #ifdef RECYCLER_TRACE
- if (CUSTOM_PHASE_TRACE1(GetRecyclerFlagsTable(), Js::IdleCollectPhase))
- {
- if (autoHeap.uncollectedAllocBytes >= RecyclerHeuristic::Instance.MaxUncollectedAllocBytesOnExit)
- {
- Output::Print(_u("%04X> Idle collect on exit: alloc %d\n"), ::GetCurrentThreadId(), autoHeap.uncollectedAllocBytes);
- }
- else
- {
- Output::Print(_u("%04X> Idle collect on exit: time %d\n"), ::GetCurrentThreadId(), tickCountNextCollection - GetTickCount());
- }
- Output::Flush();
- }
- #endif
- this->CollectNow<CollectNowConcurrent>();
- return false;
- }
- Assert(!this->CollectionInProgress());
- // Idle GC use the size heuristic. Only need to schedule on if we passed it.
- return (autoHeap.uncollectedAllocBytes >= RecyclerHeuristic::IdleUncollectedAllocBytesCollection);
- }
- #if ENABLE_CONCURRENT_GC
- bool
- RecyclerParallelThread::StartConcurrent()
- {
- if (this->recycler->threadService->HasCallback())
- {
- // This may be the first time. If so, initialize by creating the doneEvent.
- if (this->concurrentWorkDoneEvent == NULL)
- {
- this->concurrentWorkDoneEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
- if (this->concurrentWorkDoneEvent == nullptr)
- {
- return false;
- }
- }
- Assert(concurrentThread == NULL);
- Assert(concurrentWorkReadyEvent == NULL);
- // Invoke thread service to process work
- if (!this->recycler->threadService->Invoke(RecyclerParallelThread::StaticBackgroundWorkCallback, this))
- {
- return false;
- }
- }
- else
- {
- // This may be the first time. If so, initialize and create thread.
- if (this->concurrentWorkDoneEvent == NULL)
- {
- return this->EnableConcurrent(false);
- }
- else
- {
- Assert(this->concurrentThread != NULL);
- Assert(this->concurrentWorkReadyEvent != NULL);
- // signal that thread has been initialized
- SetEvent(this->concurrentWorkReadyEvent);
- }
- }
- return true;
- }
- bool
- RecyclerParallelThread::EnableConcurrent(bool waitForThread)
- {
- this->synchronizeOnStartup = waitForThread;
- Assert(this->concurrentWorkDoneEvent == NULL);
- Assert(this->concurrentWorkReadyEvent == NULL);
- Assert(this->concurrentThread == NULL);
- this->concurrentWorkDoneEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
- if (this->concurrentWorkDoneEvent == nullptr)
- {
- return false;
- }
- this->concurrentWorkReadyEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
- if (this->concurrentWorkReadyEvent == nullptr)
- {
- CloseHandle(this->concurrentWorkDoneEvent);
- this->concurrentWorkDoneEvent = NULL;
- return false;
- }
- auto threadHandle = PlatformAgnostic::Thread::Create(Recycler::ConcurrentThreadStackSize,
- &RecyclerParallelThread::StaticThreadProc, this,
- PlatformAgnostic::Thread::ThreadInitStackSizeParamIsAReservation, _u("Chakra Recycler Parallel Thread"));
- if (threadHandle != PlatformAgnostic::Thread::InvalidHandle)
- {
- this->concurrentThread = reinterpret_cast<HANDLE>(threadHandle);
- }
- if (this->concurrentThread != nullptr && waitForThread)
- {
- // Wait for thread to initialize
- HANDLE handle[2] = { this->concurrentWorkDoneEvent, this->concurrentThread };
- DWORD ret = WaitForMultipleObjectsEx(2, handle, FALSE, INFINITE, FALSE);
- if (ret == WAIT_OBJECT_0)
- {
- return true;
- }
- CloseHandle(concurrentThread);
- concurrentThread = nullptr;
- }
- if (this->concurrentThread == nullptr)
- {
- CloseHandle(this->concurrentWorkDoneEvent);
- this->concurrentWorkDoneEvent = NULL;
- CloseHandle(this->concurrentWorkReadyEvent);
- this->concurrentWorkReadyEvent = NULL;
- return false;
- }
- return true;
- }
- template <uint parallelId>
- void
- Recycler::ParallelWorkFunc()
- {
- Assert(parallelId == 0 || parallelId == 1);
- MarkContext * markContext = (parallelId == 0 ? &this->parallelMarkContext2 : &this->parallelMarkContext3);
- switch (this->collectionState)
- {
- case CollectionStateParallelMark:
- this->ProcessParallelMark(false, markContext);
- break;
- case CollectionStateBackgroundParallelMark:
- this->ProcessParallelMark(true, markContext);
- break;
- default:
- Assert(false);
- }
- }
- void
- RecyclerParallelThread::WaitForConcurrent()
- {
- Assert(this->concurrentThread != NULL || this->recycler->threadService->HasCallback());
- Assert(this->concurrentWorkDoneEvent != NULL);
- DWORD ret = WaitForSingleObject(concurrentWorkDoneEvent, INFINITE);
- Assert(ret == WAIT_OBJECT_0);
- }
- void
- RecyclerParallelThread::Shutdown()
- {
- Assert(this->recycler->collectionState == CollectionStateExit);
- if (this->recycler->threadService->HasCallback())
- {
- if (this->concurrentWorkDoneEvent != NULL)
- {
- CloseHandle(this->concurrentWorkDoneEvent);
- this->concurrentWorkDoneEvent = NULL;
- }
- }
- else
- {
- if (this->concurrentThread != NULL)
- {
- HANDLE handles[2] = { concurrentWorkDoneEvent, concurrentThread };
- SetEvent(concurrentWorkReadyEvent);
- // During process shutdown, OS might kill this (recycler parallel i.e. concurrent) thread and it will not get chance to signal concurrentWorkDoneEvent.
- // When we are performing shutdown of main (recycler) thread here, if we wait on concurrentWorkDoneEvent, WaitForObject() will never return.
- // Hence wait for concurrentWorkDoneEvent + concurrentThread so if concurrentThread got killed, WaitForObject() will return and we will
- // proceed further.
- DWORD fRet = WaitForMultipleObjectsEx(2, handles, FALSE, INFINITE, FALSE);
- AssertMsg(fRet != WAIT_FAILED, "Check handles passed to WaitForMultipleObjectsEx.");
- CloseHandle(this->concurrentWorkDoneEvent);
- this->concurrentWorkDoneEvent = NULL;
- CloseHandle(this->concurrentWorkReadyEvent);
- this->concurrentWorkReadyEvent = NULL;
- CloseHandle(this->concurrentThread);
- this->concurrentThread = NULL;
- }
- }
- Assert(this->concurrentThread == NULL);
- Assert(this->concurrentWorkReadyEvent == NULL);
- Assert(this->concurrentWorkDoneEvent == NULL);
- }
- // static
- unsigned int
- RecyclerParallelThread::StaticThreadProc(LPVOID lpParameter)
- {
- DWORD ret = (DWORD)-1;
- #if !DISABLE_SEH
- __try
- {
- #endif
- RecyclerParallelThread * parallelThread = (RecyclerParallelThread *)lpParameter;
- Recycler * recycler = parallelThread->recycler;
- RecyclerParallelThread::WorkFunc workFunc = parallelThread->workFunc;
- Assert(recycler->IsConcurrentEnabled());
- #if !defined(_UCRT)
- HMODULE dllHandle = NULL;
- if (!GetModuleHandleEx(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS, (LPCTSTR)&RecyclerParallelThread::StaticThreadProc, &dllHandle))
- {
- dllHandle = NULL;
- }
- #endif
- #if defined(ENABLE_JS_ETW) && ! defined(ENABLE_JS_LTTNG)
- // LTTng has no concept of EventActivityIdControl
- // Create an ETW ActivityId for this thread, to help tools correlate ETW events we generate
- GUID activityId = { 0 };
- auto eventActivityIdControlResult = EventActivityIdControl(EVENT_ACTIVITY_CTRL_CREATE_SET_ID, &activityId);
- Assert(eventActivityIdControlResult == ERROR_SUCCESS);
- #endif
- // If this thread is created on demand we already have work to process and do not need to wait
- bool mustWait = parallelThread->synchronizeOnStartup;
- do
- {
- if (mustWait)
- {
- // Signal completion and wait for next work
- SetEvent(parallelThread->concurrentWorkDoneEvent);
- DWORD result = WaitForSingleObject(parallelThread->concurrentWorkReadyEvent, INFINITE);
- Assert(result == WAIT_OBJECT_0);
- }
- if (recycler->collectionState == CollectionStateExit)
- {
- // Exit thread
- break;
- }
- // Invoke the workFunc to do real work
- (recycler->*workFunc)();
- // We always wait after the first time
- mustWait = true;
- }
- while (true);
- // Signal to main thread that we have stopped processing and will shut down.
- // Note that after this point, we cannot access anything on the Recycler instance
- // because the main thread may have torn it down already.
- SetEvent(parallelThread->concurrentWorkDoneEvent);
- #if !defined(_UCRT)
- if (dllHandle)
- {
- FreeLibraryAndExitThread(dllHandle, 0);
- }
- #endif
- ret = 0;
- #if !DISABLE_SEH
- }
- __except(Recycler::ExceptFilter(GetExceptionInformation()))
- {
- Assert(false);
- }
- #endif
- return ret;
- }
- // static
- void
- RecyclerParallelThread::StaticBackgroundWorkCallback(void * callbackData)
- {
- RecyclerParallelThread * parallelThread = (RecyclerParallelThread *)callbackData;
- Recycler * recycler = parallelThread->recycler;
- RecyclerParallelThread::WorkFunc workFunc = parallelThread->workFunc;
- (recycler->*workFunc)();
- SetEvent(parallelThread->concurrentWorkDoneEvent);
- }
- #endif
- #ifdef RECYCLER_TRACE
- void
- Recycler::CaptureCollectionParam(CollectionFlags flags, bool repeat)
- {
- collectionParam.priorityBoostConcurrentSweepOverride = false;
- collectionParam.repeat = repeat;
- collectionParam.finishOnly = false;
- collectionParam.flags = flags;
- collectionParam.uncollectedAllocBytes = autoHeap.uncollectedAllocBytes;
- #if ENABLE_PARTIAL_GC
- collectionParam.uncollectedNewPageCountPartialCollect = this->uncollectedNewPageCountPartialCollect;
- collectionParam.inPartialCollectMode = inPartialCollectMode;
- collectionParam.uncollectedNewPageCount = autoHeap.uncollectedNewPageCount;
- collectionParam.unusedPartialCollectFreeBytes = autoHeap.unusedPartialCollectFreeBytes;
- #endif
- }
- void
- Recycler::PrintCollectTrace(Js::Phase phase, bool finish, bool noConcurrentWork)
- {
- if (GetRecyclerFlagsTable().Trace.IsEnabled(Js::RecyclerPhase) ||
- GetRecyclerFlagsTable().Trace.IsEnabled(phase))
- {
- const BOOL allocSize = collectionParam.flags & CollectHeuristic_AllocSize;
- const BOOL timedIfScriptActive = collectionParam.flags & CollectHeuristic_TimeIfScriptActive;
- const BOOL timedIfInScript = collectionParam.flags & CollectHeuristic_TimeIfInScript;
- const BOOL timed = (timedIfScriptActive && isScriptActive) || (timedIfInScript && isInScript) || (collectionParam.flags & CollectHeuristic_Time);
- const BOOL concurrent = collectionParam.flags & CollectMode_Concurrent;
- const BOOL finishConcurrent = collectionParam.flags & CollectOverride_FinishConcurrent;
- const BOOL exhaustive = collectionParam.flags & CollectMode_Exhaustive;
- const BOOL forceInThread = collectionParam.flags & CollectOverride_ForceInThread;
- const BOOL forceFinish = collectionParam.flags & CollectOverride_ForceFinish;
- #if ENABLE_PARTIAL_GC
- BOOL partial = collectionParam.flags & CollectMode_Partial ;
- #endif
- Output::Print(_u("%04X> RC(%p): %s%s%s%s%s%s%s:"), this->mainThreadId, this,
- collectionParam.domCollect? _u("[DOM] ") : _u(""),
- collectionParam.repeat? _u("[Repeat] "): _u(""),
- this->inDispose? _u("[Nested]") : _u(""),
- forceInThread? _u("Force In thread ") : _u(""),
- finish? _u("Finish ") : _u(""),
- exhaustive? _u("Exhaustive ") : _u(""),
- Js::PhaseNames[phase]);
- if (noConcurrentWork)
- {
- Assert(finish);
- Output::Print(_u(" No concurrent work"));
- }
- else if (collectionParam.finishOnly)
- {
- Assert(!collectionParam.repeat);
- Assert(finish);
- #if ENABLE_CONCURRENT_GC
- if (collectionState == CollectionStateRescanWait)
- {
- if (forceFinish)
- {
- Output::Print(_u(" Force finish mark and sweep"));
- }
- else if (concurrent && this->enableConcurrentSweep)
- {
- if (!collectionParam.priorityBoostConcurrentSweepOverride)
- {
- Output::Print(_u(" Finish mark and start concurrent sweep"));
- }
- else
- {
- Output::Print(_u(" Finish mark and sweep (priority boost overridden concurrent sweep)"));
- }
- }
- else
- {
- Output::Print(_u(" Finish mark and sweep"));
- }
- }
- else
- {
- Assert(collectionState == CollectionStateTransferSweptWait);
- if (forceFinish)
- {
- Output::Print(_u(" Force finish sweep"));
- }
- else
- {
- Output::Print(_u(" Finish sweep"));
- }
- }
- #endif // ENABLE_CONCURRENT_GC
- }
- else
- {
- if (finish && !concurrent)
- {
- Output::Print(_u(" Not concurrent collect"));
- }
- if ((finish && finishConcurrent))
- {
- Output::Print(_u(" No heuristic"));
- }
- #if ENABLE_CONCURRENT_GC
- else if (finish && priorityBoost)
- {
- Output::Print(_u(" Priority boost no heuristic"));
- }
- #endif
- else
- {
- Output::SkipToColumn(50);
- bool byteCountUsed = false;
- bool timeUsed = false;
- #if ENABLE_PARTIAL_GC
- bool newPageUsed = false;
- if (phase == Js::PartialCollectPhase || phase == Js::ConcurrentPartialCollectPhase)
- {
- Assert(collectionParam.flags & CollectMode_Partial);
- newPageUsed = !!allocSize;
- }
- else if (partial && collectionParam.inPartialCollectMode && collectionParam.uncollectedNewPageCount > collectionParam.uncollectedNewPageCountPartialCollect)
- {
- newPageUsed = true;
- }
- else
- #endif // ENABLE_PARTIAL_GC
- {
- byteCountUsed = !!allocSize;
- timeUsed = !!timed;
- }
- Output::Print(byteCountUsed? _u("*") : (allocSize? _u(" ") : _u("~")));
- Output::Print(_u("B:%8d "), collectionParam.uncollectedAllocBytes);
- Output::Print(timeUsed? _u("*") : (timed? _u(" ") : _u("~")));
- Output::Print(_u("T:%4d "), -collectionParam.timeDiff);
- #if ENABLE_PARTIAL_GC
- if (collectionParam.inPartialCollectMode)
- {
- Output::Print(_u("L:%5d "), collectionParam.uncollectedNewPageCountPartialCollect);
- }
- else
- {
- Output::Print(_u("L:----- "));
- }
- Output::Print(newPageUsed? _u("*") : (partial? _u(" ") : _u("~")));
- Output::Print(_u("P:%5d(%9d) "), collectionParam.uncollectedNewPageCount, collectionParam.uncollectedNewPageCount * AutoSystemInfo::PageSize);
- Output::Print(_u("U:%8d"), collectionParam.unusedPartialCollectFreeBytes);
- #endif // ENABLE_PARTIAL_GC
- }
- }
- Output::Print(_u("\n"));
- Output::Flush();
- }
- }
- #endif
- #ifdef RECYCLER_TRACE
- void
- Recycler::PrintBlockStatus(HeapBucket * heapBucket, HeapBlock * heapBlock, char16 const * statusMessage)
- {
- if (this->GetRecyclerFlagsTable().Trace.IsEnabled(Js::ConcurrentSweepPhase) && CONFIG_FLAG_RELEASE(Verbose))
- {
- Output::Print(_u("[GC #%d] [HeapBucket 0x%p] HeapBlock 0x%p %s [CollectionState: %d] \n"), this->collectionCount, heapBucket, heapBlock, statusMessage, static_cast<CollectionState>(this->collectionState));
- }
- }
- #endif
- #ifdef RECYCLER_STATS
- void
- Recycler::PrintHeapBlockStats(char16 const * name, HeapBlock::HeapBlockType type)
- {
- size_t liveCount = collectionStats.heapBlockCount[type] - collectionStats.heapBlockFreeCount[type];
- Output::Print(_u(" %6s : %5d %5d %5d %5.1f"), name,
- liveCount, collectionStats.heapBlockFreeCount[type], collectionStats.heapBlockCount[type],
- (double)collectionStats.heapBlockFreeCount[type] / (double)collectionStats.heapBlockCount[type] * 100);
- if (type < HeapBlock::SmallBlockTypeCount)
- {
- Output::Print(_u(" : %5d %6.1f : %5d %6.1f"),
- collectionStats.heapBlockSweptCount[type],
- (double)collectionStats.heapBlockSweptCount[type] / (double)liveCount * 100,
- collectionStats.heapBlockConcurrentSweptCount[type],
- (double)collectionStats.heapBlockConcurrentSweptCount[type] / (double)collectionStats.heapBlockSweptCount[type] * 100);
- }
- }
- void
- Recycler::PrintHeapBlockMemoryStats(char16 const * name, HeapBlock::HeapBlockType type)
- {
- size_t allocableFreeByteCount = collectionStats.heapBlockFreeByteCount[type];
- #if ENABLE_PARTIAL_GC
- size_t partialUnusedBytes = 0;
- if (this->enablePartialCollect)
- {
- partialUnusedBytes = allocableFreeByteCount
- - collectionStats.smallNonLeafHeapBlockPartialReuseBytes[type];
- allocableFreeByteCount -= partialUnusedBytes;
- }
- #endif
- size_t blockPages = type < HeapBlock::HeapBlockType::SmallAllocBlockTypeCount ?
- SmallAllocationBlockAttributes::PageCount : MediumAllocationBlockAttributes::PageCount;
- size_t totalByteCount = (collectionStats.heapBlockCount[type] - collectionStats.heapBlockFreeCount[type]) * blockPages * AutoSystemInfo::PageSize;
- size_t liveByteCount = totalByteCount - collectionStats.heapBlockFreeByteCount[type];
- Output::Print(_u(" %6s: %10d %10d"), name, liveByteCount, allocableFreeByteCount);
- #if ENABLE_PARTIAL_GC
- if (this->enablePartialCollect &&
- (type == HeapBlock::HeapBlockType::SmallNormalBlockType
- || type == HeapBlock::HeapBlockType::SmallFinalizableBlockType
- #ifdef RECYCLER_WRITE_BARRIER
- || type == HeapBlock::HeapBlockType::SmallNormalBlockWithBarrierType
- || type == HeapBlock::HeapBlockType::SmallFinalizableBlockWithBarrierType
- #endif
- || type == HeapBlock::HeapBlockType::MediumNormalBlockType
- || type == HeapBlock::HeapBlockType::MediumFinalizableBlockType
- #ifdef RECYCLER_WRITE_BARRIER
- || type == HeapBlock::HeapBlockType::MediumNormalBlockWithBarrierType
- || type == HeapBlock::HeapBlockType::MediumFinalizableBlockWithBarrierType
- #endif
- ))
- {
- Output::Print(_u(" %10d"), partialUnusedBytes);
- }
- else
- #endif
- {
- Output::Print(_u(" "));
- }
- Output::Print(_u(" %10d %6.1f"), totalByteCount,
- (double)allocableFreeByteCount / (double)totalByteCount * 100);
- #if ENABLE_PARTIAL_GC
- if (this->enablePartialCollect &&
- (type == HeapBlock::HeapBlockType::SmallNormalBlockType
- || type == HeapBlock::HeapBlockType::SmallFinalizableBlockType
- #ifdef RECYCLER_WRITE_BARRIER
- || type == HeapBlock::HeapBlockType::SmallNormalBlockWithBarrierType
- || type == HeapBlock::HeapBlockType::SmallFinalizableBlockWithBarrierType
- #endif
- || type == HeapBlock::HeapBlockType::MediumNormalBlockType
- || type == HeapBlock::HeapBlockType::MediumFinalizableBlockType
- #ifdef RECYCLER_WRITE_BARRIER
- || type == HeapBlock::HeapBlockType::MediumNormalBlockWithBarrierType
- || type == HeapBlock::HeapBlockType::MediumFinalizableBlockWithBarrierType
- #endif
- ))
- {
- Output::Print(_u(" %6.1f"), (double)partialUnusedBytes / (double)totalByteCount * 100);
- }
- #endif
- }
- void
- Recycler::PrintHeuristicCollectionStats()
- {
- Output::Print(_u("---------------------------------------------------------------------------------------------------------------\n"));
- Output::Print(_u("GC Trigger : %10s %10s %10s"), _u("Start"), _u("Continue"), _u("Finish"));
- #if ENABLE_PARTIAL_GC
- if (this->enablePartialCollect)
- {
- Output::Print(_u(" | Heuristics : %10s %10s %5s"), _u(""), _u(""), _u("%"));
- }
- #endif
- Output::Print(_u("\n"));
- Output::Print(_u("---------------------------------------------------------------------------------------------------------------\n"));
- Output::Print(_u(" Alloc bytes : %10d %10d %10d"), collectionStats.startCollectAllocBytes, collectionStats.continueCollectAllocBytes, this->autoHeap.uncollectedAllocBytes);
- #if ENABLE_PARTIAL_GC
- if (this->enablePartialCollect)
- {
- Output::Print(_u(" | Cost : %10d %10d %5.1f"), collectionStats.rescanRootBytes, collectionStats.estimatedPartialReuseBytes, collectionStats.collectCost * 100);
- }
- #endif
- Output::Print(_u("\n"));
- #if ENABLE_PARTIAL_GC
- if (this->enablePartialCollect)
- {
- Output::Print(_u(" | Efficacy : %10s %10s %5.1f\n"), _u(""), _u(""), collectionStats.collectEfficacy * 100);
- }
- #endif
- #if ENABLE_PARTIAL_GC
- if (this->enablePartialCollect)
- {
- Output::Print(_u(" New page : %10d %10s %10d"), collectionStats.startCollectNewPageCount, _u(""), autoHeap.uncollectedNewPageCount);
- Output::Print(_u(" | Partial Uncollect New Page : %10d %10d"), collectionStats.uncollectedNewPageCountPartialCollect * AutoSystemInfo::PageSize, this->uncollectedNewPageCountPartialCollect * AutoSystemInfo::PageSize);
- Output::Print(_u("\n"));
- }
- #endif
- Output::Print(_u(" Finish try : %10d %10s %10s"), collectionStats.finishCollectTryCount, _u(""), _u(""));
- #if ENABLE_PARTIAL_GC
- if (this->enablePartialCollect)
- {
- Output::Print(_u(" | Partial Reuse Min Free Bytes : %10d"), collectionStats.partialCollectSmallHeapBlockReuseMinFreeBytes * AutoSystemInfo::PageSize);
- }
- #endif
- Output::Print(_u("\n"));
- }
// Print the mark-phase statistics table.
// Columns: try-mark totals (count/bytes), the breakdown of try-marks that did
// not produce a mark (null, unaligned, non-recycler memory, interior misses,
// plus a derived "invalid" remainder), and the mark/scan/track/root counts.
// Percentages are computed against tryMarkCount, nonMark, or markCount as
// appropriate for each column.
void
Recycler::PrintMarkCollectionStats()
{
    // Try-marks (regular + interior) that did not result in a new mark.
    // remarkCount and markCount are subtracted because those attempts did mark.
    size_t nonMark = collectionStats.tryMarkCount + collectionStats.tryMarkInteriorCount - collectionStats.remarkCount - collectionStats.markData.markCount;
    // Non-marks not explained by any tracked rejection reason.
    size_t invalidCount = nonMark - collectionStats.tryMarkNullCount - collectionStats.tryMarkUnalignedCount
        - collectionStats.tryMarkNonRecyclerMemoryCount
        - collectionStats.tryMarkInteriorNonRecyclerMemoryCount
        - collectionStats.tryMarkInteriorNullCount;
    // Marked objects that were not scanned for interior pointers are leaves.
    size_t leafCount = collectionStats.markData.markCount - collectionStats.scanCount;
    Output::Print(_u("---------------------------------------------------------------------------------------------------------------\n"));
    Output::Print(_u("Try Mark :%9s %5s %10s | Non-Mark : %9s %5s | Mark :%9s %5s \n"), _u("Count"), _u("%"), _u("Bytes"), _u("Count"), _u("%"), _u("Count"), _u("%"));
    Output::Print(_u("---------------------------------------------------------------------------------------------------------------\n"));
    Output::Print(_u(" TryMark :%9d %10d | Null : %9d %5.1f | Scan :%9d %5.1f\n"),
        collectionStats.tryMarkCount, collectionStats.tryMarkCount * sizeof(void *),
        collectionStats.tryMarkNullCount, (double)collectionStats.tryMarkNullCount / (double)nonMark * 100,
        collectionStats.scanCount, (double)collectionStats.scanCount / (double)collectionStats.markData.markCount * 100);
    Output::Print(_u(" Non-Mark :%9d %5.1f | Unaligned : %9d %5.1f | Leaf :%9d %5.1f\n"),
        nonMark, (double)nonMark / (double)collectionStats.tryMarkCount * 100,
        collectionStats.tryMarkUnalignedCount, (double)collectionStats.tryMarkUnalignedCount / (double)nonMark * 100,
        leafCount, (double)leafCount / (double)collectionStats.markData.markCount * 100);
    Output::Print(_u(" Mark :%9d %5.1f %10d | Non GC : %9d %5.1f | Track :%9d\n"),
        collectionStats.markData.markCount, (double)collectionStats.markData.markCount / (double)collectionStats.tryMarkCount * 100, collectionStats.markData.markBytes,
        collectionStats.tryMarkNonRecyclerMemoryCount, (double)collectionStats.tryMarkNonRecyclerMemoryCount / (double)nonMark * 100,
        collectionStats.trackCount);
    Output::Print(_u(" Remark :%9d %5.1f | Invalid : %9d %5.1f \n"),
        collectionStats.remarkCount, (double)collectionStats.remarkCount / (double)collectionStats.tryMarkCount * 100,
        invalidCount, (double)invalidCount / (double)nonMark * 100);
    Output::Print(_u(" TryMark Int:%9d %10d | Null Int : %9d %5.1f | Root :%9d | New :%9d\n"),
        collectionStats.tryMarkInteriorCount, collectionStats.tryMarkInteriorCount * sizeof(void *),
        collectionStats.tryMarkInteriorNullCount, (double)collectionStats.tryMarkInteriorNullCount / (double)nonMark * 100,
        collectionStats.rootCount, collectionStats.markThruNewObjCount);
    Output::Print(_u(" | Non GC Int: %9d %5.1f | Stack :%9d | NewFalse:%9d\n"),
        collectionStats.tryMarkInteriorNonRecyclerMemoryCount, (double)collectionStats.tryMarkInteriorNonRecyclerMemoryCount / (double)nonMark * 100,
        collectionStats.stackCount, collectionStats.markThruFalseNewObjCount);
}
- void
- Recycler::PrintBackgroundCollectionStat(RecyclerCollectionStats::MarkData const& markData)
- {
- Output::Print(_u("BgSmall : %5d %6d %10d | BgLarge : %5d %6d %10d | BgMark :%9d "),
- markData.rescanPageCount,
- markData.rescanObjectCount,
- markData.rescanObjectByteCount,
- markData.rescanLargePageCount,
- markData.rescanLargeObjectCount,
- markData.rescanLargeByteCount,
- markData.markCount);
- double markRatio = (double)markData.markCount / (double)collectionStats.markData.markCount * 100;
- if (markRatio == 100.0)
- {
- Output::Print(_u(" 100"));
- }
- else
- {
- Output::Print(_u("%4.1f"), markRatio);
- }
- Output::Print(_u("\n"));
- }
// Print one row per background (repeat) mark pass, converting the cumulative
// rescan counters into per-pass deltas.
void
Recycler::PrintBackgroundCollectionStats()
{
#if ENABLE_CONCURRENT_GC
    Output::Print(_u("---------------------------------------------------------------------------------------------------------------\n"));
    Output::Print(_u("BgSmall : %5s %6s %10s | BgLarge : %5s %6s %10s | BgMark :%9s %4s %s\n"),
        _u("Pages"), _u("Count"), _u("Bytes"), _u("Pages"), _u("Count"), _u("Bytes"), _u("Count"), _u("%"), _u("NonLeafBytes %"));
    Output::Print(_u("---------------------------------------------------------------------------------------------------------------\n"));
    this->PrintBackgroundCollectionStat(collectionStats.backgroundMarkData[0]);
    for (uint repeatCount = 1; repeatCount < RecyclerHeuristic::MaxBackgroundRepeatMarkCount; repeatCount++)
    {
        // A zero mark count means no further repeat passes ran.
        if (collectionStats.backgroundMarkData[repeatCount].markCount == 0)
        {
            break;
        }
        // The rescan counters accumulate across passes; subtract the previous
        // pass to report per-pass numbers. NOTE: this mutates collectionStats
        // in place, so the table is only printable once per collection.
        collectionStats.backgroundMarkData[repeatCount].rescanPageCount -= collectionStats.backgroundMarkData[repeatCount - 1].rescanPageCount;
        collectionStats.backgroundMarkData[repeatCount].rescanObjectCount -= collectionStats.backgroundMarkData[repeatCount - 1].rescanObjectCount;
        collectionStats.backgroundMarkData[repeatCount].rescanObjectByteCount -= collectionStats.backgroundMarkData[repeatCount - 1].rescanObjectByteCount;
        collectionStats.backgroundMarkData[repeatCount].rescanLargePageCount -= collectionStats.backgroundMarkData[repeatCount - 1].rescanLargePageCount;
        collectionStats.backgroundMarkData[repeatCount].rescanLargeObjectCount -= collectionStats.backgroundMarkData[repeatCount - 1].rescanLargeObjectCount;
        collectionStats.backgroundMarkData[repeatCount].rescanLargeByteCount -= collectionStats.backgroundMarkData[repeatCount - 1].rescanLargeByteCount;
        this->PrintBackgroundCollectionStat(collectionStats.backgroundMarkData[repeatCount]);
    }
#endif
}
// Print the per-block-type memory table (live/free/unused/total bytes and
// percentages) followed by the small-block zeroing summary since the last GC.
void
Recycler::PrintMemoryStats()
{
    Output::Print(_u("----------------------------------------------------------------------------------------------------------------\n"));
    Output::Print(_u("Memory (Bytes) %4s %10s %10s %10s %6s %6s\n"), _u("Live"), _u("Free"), _u("Unused"), _u("Total"), _u("Free%"), _u("Unused%"));
    Output::Print(_u("----------------------------------------------------------------------------------------------------------------\n"));
    PrintHeapBlockMemoryStats(_u("Small"), HeapBlock::SmallNormalBlockType);
    Output::Print(_u("\n"));
    PrintHeapBlockMemoryStats(_u("SmFin"), HeapBlock::SmallFinalizableBlockType);
    Output::Print(_u("\n"));
#ifdef RECYCLER_WRITE_BARRIER
    PrintHeapBlockMemoryStats(_u("SmSWB"), HeapBlock::SmallNormalBlockWithBarrierType);
    Output::Print(_u("\n"));
    PrintHeapBlockMemoryStats(_u("SmFinSWB"), HeapBlock::SmallFinalizableBlockWithBarrierType);
    Output::Print(_u("\n"));
#endif
    PrintHeapBlockMemoryStats(_u("SmLeaf"), HeapBlock::SmallLeafBlockType);
    Output::Print(_u("\n"));
    PrintHeapBlockMemoryStats(_u("Medium"), HeapBlock::MediumNormalBlockType);
    Output::Print(_u("\n"));
    PrintHeapBlockMemoryStats(_u("MdFin"), HeapBlock::MediumFinalizableBlockType);
    Output::Print(_u("\n"));
#ifdef RECYCLER_WRITE_BARRIER
    PrintHeapBlockMemoryStats(_u("MdSWB"), HeapBlock::MediumNormalBlockWithBarrierType);
    Output::Print(_u("\n"));
    PrintHeapBlockMemoryStats(_u("MdFinSWB"), HeapBlock::MediumFinalizableBlockWithBarrierType);
    Output::Print(_u("\n"));
#endif
    PrintHeapBlockMemoryStats(_u("MdLeaf"), HeapBlock::MediumLeafBlockType);
    Output::Print(_u("\n"));
    // Large blocks: unused = total - used - free (bytes reserved but neither
    // occupied by live objects nor on a free list).
    size_t largeHeapBlockUnusedByteCount = collectionStats.largeHeapBlockTotalByteCount - collectionStats.largeHeapBlockUsedByteCount
        - collectionStats.heapBlockFreeByteCount[HeapBlock::LargeBlockType];
    Output::Print(_u(" Large: %10d %10d %10d %10d %6.1f %6.1f\n"),
        collectionStats.largeHeapBlockUsedByteCount,
        collectionStats.heapBlockFreeByteCount[HeapBlock::LargeBlockType],
        largeHeapBlockUnusedByteCount,
        collectionStats.largeHeapBlockTotalByteCount,
        (double)collectionStats.heapBlockFreeByteCount[HeapBlock::LargeBlockType] / (double)collectionStats.largeHeapBlockTotalByteCount * 100,
        (double)largeHeapBlockUnusedByteCount / (double)collectionStats.largeHeapBlockTotalByteCount * 100);
    Output::Print(_u("\nSmall heap block zeroing stats since last GC\n"));
    // The four %d arguments below aggregate: normal (+barrier variants and
    // medium-normal), finalizable (+barrier variant), leaf (small + medium),
    // and the total number of zeroed blocks.
    Output::Print(_u("Number of blocks with sweep state empty: normal=%d finalizable=%d leaf=%d\nNumber of blocks zeroed: %d\n"),
        collectionStats.numEmptySmallBlocks[HeapBlock::SmallNormalBlockType]
#ifdef RECYCLER_WRITE_BARRIER
        + collectionStats.numEmptySmallBlocks[HeapBlock::SmallNormalBlockWithBarrierType]
#endif
        , collectionStats.numEmptySmallBlocks[HeapBlock::SmallFinalizableBlockType]
#ifdef RECYCLER_WRITE_BARRIER
        + collectionStats.numEmptySmallBlocks[HeapBlock::SmallFinalizableBlockWithBarrierType]
#endif
        + collectionStats.numEmptySmallBlocks[HeapBlock::MediumNormalBlockType]
#ifdef RECYCLER_WRITE_BARRIER
        + collectionStats.numEmptySmallBlocks[HeapBlock::MediumNormalBlockWithBarrierType]
#endif
        , collectionStats.numEmptySmallBlocks[HeapBlock::MediumFinalizableBlockType]
#ifdef RECYCLER_WRITE_BARRIER
        + collectionStats.numEmptySmallBlocks[HeapBlock::MediumFinalizableBlockWithBarrierType]
#endif
        , collectionStats.numEmptySmallBlocks[HeapBlock::SmallLeafBlockType]
        + collectionStats.numEmptySmallBlocks[HeapBlock::MediumLeafBlockType],
        collectionStats.numZeroedOutSmallBlocks);
}
- void
- Recycler::PrintCollectStats()
- {
- Output::Print(_u("Collection Stats:\n"));
- PrintHeuristicCollectionStats();
- PrintMarkCollectionStats();
- PrintBackgroundCollectionStats();
- size_t freeCount = collectionStats.objectSweptCount - collectionStats.objectSweptFreeListCount;
- size_t freeBytes = collectionStats.objectSweptBytes - collectionStats.objectSweptFreeListBytes;
- Output::Print(_u("---------------------------------------------------------------------------------------------------------------\n"));
- #if ENABLE_PARTIAL_GC || ENABLE_CONCURRENT_GC
- Output::Print(_u("Rescan : %5s %6s %10s | Track : %5s | "), _u("Pages"), _u("Count"), _u("Bytes"), _u("Count"));
- #endif
- Output::Print(_u("Sweep : %7s | SweptObj : %5s %5s %10s\n"), _u("Count"), _u("Count"), _u("%%"), _u("Bytes"));
- Output::Print(_u("---------------------------------------------------------------------------------------------------------------\n"));
- Output::Print(_u(" Small : "));
- #if ENABLE_PARTIAL_GC || ENABLE_CONCURRENT_GC
- Output::Print(_u("%5d %6d %10d | "), collectionStats.markData.rescanPageCount, collectionStats.markData.rescanObjectCount, collectionStats.markData.rescanObjectByteCount);
- #endif
- #if ENABLE_CONCURRENT_GC
- Output::Print(_u("Process : %5d | "), collectionStats.trackedObjectCount);
- #else
- Output::Print(_u(" | "));
- #endif
- Output::Print(_u(" Scan : %7d | Free : %6d %5.1f %10d\n"),
- collectionStats.objectSweepScanCount,
- freeCount, (double)freeCount / (double) collectionStats.objectSweptCount * 100, freeBytes);
- Output::Print(_u(" Large : "));
- #if ENABLE_PARTIAL_GC || ENABLE_CONCURRENT_GC
- Output::Print(_u("%5d %6d %10d | "),
- collectionStats.markData.rescanLargePageCount, collectionStats.markData.rescanLargeObjectCount, collectionStats.markData.rescanLargeByteCount);
- #endif
- #if ENABLE_PARTIAL_GC
- Output::Print(_u("Client : %5d | "), collectionStats.clientTrackedObjectCount);
- #else
- Output::Print(_u(" | "));
- #endif
- Output::Print(_u(" Finalize : %7d | Free List: %6d %5.1f %10d\n"),
- collectionStats.finalizeSweepCount,
- collectionStats.objectSweptFreeListCount, (double)collectionStats.objectSweptFreeListCount / (double) collectionStats.objectSweptCount * 100, collectionStats.objectSweptFreeListBytes);
- Output::Print(_u("---------------------------------------------------------------------------------------------------------------\n"));
- Output::Print(_u("SweptBlk: Live Free Total Free%% : Swept Swept%% : CSwpt CSwpt%%"));
- #if ENABLE_PARTIAL_GC
- if (this->enablePartialCollect)
- {
- Output::Print(_u(" | Partial : Count Bytes Existing"));
- }
- #endif
- Output::Print(_u("\n"));
- Output::Print(_u("---------------------------------------------------------------------------------------------------------------\n"));
- PrintHeapBlockStats(_u("Small"), HeapBlock::SmallNormalBlockType);
- #if ENABLE_PARTIAL_GC
- if (this->enablePartialCollect)
- {
- Output::Print(_u(" | Reuse : %5d %10d %10d"),
- collectionStats.smallNonLeafHeapBlockPartialReuseCount[HeapBlock::SmallNormalBlockType],
- collectionStats.smallNonLeafHeapBlockPartialReuseBytes[HeapBlock::MediumNormalBlockType],
- collectionStats.smallNonLeafHeapBlockPartialReuseCount[HeapBlock::SmallNormalBlockType] * AutoSystemInfo::PageSize
- - collectionStats.smallNonLeafHeapBlockPartialReuseBytes[HeapBlock::SmallNormalBlockType]);
- }
- #endif
- Output::Print(_u("\n"));
- PrintHeapBlockStats(_u("SmFin"), HeapBlock::SmallFinalizableBlockType);
- #if ENABLE_PARTIAL_GC
- if (this->enablePartialCollect)
- {
- Output::Print(_u(" | Unused : %5d %10d %10d"),
- collectionStats.smallNonLeafHeapBlockPartialUnusedCount[HeapBlock::SmallFinalizableBlockType],
- collectionStats.smallNonLeafHeapBlockPartialUnusedBytes[HeapBlock::SmallFinalizableBlockType],
- collectionStats.smallNonLeafHeapBlockPartialUnusedCount[HeapBlock::SmallFinalizableBlockType] * AutoSystemInfo::PageSize
- - collectionStats.smallNonLeafHeapBlockPartialUnusedBytes[HeapBlock::SmallFinalizableBlockType]);
- }
- #endif
- Output::Print(_u("\n"));
- #ifdef RECYCLER_WRITE_BARRIER
- PrintHeapBlockStats(_u("SmSWB"), HeapBlock::SmallNormalBlockWithBarrierType);
- #if ENABLE_PARTIAL_GC
- if (this->enablePartialCollect)
- {
- Output::Print(_u(" | Unused : %5d %10d %10d"),
- collectionStats.smallNonLeafHeapBlockPartialUnusedCount[HeapBlock::SmallNormalBlockWithBarrierType],
- collectionStats.smallNonLeafHeapBlockPartialUnusedBytes[HeapBlock::SmallNormalBlockWithBarrierType],
- collectionStats.smallNonLeafHeapBlockPartialUnusedCount[HeapBlock::SmallNormalBlockWithBarrierType] * AutoSystemInfo::PageSize
- - collectionStats.smallNonLeafHeapBlockPartialUnusedBytes[HeapBlock::SmallNormalBlockWithBarrierType]);
- }
- #endif
- Output::Print(_u("\n"));
- PrintHeapBlockStats(_u("SmFin"), HeapBlock::SmallFinalizableBlockWithBarrierType);
- #if ENABLE_PARTIAL_GC
- if (this->enablePartialCollect)
- {
- Output::Print(_u(" | Unused : %5d %10d %10d"),
- collectionStats.smallNonLeafHeapBlockPartialUnusedCount[HeapBlock::SmallFinalizableBlockWithBarrierType],
- collectionStats.smallNonLeafHeapBlockPartialUnusedBytes[HeapBlock::SmallFinalizableBlockWithBarrierType],
- collectionStats.smallNonLeafHeapBlockPartialUnusedCount[HeapBlock::SmallFinalizableBlockWithBarrierType] * AutoSystemInfo::PageSize
- - collectionStats.smallNonLeafHeapBlockPartialUnusedBytes[HeapBlock::SmallFinalizableBlockWithBarrierType]);
- }
- #endif
- Output::Print(_u("\n"));
- #endif
- // TODO: This seems suspicious- why are we looking at smallNonLeaf while print out leaf...
- PrintHeapBlockStats(_u("SmLeaf"), HeapBlock::SmallLeafBlockType);
- #if ENABLE_PARTIAL_GC
- if (this->enablePartialCollect)
- {
- Output::Print(_u(" | ReuseFin : %5d %10d %10d"),
- collectionStats.smallNonLeafHeapBlockPartialReuseCount[HeapBlock::SmallFinalizableBlockType],
- collectionStats.smallNonLeafHeapBlockPartialReuseBytes[HeapBlock::SmallFinalizableBlockType],
- collectionStats.smallNonLeafHeapBlockPartialReuseCount[HeapBlock::SmallFinalizableBlockType] * AutoSystemInfo::PageSize
- - collectionStats.smallNonLeafHeapBlockPartialReuseBytes[HeapBlock::SmallFinalizableBlockType]);
- }
- #endif
- Output::Print(_u("\n"));
- PrintHeapBlockStats(_u("Medium"), HeapBlock::MediumNormalBlockType);
- #if ENABLE_PARTIAL_GC
- if (this->enablePartialCollect)
- {
- Output::Print(_u(" | Reuse : %5d %10d %10d"),
- collectionStats.smallNonLeafHeapBlockPartialReuseCount[HeapBlock::MediumNormalBlockType],
- collectionStats.smallNonLeafHeapBlockPartialReuseBytes[HeapBlock::MediumNormalBlockType],
- collectionStats.smallNonLeafHeapBlockPartialReuseCount[HeapBlock::MediumNormalBlockType] * AutoSystemInfo::PageSize
- - collectionStats.smallNonLeafHeapBlockPartialReuseBytes[HeapBlock::MediumNormalBlockType]);
- }
- #endif
- Output::Print(_u("\n"));
- PrintHeapBlockStats(_u("MdFin"), HeapBlock::MediumFinalizableBlockType);
- #if ENABLE_PARTIAL_GC
- if (this->enablePartialCollect)
- {
- Output::Print(_u(" | Unused : %5d %10d %10d"),
- collectionStats.smallNonLeafHeapBlockPartialUnusedCount[HeapBlock::MediumFinalizableBlockType],
- collectionStats.smallNonLeafHeapBlockPartialUnusedBytes[HeapBlock::MediumFinalizableBlockType],
- collectionStats.smallNonLeafHeapBlockPartialUnusedCount[HeapBlock::MediumFinalizableBlockType] * AutoSystemInfo::PageSize
- - collectionStats.smallNonLeafHeapBlockPartialUnusedBytes[HeapBlock::MediumFinalizableBlockType]);
- }
- #endif
- Output::Print(_u("\n"));
- #ifdef RECYCLER_WRITE_BARRIER
- PrintHeapBlockStats(_u("MdSWB"), HeapBlock::MediumNormalBlockWithBarrierType);
- #if ENABLE_PARTIAL_GC
- if (this->enablePartialCollect)
- {
- Output::Print(_u(" | Unused : %5d %10d %10d"),
- collectionStats.smallNonLeafHeapBlockPartialUnusedCount[HeapBlock::MediumNormalBlockWithBarrierType],
- collectionStats.smallNonLeafHeapBlockPartialUnusedBytes[HeapBlock::MediumNormalBlockWithBarrierType],
- collectionStats.smallNonLeafHeapBlockPartialUnusedCount[HeapBlock::MediumNormalBlockWithBarrierType] * AutoSystemInfo::PageSize
- - collectionStats.smallNonLeafHeapBlockPartialUnusedBytes[HeapBlock::MediumNormalBlockWithBarrierType]);
- }
- #endif
- Output::Print(_u("\n"));
- PrintHeapBlockStats(_u("MdFin"), HeapBlock::MediumFinalizableBlockWithBarrierType);
- #if ENABLE_PARTIAL_GC
- if (this->enablePartialCollect)
- {
- Output::Print(_u(" | Unused : %5d %10d %10d"),
- collectionStats.smallNonLeafHeapBlockPartialUnusedCount[HeapBlock::MediumFinalizableBlockWithBarrierType],
- collectionStats.smallNonLeafHeapBlockPartialUnusedBytes[HeapBlock::MediumFinalizableBlockWithBarrierType],
- collectionStats.smallNonLeafHeapBlockPartialUnusedCount[HeapBlock::MediumFinalizableBlockWithBarrierType] * AutoSystemInfo::PageSize
- - collectionStats.smallNonLeafHeapBlockPartialUnusedBytes[HeapBlock::MediumFinalizableBlockWithBarrierType]);
- }
- #endif
- Output::Print(_u("\n"));
- #endif
- // TODO: This seems suspicious- why are we looking at smallNonLeaf while print out leaf...
- PrintHeapBlockStats(_u("MdLeaf"), HeapBlock::MediumNormalBlockType);
- #if ENABLE_PARTIAL_GC
- if (this->enablePartialCollect)
- {
- Output::Print(_u(" | ReuseFin : %5d %10d %10d"),
- collectionStats.smallNonLeafHeapBlockPartialReuseCount[HeapBlock::MediumFinalizableBlockType],
- collectionStats.smallNonLeafHeapBlockPartialReuseBytes[HeapBlock::MediumFinalizableBlockType],
- collectionStats.smallNonLeafHeapBlockPartialReuseCount[HeapBlock::MediumFinalizableBlockType] * AutoSystemInfo::PageSize
- - collectionStats.smallNonLeafHeapBlockPartialReuseBytes[HeapBlock::MediumFinalizableBlockType]);
- }
- #endif
- Output::Print(_u("\n"));
- // TODO: This can't possibly be correct...check on this later
- PrintHeapBlockStats(_u("Large"), HeapBlock::LargeBlockType);
- #if ENABLE_PARTIAL_GC
- if (this->enablePartialCollect)
- {
- Output::Print(_u(" | UnusedFin : %5d %10d %10d"),
- collectionStats.smallNonLeafHeapBlockPartialUnusedCount[HeapBlock::SmallFinalizableBlockType],
- collectionStats.smallNonLeafHeapBlockPartialUnusedBytes[HeapBlock::SmallFinalizableBlockType],
- collectionStats.smallNonLeafHeapBlockPartialUnusedCount[HeapBlock::SmallFinalizableBlockType] * AutoSystemInfo::PageSize
- - collectionStats.smallNonLeafHeapBlockPartialUnusedBytes[HeapBlock::SmallFinalizableBlockType]);
- }
- #endif
- Output::Print(_u("\n"));
- PrintMemoryStats();
- Output::Flush();
- }
- #endif
- #ifdef RECYCLER_PAGE_HEAP
- void Recycler::VerifyPageHeapFillAfterAlloc(char* memBlock, size_t size, ObjectInfoBits attributes)
- {
- Assert(memBlock != nullptr);
- if (IsPageHeapEnabled())
- {
- HeapBlock* heapBlock = this->FindHeapBlock(memBlock);
- Assert(heapBlock);
- if (heapBlock->IsLargeHeapBlock())
- {
- LargeHeapBlock* largeHeapBlock = (LargeHeapBlock*)heapBlock;
- if (largeHeapBlock->InPageHeapMode()
- #ifdef RECYCLER_NO_PAGE_REUSE
- && !largeHeapBlock->GetPageAllocator(largeHeapBlock->heapInfo)->IsPageReuseDisabled()
- #endif
- )
- {
- largeHeapBlock->VerifyPageHeapPattern();
- }
- }
- }
- }
- #endif
- #ifdef RECYCLER_ZERO_MEM_CHECK
- void
- Recycler::VerifyZeroFill(void * address, size_t size)
- {
- byte expectedFill = 0;
- #ifdef RECYCLER_MEMORY_VERIFY
- if (this->VerifyEnabled())
- {
- expectedFill = Recycler::VerifyMemFill;
- }
- #endif
- Assert(IsAll((byte *)address, size, expectedFill));
- }
- void
- Recycler::VerifyLargeAllocZeroFill(void * address, size_t size, ObjectInfoBits attributes)
- {
- // Large allocs will have already written the dummy vtable at the beginning of the allocation
- // if either FinalizeBit or TrackBit attributes were set. Skip the verify for that memory
- // if that is the case.
- if ((attributes & (FinalizeBit | TrackBit)) != 0)
- {
- // Verify that it really is the dummy v-table before skipping it.
- DummyVTableObject dummy;
- Assert((*(void**)(&dummy)) == *((void**)address));
- address = ((char*)address) + sizeof(DummyVTableObject);
- size -= sizeof(DummyVTableObject);
- }
- VerifyZeroFill(address, size);
- }
- #endif
- #ifdef RECYCLER_MEMORY_VERIFY
// Verify the existing fill pattern of an allocation and then (re)write its
// pad bytes. The region checked depends on whether the object body has
// already been initialized and whether a dummy v-table is present.
void
Recycler::FillCheckPad(void * address, size_t size, size_t alignedAllocSize, bool objectAlreadyInitialized)
{
    if (this->VerifyEnabled())
    {
        void* addressToVerify = address;
        size_t sizeToVerify = alignedAllocSize;
        if (objectAlreadyInitialized)
        {
            // Only the pad region past the object body can still hold the
            // fill pattern.
            addressToVerify = ((char*) address + size);
            sizeToVerify = (alignedAllocSize - size);
        }
        else
        {
            // It could be the case that an uninitialized object already has a dummy vtable installed
            // at the beginning of the address. If that is the case, we can't verify the fill pattern
            // on that memory, since it's already been initialized.
            // Note that FillPadNoCheck will skip over the first sizeof(FreeObject) bytes, which
            // prevents overwriting of the vtable.
            static_assert(sizeof(DummyVTableObject) == sizeof(void*), "Incorrect size for a DummyVTableObject - it must contain a single v-table pointer");
            DummyVTableObject dummy;
            if ((*(void**)(&dummy)) == *((void**)address))
            {
                addressToVerify = (char*)address + sizeof(DummyVTableObject);
                sizeToVerify = alignedAllocSize - sizeof(DummyVTableObject);
            }
        }
        // Actually this is filling the non-pad to zero
        // (the trailing size_t holds the pad size, so exclude it from the check).
        VerifyCheckFill(addressToVerify, sizeToVerify - sizeof(size_t));
        FillPadNoCheck(address, size, alignedAllocSize, objectAlreadyInitialized);
    }
}
- void
- Recycler::FillPadNoCheck(void * address, size_t size, size_t alignedAllocSize, bool objectAlreadyInitialized)
- {
- // Ignore the first word
- if (!objectAlreadyInitialized && size > sizeof(FreeObject))
- {
- memset((char *)address + sizeof(FreeObject), 0, size - sizeof(FreeObject));
- }
- // write the pad size at the end;
- *(size_t *)((char *)address + alignedAllocSize - sizeof(size_t)) = alignedAllocSize - size;
- }
- void Recycler::Verify(Js::Phase phase)
- {
- if (verifyEnabled && (!this->CollectionInProgress()))
- {
- if (GetRecyclerFlagsTable().RecyclerVerify.IsEnabled(phase))
- {
- autoHeap.Verify();
- }
- }
- }
- void Recycler::VerifyCheck(BOOL cond, char16 const * msg, void * address, void * corruptedAddress)
- {
- if (!(cond))
- {
- fwprintf(stderr, _u("RECYCLER CORRUPTION: StartAddress=%p CorruptedAddress=%p: %s"), address, corruptedAddress, msg);
- Js::Throw::FatalInternalError();
- }
- }
// Assert that [address, address+size) is entirely the verify fill pattern.
void Recycler::VerifyCheckFill(void * address, size_t size)
{
    Assert(IsAll((byte*)address, size, Recycler::VerifyMemFill));
}
// Validate the verify padding of an object on the explicit free list.
// Layout: the object's last size_t stores the pad size; the pad bytes before
// that trailing word must carry the verify fill pattern.
void Recycler::VerifyCheckPadExplicitFreeList(void * address, size_t size)
{
    size_t * paddingAddress = (size_t *)((byte *)address + size - sizeof(size_t));
    size_t padding = *paddingAddress;
#pragma warning(suppress:4310)
    Assert(padding != (size_t)0xCACACACACACACACA); // Explicit free objects have to have been initialized at some point before they were freed
    // Pad must at least cover verifyPad plus the pad-size word, and be
    // smaller than the whole object.
    Recycler::VerifyCheck(padding >= verifyPad + sizeof(size_t) && padding < size, _u("Invalid padding size"), address, paddingAddress);
    // Every pad byte (excluding the trailing pad-size word) must be the fill
    // pattern; anything else indicates an overrun past the object body.
    for (byte * i = (byte *)address + size - padding; i < (byte *)paddingAddress; i++)
    {
        Recycler::VerifyCheck(*i == Recycler::VerifyMemFill, _u("buffer overflow"), address, i);
    }
}
// Validate the verify padding of an allocated object. Objects in nascent
// blocks may not have a pad size written yet; those are detected by the
// 0xCACA... sentinel and checked as an all-fill region instead.
void Recycler::VerifyCheckPad(void * address, size_t size)
{
    size_t * paddingAddress = (size_t *)((byte *)address + size - sizeof(size_t));
    size_t padding = *paddingAddress;
#pragma warning(suppress:4310)
    if (padding == (size_t)0xCACACACACACACACA)
    {
        // Nascent block have objects that are not initialized with pad size
        Recycler::VerifyCheckFill(address, size);
        return;
    }
    // Pad must at least cover verifyPad plus the pad-size word, and be
    // smaller than the whole object.
    Recycler::VerifyCheck(padding >= verifyPad + sizeof(size_t) && padding < size, _u("Invalid padding size"), address, paddingAddress);
    // Every pad byte (excluding the trailing pad-size word) must be the fill
    // pattern; anything else indicates an overrun past the object body.
    for (byte * i = (byte *)address + size - padding; i < (byte *)paddingAddress; i++)
    {
        Recycler::VerifyCheck(*i == Recycler::VerifyMemFill, _u("buffer overflow"), address, i);
    }
}
- #endif
- Recycler::AutoSetupRecyclerForNonCollectingMark::AutoSetupRecyclerForNonCollectingMark(Recycler& recycler, bool setupForHeapEnumeration)
- : m_recycler(recycler), m_setupDone(false)
- {
- if (! setupForHeapEnumeration)
- {
- DoCommonSetup();
- }
- }
// Shared setup for a non-collecting mark: requires an idle recycler, leaves
// partial-collect mode, and snapshots the collection state (and stats) so the
// destructor can restore them.
void Recycler::AutoSetupRecyclerForNonCollectingMark::DoCommonSetup()
{
    Assert(m_recycler.collectionState == CollectionStateNotCollecting || m_recycler.collectionState == CollectionStateExit);
#if ENABLE_CONCURRENT_GC
    Assert(!m_recycler.DoQueueTrackedObject());
#endif
#if ENABLE_PARTIAL_GC
    // We need to get out of partial collect before we do the mark because we
    // will mess with the free bit vector state
    // GC-CONSIDER: don't mess with the free bit vector?
    if (m_recycler.inPartialCollectMode)
    {
        m_recycler.FinishPartialCollect();
    }
#endif
    m_previousCollectionState = m_recycler.collectionState;
#ifdef RECYCLER_STATS
    // Snapshot and zero the stats so the non-collecting mark accumulates its
    // own numbers; the destructor restores this snapshot.
    m_previousCollectionStats = m_recycler.collectionStats;
    memset(&m_recycler.collectionStats, 0, sizeof(RecyclerCollectionStats));
#endif
    m_setupDone = true;
}
// Prepare the recycler for heap enumeration: finish any in-flight collection,
// run the common setup, reset marks for enumeration, and flag the recycler so
// collection is disabled while enumeration is in progress.
// Call order matters: marks are reset only after DoCommonSetup has validated
// and snapshotted the collection state.
void Recycler::AutoSetupRecyclerForNonCollectingMark::SetupForHeapEnumeration()
{
    Assert(!m_recycler.isHeapEnumInProgress);
    Assert(!m_recycler.allowAllocationDuringHeapEnum);
    m_recycler.EnsureNotCollecting();
    DoCommonSetup();
    m_recycler.ResetMarks(ResetMarkFlags_HeapEnumeration);
    m_recycler.SetCollectionState(CollectionStateNotCollecting);
    m_recycler.isHeapEnumInProgress = true;
    m_recycler.isCollectionDisabled = true;
}
// Restore the recycler state captured by DoCommonSetup and clear the
// heap-enumeration flags set by SetupForHeapEnumeration.
Recycler::AutoSetupRecyclerForNonCollectingMark::~AutoSetupRecyclerForNonCollectingMark()
{
    Assert(m_setupDone);
    Assert(!m_recycler.allowAllocationDuringHeapEnum);
#ifdef RECYCLER_STATS
    // Restore the stats snapshot taken (and zeroed) in DoCommonSetup.
    m_recycler.collectionStats = m_previousCollectionStats;
#endif
    m_recycler.SetCollectionState(m_previousCollectionState);
    m_recycler.isHeapEnumInProgress = false;
    m_recycler.isCollectionDisabled = false;
}
- #ifdef RECYCLER_DUMP_OBJECT_GRAPH
// Dump the live object graph by running a non-collecting mark with a graph
// dumper attached. Returns true on success, false if a collection is in
// progress or the dumper ran out of memory.
bool Recycler::DumpObjectGraph(RecyclerObjectGraphDumper::Param * param)
{
    bool succeeded = false;
    // Temporarily leave the Exit state so the mark can run; restored below.
    bool isExited = (this->collectionState == CollectionStateExit);
    if (isExited)
    {
        this->SetCollectionState(CollectionStateNotCollecting);
    }
    if (this->collectionState != CollectionStateNotCollecting)
    {
        Output::Print(_u("Can't dump object graph when collecting\n"));
        Output::Flush();
        return succeeded;
    }
    BEGIN_NO_EXCEPTION
    {
        // The dumper ctor presumably registers itself with this recycler;
        // the explicit nullptr below detaches it after the mark. TODO confirm.
        RecyclerObjectGraphDumper objectGraphDumper(this, param);
        Recycler::AutoSetupRecyclerForNonCollectingMark AutoSetupRecyclerForNonCollectingMark(*this);
        // Optionally skip stack roots for this mark (restored on scope exit).
        AutoRestoreValue<bool> skipStackToggle(&this->skipStack, this->skipStack || (param && param->skipStack));
        this->Mark();
        this->objectGraphDumper = nullptr;
#ifdef RECYCLER_STATS
        if (param)
        {
            // Hand the (mark-only) stats of this dump back to the caller.
            param->stats = this->collectionStats;
        }
#endif
        succeeded = !objectGraphDumper.isOutOfMemory;
    }
    END_NO_EXCEPTION
    if (isExited)
    {
        this->SetCollectionState(CollectionStateExit);
    }
    if (!succeeded)
    {
        Output::Print(_u("Out of memory dumping object graph\n"));
    }
    Output::Flush();
    return succeeded;
}
- void
- Recycler::DumpObjectDescription(void *objectAddress)
- {
- #ifdef PROFILE_RECYCLER_ALLOC
- type_info const * typeinfo = nullptr;
- bool isArray = false;
- if (this->trackerDictionary)
- {
- TrackerData * trackerData = GetTrackerData(objectAddress);
- if (trackerData != nullptr)
- {
- typeinfo = trackerData->typeinfo;
- isArray = trackerData->isArray;
- }
- else
- {
- Assert(false);
- }
- }
- RecyclerObjectDumper::DumpObject(typeinfo, isArray, objectAddress);
- #else
- Output::Print(_u("Address %p"), objectAddress);
- #endif
- }
- #endif
- #ifdef RECYCLER_STRESS
// All stress mode collects are implicitly instantiated here
// Trigger a collection according to whichever stress flag is active, in
// priority order: full stress, background stress, concurrent (optionally
// partial) stress, then partial stress. Returns true if a collection was
// started, false if no stress mode applies.
bool
Recycler::StressCollectNow()
{
    if (this->recyclerStress)
    {
        this->CollectNow<CollectStress>();
        return true;
    }
#if ENABLE_CONCURRENT_GC
    else if (this->recyclerBackgroundStress)
    {
        this->CollectNow<CollectBackgroundStress>();
        return true;
    }
    // Concurrent stress requires concurrent mark or sweep to be enabled.
    else if ((this->enableConcurrentMark || this->enableConcurrentSweep)
        && (this->recyclerConcurrentStress
            || this->recyclerConcurrentRepeatStress))
    {
#if ENABLE_PARTIAL_GC
        if (this->recyclerPartialStress)
        {
            this->CollectNow<CollectConcurrentPartialStress>();
            return true;
        }
        else
#endif // ENABLE_PARTIAL_GC
        {
            this->CollectNow<CollectConcurrentStress>();
            return true;
        }
    }
#endif // ENABLE_CONCURRENT_GC
#if ENABLE_PARTIAL_GC
    else if (this->recyclerPartialStress)
    {
        this->CollectNow<CollectPartialStress>();
        return true;
    }
#endif // ENABLE_PARTIAL_GC
    return false;
}
- #endif // RECYCLER_STRESS
- #ifdef TRACK_ALLOC
- Recycler *
- Recycler::TrackAllocInfo(TrackAllocData const& data)
- {
- #ifdef PROFILE_RECYCLER_ALLOC
- if (this->trackerDictionary != nullptr)
- {
- Assert(nextAllocData.IsEmpty());
- nextAllocData = data;
- }
- #endif
- return this;
- }
- void
- Recycler::ClearTrackAllocInfo(TrackAllocData* data/* = NULL*/)
- {
- #ifdef PROFILE_RECYCLER_ALLOC
- if (this->trackerDictionary != nullptr)
- {
- AssertMsg(!nextAllocData.IsEmpty(), "Missing tracking information for this allocation, are you not using the macros?");
- if (data)
- {
- *data = nextAllocData;
- }
- nextAllocData.Clear();
- }
- #endif
- }
- #ifdef PROFILE_RECYCLER_ALLOC
- bool
- Recycler::DoProfileAllocTracker()
- {
- bool doTracker = false;
- #ifdef RECYCLER_DUMP_OBJECT_GRAPH
- doTracker = Js::Configuration::Global.flags.DumpObjectGraphOnExit
- || Js::Configuration::Global.flags.DumpObjectGraphOnCollect
- || Js::Configuration::Global.flags.DumpObjectGraphOnEnum;
- #endif
- #ifdef LEAK_REPORT
- if (Js::Configuration::Global.flags.IsEnabled(Js::LeakReportFlag))
- {
- doTracker = true;
- }
- #endif
- #ifdef CHECK_MEMORY_LEAK
- if (Js::Configuration::Global.flags.CheckMemoryLeak)
- {
- doTracker = true;
- }
- #endif
- if (CONFIG_FLAG(KeepRecyclerTrackData))
- {
- doTracker = true;
- }
- return doTracker || MemoryProfiler::DoTrackRecyclerAllocation();
- }
// Create the allocation-tracker dictionary and its guarding critical section
// when profiling is required; always resets the pending allocation record.
void
Recycler::InitializeProfileAllocTracker()
{
    if (DoProfileAllocTracker())
    {
        // 163: initial bucket count for the type-info -> tracker-item map.
        trackerDictionary = NoCheckHeapNew(TypeInfotoTrackerItemMap, &NoCheckHeapAllocator::Instance, 163);
        // 1000: spin count for the tracker lock.
        trackerCriticalSection = new CriticalSection(1000);
#pragma prefast(suppress:6031, "InitializeCriticalSectionAndSpinCount always succeed since Vista. No need to check return value");
        // NOTE(review): the suppress above precedes no statement; it looks
        // like a leftover from a direct InitializeCriticalSectionAndSpinCount
        // call — confirm and remove if dead.
    }
    nextAllocData.Clear();
}
// Record one allocation in the tracker dictionary keyed by its type info.
// Computes per-item size (dividing out array counts and subtracting the
// "plus" size), creates the TrackerItem on first sight of a type, and updates
// the instance or array TrackerData counters. Caller must hold
// trackerCriticalSection.
void
Recycler::TrackAllocCore(void * object, size_t size, const TrackAllocData& trackAllocData, bool traceLifetime)
{
    auto&& typeInfo = trackAllocData.GetTypeInfo();
    if (CONFIG_FLAG(KeepRecyclerTrackData))
    {
        // With persistent track data, a reused address may still carry the
        // old record; release it first.
        TrackFree((char*)object, size);
    }
    Assert(GetTrackerData(object) == nullptr || GetTrackerData(object) == &TrackerData::ExplicitFreeListObjectData);
    Assert(typeInfo != nullptr);
    TrackerItem * item;
    size_t allocCount = trackAllocData.GetCount();
    // Per-item size excludes any trailing "plus" bytes requested by the caller.
    size_t itemSize = (size - trackAllocData.GetPlusSize());
    bool isArray;
    if (allocCount != (size_t)-1)
    {
        // A real count means this is an array allocation; size is per element.
        isArray = true;
        itemSize = itemSize / allocCount;
    }
    else
    {
        isArray = false;
        allocCount = 1;
    }
    if (!trackerDictionary->TryGetValue(typeInfo, &item))
    {
#ifdef STACK_BACK_TRACE
        if (CONFIG_FLAG(KeepRecyclerTrackData) && isArray) // type info is not useful record stack instead
        {
            // Reserve room after the item for a captured stack trace.
            size_t stackTraceSize = 16 * sizeof(void*);
            item = NoCheckHeapNewPlus(stackTraceSize, TrackerItem, typeInfo);
            StackBackTrace::Capture((char*)&item[1], stackTraceSize, 7);
        }
        else
#endif
        {
            item = NoCheckHeapNew(TrackerItem, typeInfo);
        }
        item->instanceData.ItemSize = itemSize;
        item->arrayData.ItemSize = itemSize;
        trackerDictionary->Item(typeInfo, item);
    }
    else
    {
        // An existing item for this type must agree on the per-item size.
        Assert(item->instanceData.typeinfo == typeInfo);
        Assert(item->instanceData.ItemSize == itemSize);
        Assert(item->arrayData.ItemSize == itemSize);
    }
    // Arrays and single instances are accumulated separately.
    TrackerData& data = (isArray)? item->arrayData : item->instanceData;
    data.ItemCount += allocCount;
    data.AllocCount++;
    data.ReqSize += size;
    data.AllocSize += HeapInfo::GetAlignedSizeNoCheck(size);
#ifdef TRACE_OBJECT_LIFETIME
    data.TraceLifetime = traceLifetime;
    if (traceLifetime)
    {
        Output::Print(data.isArray ? _u("Allocated %S[] %p\n") : _u("Allocated %S %p\n"), data.typeinfo->name(), object);
    }
#endif
#ifdef PERF_COUNTERS
    ++data.counter;
    data.sizeCounter += HeapInfo::GetAlignedSizeNoCheck(size);
#endif
    // Point the object's tracker slot at the shared per-type record.
    SetTrackerData(object, &data);
}
- void* Recycler::TrackAlloc(void* object, size_t size, const TrackAllocData& trackAllocData, bool traceLifetime)
- {
- if (this->trackerDictionary != nullptr)
- {
- Assert(nextAllocData.IsEmpty()); // should have been cleared
- trackerCriticalSection->Enter();
- TrackAllocCore(object, size, trackAllocData);
- trackerCriticalSection->Leave();
- }
- return object;
- }
- void
- Recycler::TrackIntegrate(__in_ecount(blockSize) char * blockAddress, size_t blockSize, size_t allocSize, size_t objectSize, const TrackAllocData& trackAllocData)
- {
- if (this->trackerDictionary != nullptr)
- {
- Assert(nextAllocData.IsEmpty()); // should have been cleared
- trackerCriticalSection->Enter();
- char * address = blockAddress;
- char * blockEnd = blockAddress + blockSize;
- while (address + allocSize <= blockEnd)
- {
- TrackAllocCore(address, objectSize, trackAllocData);
- address += allocSize;
- }
- trackerCriticalSection->Leave();
- }
- }
BOOL Recycler::TrackFree(const char* address, size_t size)
{
    // Records a free against the TrackerData record previously stamped on the
    // object by TrackAllocCore, then clears the stamp. Always returns true.
    if (this->trackerDictionary != nullptr)
    {
        trackerCriticalSection->Enter();
        TrackerData * data = GetTrackerData((char *)address);
        if (data != nullptr)
        {
            // EmptyData is the sentinel for never-allocated slots; only real
            // records carry statistics to update.
            if (data != &TrackerData::EmptyData)
            {
#ifdef PERF_COUNTERS
                --data->counter;
                data->sizeCounter -= size;
#endif
                if (data->typeinfo == &typeid(RecyclerWeakReferenceBase))
                {
                    // Weak references additionally maintain their own counter.
                    TrackFreeWeakRef((RecyclerWeakReferenceBase *)address);
                }
                data->FreeSize += size;
                data->FreeCount++;
#ifdef TRACE_OBJECT_LIFETIME
                if (data->TraceLifetime)
                {
                    Output::Print(data->isArray ? _u("Freed %S[] %p\n") : _u("Freed %S %p\n"), data->typeinfo->name(), address);
                }
#endif
            }
            SetTrackerData((char *)address, nullptr);
        }
        else
        {
            // Without KeepRecyclerTrackData, every freed object should still
            // carry its tracker stamp; a missing one indicates a bug.
            if (!CONFIG_FLAG(KeepRecyclerTrackData))
            {
                Assert(false);
            }
        }
        trackerCriticalSection->Leave();
    }
    return true;
}
- Recycler::TrackerData *
- Recycler::GetTrackerData(void * address)
- {
- HeapBlock * heapBlock = this->FindHeapBlock(address);
- Assert(heapBlock != nullptr);
- return (Recycler::TrackerData *)heapBlock->GetTrackerData(address);
- }
- void
- Recycler::SetTrackerData(void * address, TrackerData * data)
- {
- HeapBlock * heapBlock = this->FindHeapBlock(address);
- Assert(heapBlock != nullptr);
- heapBlock->SetTrackerData(address, data);
- }
- void
- Recycler::TrackUnallocated(__in char* address, __in char *endAddress, size_t sizeCat)
- {
- if (!CONFIG_FLAG(KeepRecyclerTrackData))
- {
- if (this->trackerDictionary != nullptr)
- {
- trackerCriticalSection->Enter();
- while (address + sizeCat <= endAddress)
- {
- Assert(GetTrackerData(address) == nullptr);
- SetTrackerData(address, &TrackerData::EmptyData);
- address += sizeCat;
- }
- trackerCriticalSection->Leave();
- }
- }
- }
void
Recycler::TrackAllocWeakRef(RecyclerWeakReferenceBase * weakRef)
{
    // Wires a per-type weak-reference perf counter onto the weak ref so
    // TrackFreeWeakRef can decrement it later. Only active in DBG builds
    // with perf counters enabled.
#if ENABLE_RECYCLER_TYPE_TRACKING
    Assert(weakRef->typeInfo != nullptr);
#endif
#if DBG && defined(PERF_COUNTERS)
    if (this->trackerDictionary != nullptr)
    {
        TrackerItem * item;
        if (trackerDictionary->TryGetValue(weakRef->typeInfo, &item))
        {
            // Type already tracked: share the item's weak-ref counter.
            weakRef->counter = &item->weakRefCounter;
        }
        else
        {
            // Fall back to the global per-type weak-ref perf counter.
            weakRef->counter = &PerfCounter::RecyclerTrackerCounterSet::GetWeakRefPerfCounter(weakRef->typeInfo);
        }
        ++(*weakRef->counter);
    }
#endif
}
void
Recycler::TrackFreeWeakRef(RecyclerWeakReferenceBase * weakRef)
{
    // Undo the counter increment done by TrackAllocWeakRef, if one was wired.
#if DBG && defined(PERF_COUNTERS)
    if (weakRef->counter != nullptr)
    {
        --(*weakRef->counter);
    }
#endif
}
- void
- Recycler::PrintAllocStats()
- {
- if (this->trackerDictionary == nullptr)
- {
- return;
- }
- size_t itemCount = 0;
- int allocCount = 0;
- int64 reqSize = 0;
- int64 allocSize = 0;
- int freeCount = 0;
- int64 freeSize = 0;
- Output::Print(_u("=================================================================================================================\n"));
- Output::Print(_u("Recycler Allocations\n"));
- Output::Print(_u("=================================================================================================================\n"));
- Output::Print(_u("ItemSize ItemCount AllocCount RequestSize AllocSize FreeCount FreeSize DiffCount DiffSize \n"));
- Output::Print(_u("-------- ---------- ---------- --------------- --------------- ---------- --------------- ---------- ---------------\n"));
- for (int i = 0; i < trackerDictionary->Count(); i++)
- {
- TrackerItem * item = trackerDictionary->GetValueAt(i);
- type_info const * typeinfo = trackerDictionary->GetKeyAt(i);
- if (item->instanceData.AllocCount != 0)
- {
- Output::Print(_u("%8d %10d %10d %15I64d %15I64d %10d %15I64d %10d %15I64d %S\n"),
- item->instanceData.ItemSize, item->instanceData.ItemCount, item->instanceData.AllocCount, item->instanceData.ReqSize,
- item->instanceData.AllocSize, item->instanceData.FreeCount, item->instanceData.FreeSize,
- item->instanceData.AllocCount - item->instanceData.FreeCount, item->instanceData.AllocSize - item->instanceData.FreeSize, typeinfo->name());
- itemCount += item->instanceData.ItemCount;
- allocCount += item->instanceData.AllocCount;
- reqSize += item->instanceData.ReqSize;
- allocSize += item->instanceData.AllocSize;
- freeCount += item->instanceData.FreeCount;
- freeSize += item->instanceData.FreeSize;
- }
- if (item->arrayData.AllocCount != 0)
- {
- Output::Print(_u("%8d %10d %10d %15I64d %15I64d %10d %15I64d %10d %15I64d %S[]\n"),
- item->arrayData.ItemSize, item->arrayData.ItemCount, item->arrayData.AllocCount, item->arrayData.ReqSize,
- item->arrayData.AllocSize, item->arrayData.FreeCount, item->arrayData.FreeSize,
- item->instanceData.AllocCount - item->instanceData.FreeCount, item->arrayData.AllocSize - item->arrayData.FreeSize, typeinfo->name());
- itemCount += item->arrayData.ItemCount;
- allocCount += item->arrayData.AllocCount;
- reqSize += item->arrayData.ReqSize;
- allocSize += item->arrayData.AllocSize;
- freeCount += item->arrayData.FreeCount;
- freeSize += item->arrayData.FreeSize;
- }
- }
- Output::Print(_u("-------- ---------- ---------- --------------- --------------- ---------- --------------- ---------- ---------------\n"));
- Output::Print(_u(" %8d %10d %15I64d %15I64d %10d %15I64d %10d %15I64d **Total**\n"),
- itemCount, allocCount, reqSize, allocSize, freeCount, freeSize, allocCount - freeCount, allocSize - freeSize);
- #ifdef EXCEL_FRIENDLY_DUMP
- Output::Print(_u("\nExcel friendly version\nItemSize\tItemCount\tAllocCount\tRequestSize\tAllocSize\tFreeCount\tFreeSize\tDiffCount\tDiffSize\tType\n"));
- for (int i = 0; i < trackerDictionary->Count(); i++)
- {
- TrackerItem * item = trackerDictionary->GetValueAt(i);
- type_info const * typeinfo = trackerDictionary->GetKeyAt(i);
- if (item->instanceData.AllocCount != 0)
- {
- Output::Print(_u("%d\t%d\t%d\t%I64d\t%I64d\t%d\t%I64d\t%d\t%I64d\t%S\n"),
- item->instanceData.ItemSize, item->instanceData.ItemCount, item->instanceData.AllocCount, item->instanceData.ReqSize,
- item->instanceData.AllocSize, item->instanceData.FreeCount, item->instanceData.FreeSize,
- item->instanceData.AllocCount - item->instanceData.FreeCount, item->instanceData.AllocSize - item->instanceData.FreeSize, typeinfo->name());
- }
- if (item->arrayData.AllocCount != 0)
- {
- Output::Print(_u("%d\t%d\t%d\t%I64d\t%I64d\t%d\t%I64d\t%d\t%I64d\t%S[]\n"),
- item->arrayData.ItemSize, item->arrayData.ItemCount, item->arrayData.AllocCount, item->arrayData.ReqSize,
- item->arrayData.AllocSize, item->arrayData.FreeCount, item->arrayData.FreeSize,
- item->instanceData.AllocCount - item->instanceData.FreeCount, item->arrayData.AllocSize - item->arrayData.FreeSize, typeinfo->name());
- }
- }
- #endif // EXCEL_FRIENDLY_DUMP
- Output::Flush();
- }
- #endif // PROFILE_RECYCLER_ALLOC
- #endif // TRACK_ALLOC
- #ifdef RECYCLER_VERIFY_MARK
void
Recycler::VerifyMark()
{
    // Re-walks the roots and the heap to validate mark bits after marking.
    VerifyMarkRoots();
    // Can't really verify stack since the recycler code between ScanStack to now may have introduce false references.
    // VerifyMarkStack();
    autoHeap.VerifyMark();
}
void
Recycler::VerifyMarkRoots()
{
    // Verifies that every root-reachable object is marked: the transient
    // pinned object, the pinned-object map, and all guest arenas.
    {
        this->VerifyMark(transientPinnedObject);
        pinnedObjectMap.Map([this](void * obj, PinRecord const &refCount)
        {
            if (refCount == 0)
            {
                // A zero refcount means the entry is awaiting unpin processing.
                Assert(this->hasPendingUnpinnedObject);
            }
            else
            {
                // Use the pinrecord as the source reference
                this->VerifyMark(obj);
            }
        });
    }

    DList<GuestArenaAllocator, HeapAllocator>::Iterator guestArenaIter(&guestArenaList);
    while (guestArenaIter.Next())
    {
        if (guestArenaIter.Data().pendingDelete)
        {
            // Arena is queued for deletion; it no longer acts as a root.
            Assert(this->hasPendingDeleteGuestArena);
        }
        else
        {
            VerifyMarkArena(&guestArenaIter.Data());
        }
    }

    DList<ArenaData *, HeapAllocator>::Iterator externalGuestArenaIter(&externalGuestArenaList);
    while (externalGuestArenaIter.Next())
    {
        VerifyMarkArena(externalGuestArenaIter.Data());
    }

    // We can't check external roots here
}
void
Recycler::VerifyMarkArena(ArenaData * alloc)
{
    // Verify all three storage areas of an arena as root ranges.
    VerifyMarkBigBlockList(alloc->GetBigBlocks(false));
    VerifyMarkBigBlockList(alloc->GetFullBlocks());
    VerifyMarkArenaMemoryBlockList(alloc->GetMemoryBlocks());
}
- void
- Recycler::VerifyMarkBigBlockList(BigBlock * memoryBlocks)
- {
- size_t scanRootBytes = 0;
- BigBlock *blockp = memoryBlocks;
- while (blockp != NULL)
- {
- void** base=(void**)blockp->GetBytes();
- size_t slotCount = blockp->currentByte / sizeof(void*);
- scanRootBytes += blockp->currentByte;
- for (size_t i=0; i < slotCount; i++)
- {
- VerifyMark(base[i]);
- }
- blockp = blockp->nextBigBlock;
- }
- }
- void
- Recycler::VerifyMarkArenaMemoryBlockList(ArenaMemoryBlock * memoryBlocks)
- {
- size_t scanRootBytes = 0;
- ArenaMemoryBlock *blockp = memoryBlocks;
- while (blockp != NULL)
- {
- void** base=(void**)blockp->GetBytes();
- size_t slotCount = blockp->nbytes / sizeof(void*);
- scanRootBytes += blockp->nbytes;
- for (size_t i=0; i< slotCount; i++)
- {
- VerifyMark(base[i]);
- }
- blockp = blockp->next;
- }
- }
void
Recycler::VerifyMarkStack()
{
    // Verifies mark state for every pointer-sized value on the current
    // thread's stack and in the saved register context.
    SAVE_THREAD_CONTEXT();
    void ** stackTop = (void**) this->savedThreadContext.GetStackTop();
    void * stackStart = GetStackBase();
    // Stack grows downward: the base must sit above the captured top.
    Assert(stackStart > stackTop);
    for (;stackTop < stackStart; stackTop++)
    {
        void* candidate = *stackTop;
        VerifyMark(nullptr, candidate);
    }
    void** registers = this->savedThreadContext.GetRegisters();
    for (int i = 0; i < SavedRegisterState::NumRegistersToSave; i++)
    {
        VerifyMark(nullptr, registers[i]);
    }
}
- bool
- Recycler::VerifyMark(void * target)
- {
- return VerifyMark(nullptr, target);
- }
- // objectAddress is nullptr in case of roots
- bool
- Recycler::VerifyMark(void * objectAddress, void * target)
- {
- void * realAddress;
- HeapBlock * heapBlock;
- if (this->enableScanInteriorPointers)
- {
- heapBlock = heapBlockMap.GetHeapBlock(target);
- if (heapBlock == nullptr)
- {
- return false;
- }
- realAddress = heapBlock->GetRealAddressFromInterior(target);
- if (realAddress == nullptr)
- {
- return false;
- }
- }
- else
- {
- heapBlock = this->FindHeapBlock(target);
- if (heapBlock == nullptr)
- {
- return false;
- }
- realAddress = target;
- }
- return heapBlock->VerifyMark(objectAddress, realAddress);
- }
- #endif
ArenaAllocator *
Recycler::CreateGuestArena(char16 const * name, void (*outOfMemoryFunc)())
{
    // Creates a guest arena whose contents are scanned as GC roots; the new
    // arena is prepended to guestArenaList.
    // Note, guest arenas use the large block allocator.
    return guestArenaList.PrependNode(&HeapAllocator::Instance, name, this->GetDefaultHeapInfo()->GetRecyclerLargeBlockPageAllocator(), outOfMemoryFunc);
}
void
Recycler::DeleteGuestArena(ArenaAllocator * arenaAllocator)
{
    // Removes (or defers removal of) a guest arena created by CreateGuestArena,
    // then requests an exhaustive candidate collection.
    GuestArenaAllocator * guestArenaAllocator = static_cast<GuestArenaAllocator *>(arenaAllocator);
#if ENABLE_CONCURRENT_GC
    if (this->hasPendingConcurrentFindRoot)
    {
        // We are doing concurrent find root, don't modify the list and mark the arena to be delete
        // later when we do find root in thread.
        Assert(guestArenaList.HasElement(guestArenaAllocator));
        this->hasPendingDeleteGuestArena = true;
        guestArenaAllocator->pendingDelete = true;
    }
    else
#endif
    {
        guestArenaList.RemoveElement(&HeapAllocator::Instance, guestArenaAllocator);
    }

    // Any time a root is removed during a GC, it indicates that an exhaustive
    // collection is likely going to have work to do so trigger an exhaustive
    // candidate GC to indicate this fact
    this->CollectNow<CollectExhaustiveCandidate>();
}
- #ifdef LEAK_REPORT
- void
- Recycler::ReportLeaks()
- {
- if (GetRecyclerFlagsTable().IsEnabled(Js::LeakReportFlag))
- {
- if (GetRecyclerFlagsTable().ForceMemoryLeak)
- {
- AUTO_HANDLED_EXCEPTION_TYPE(ExceptionType_DisableCheck);
- struct FakeMemory { Field(int) f; };
- FakeMemory * f = RecyclerNewStruct(this, FakeMemory);
- this->RootAddRef(f);
- }
- LeakReport::StartSection(_u("Object Graph"));
- LeakReport::StartRedirectOutput();
- RecyclerObjectGraphDumper::Param param = { 0 };
- param.skipStack = true;
- if (!this->DumpObjectGraph(¶m))
- {
- LeakReport::Print(_u("--------------------------------------------------------------------------------\n"));
- LeakReport::Print(_u("ERROR: Out of memory generating leak report\n"));
- param.stats.markData.markCount = 0;
- }
- LeakReport::EndRedirectOutput();
- if (param.stats.markData.markCount != 0)
- {
- LeakReport::Print(_u("--------------------------------------------------------------------------------\n"));
- LeakReport::Print(_u("Recycler Leaked Object: %d bytes (%d objects)\n"),
- param.stats.markData.markBytes, param.stats.markData.markCount);
- #ifdef STACK_BACK_TRACE
- if (GetRecyclerFlagsTable().LeakStackTrace)
- {
- LeakReport::StartSection(_u("Pinned object stack traces"));
- LeakReport::StartRedirectOutput();
- this->PrintPinnedObjectStackTraces();
- LeakReport::EndRedirectOutput();
- LeakReport::EndSection();
- }
- #endif
- }
- LeakReport::EndSection();
- }
- }
void
Recycler::ReportLeaksOnProcessDetach()
{
    // Runs the leak report during process shutdown, wrapped in the
    // thread-safety relaxations that ReportOnProcessDetach applies.
    if (GetRecyclerFlagsTable().IsEnabled(Js::LeakReportFlag))
    {
        AUTO_LEAK_REPORT_SECTION(this->GetRecyclerFlagsTable(), _u("Recycler (%p): Process Termination"), this);
        LeakReport::StartRedirectOutput();
        ReportOnProcessDetach([=]() { this->ReportLeaks(); });
        LeakReport::EndRedirectOutput();
    }
}
- #endif
- #ifdef CHECK_MEMORY_LEAK
- void
- Recycler::CheckLeaks(char16 const * header)
- {
- if (GetRecyclerFlagsTable().CheckMemoryLeak && this->isPrimaryMarkContextInitialized)
- {
- if (GetRecyclerFlagsTable().ForceMemoryLeak)
- {
- AUTO_HANDLED_EXCEPTION_TYPE(ExceptionType_DisableCheck);
- struct FakeMemory { Field(int) f; };
- FakeMemory * f = RecyclerNewStruct(this, FakeMemory);
- this->RootAddRef(f);
- }
- Output::CaptureStart();
- Output::Print(_u("-------------------------------------------------------------------------------------\n"));
- Output::Print(_u("Recycler (%p): %s Leaked Roots\n"), this, header);
- Output::Print(_u("-------------------------------------------------------------------------------------\n"));
- RecyclerObjectGraphDumper::Param param = { 0 };
- param.dumpRootOnly = true;
- param.skipStack = true;
- if (!this->DumpObjectGraph(¶m))
- {
- free(Output::CaptureEnd());
- Output::Print(_u("ERROR: Out of memory generating leak report\n"));
- return;
- }
- if (param.stats.markData.markCount != 0)
- {
- #ifdef STACK_BACK_TRACE
- if (GetRecyclerFlagsTable().LeakStackTrace)
- {
- Output::Print(_u("-------------------------------------------------------------------------------------\n"));
- Output::Print(_u("Pinned object stack traces"));
- Output::Print(_u("-------------------------------------------------------------------------------------\n"));
- this->PrintPinnedObjectStackTraces();
- }
- #endif
- Output::Print(_u("-------------------------------------------------------------------------------------\n"));
- Output::Print(_u("Recycler Leaked Object: %d bytes (%d objects)\n"),
- param.stats.markData.markBytes, param.stats.markData.markCount);
- char16 * buffer = Output::CaptureEnd();
- MemoryLeakCheck::AddLeakDump(buffer, param.stats.markData.markBytes, param.stats.markData.markCount);
- #ifdef GENERATE_DUMP
- if (GetRecyclerFlagsTable().IsEnabled(Js::DumpOnLeakFlag))
- {
- Js::Throw::GenerateDump(GetRecyclerFlagsTable().DumpOnLeak);
- }
- #endif
- }
- else
- {
- free(Output::CaptureEnd());
- }
- }
- }
- void
- Recycler::CheckLeaksOnProcessDetach(char16 const * header)
- {
- if (GetRecyclerFlagsTable().CheckMemoryLeak)
- {
- ReportOnProcessDetach([=]() { this->CheckLeaks(header); });
- }
- }
- #endif
- #if defined(LEAK_REPORT) || defined(CHECK_MEMORY_LEAK)
template <class Fn>
void
Recycler::ReportOnProcessDetach(Fn fn)
{
    // Prepares the recycler to run a reporting callback on an arbitrary
    // thread during process detach: aborts concurrent GC, relaxes DBG thread
    // checks, and detaches the external root marker before invoking fn.
#if DBG
    // Process detach can be done on any thread, just disable the thread check
    this->markContext.GetPageAllocator()->SetDisableThreadAccessCheck();
#endif
#if ENABLE_CONCURRENT_GC
    if (this->IsConcurrentState())
    {
        this->AbortConcurrent(true);
    }

    if (this->CollectionInProgress())
    {
        // An in-flight collection leaves the heap in a state we can't walk.
        Output::Print(_u("WARNING: Thread terminated during GC. Can't dump object graph\n"));
        return;
    }
#else
    Assert(!this->CollectionInProgress());
#endif
    // Don't mark external roots on another thread
    this->SetExternalRootMarker(NULL, NULL);

#if DBG
    this->ResetThreadId();
#endif
    fn();
}
- #ifdef STACK_BACK_TRACE
- void
- Recycler::PrintPinnedObjectStackTraces()
- {
- pinnedObjectMap.Map([this](void * object, PinRecord const& pinRecord)
- {
- this->DumpObjectDescription(object);
- Output::Print(_u("\n"));
- StackBackTraceNode::PrintAll(pinRecord.stackBackTraces);
- }
- );
- }
- #endif
- #endif
- #if defined(RECYCLER_DUMP_OBJECT_GRAPH) || defined(LEAK_REPORT) || defined(CHECK_MEMORY_LEAK)
void
Recycler::SetInDllCanUnloadNow()
{
    // Flags that we are inside DllCanUnloadNow and detaches the external
    // root marker so graph dumps / leak reports don't call back out.
    inDllCanUnloadNow = true;

    // Just clear out the root marker for the dump graph and report leaks
    SetExternalRootMarker(NULL, NULL);
}
void
Recycler::SetInDetachProcess()
{
    // Flags that the process is detaching and detaches the external root
    // marker so graph dumps / leak reports don't call back out.
    inDetachProcess = true;

    // Just clear out the root marker for the dump graph and report leaks
    SetExternalRootMarker(NULL, NULL);
}
- #endif
- #ifdef ENABLE_JS_ETW
- ULONG Recycler::EventWriteFreeMemoryBlock(HeapBlock* heapBlock)
- {
- if (EventEnabledJSCRIPT_RECYCLER_FREE_MEMORY_BLOCK())
- {
- char* memoryAddress = NULL;
- ULONG objectSize = 0;
- ULONG blockSize = 0;
- switch (heapBlock->GetHeapBlockType())
- {
- case HeapBlock::HeapBlockType::SmallFinalizableBlockType:
- case HeapBlock::HeapBlockType::SmallNormalBlockType:
- #ifdef RECYCLER_WRITE_BARRIER
- case HeapBlock::HeapBlockType::SmallFinalizableBlockWithBarrierType:
- case HeapBlock::HeapBlockType::SmallNormalBlockWithBarrierType:
- #endif
- case HeapBlock::HeapBlockType::SmallLeafBlockType:
- {
- SmallHeapBlock* smallHeapBlock = static_cast<SmallHeapBlock*>(heapBlock);
- memoryAddress = smallHeapBlock->GetAddress();
- blockSize = (ULONG)(smallHeapBlock->GetEndAddress() - memoryAddress);
- objectSize = smallHeapBlock->GetObjectSize();
- }
- break;
- case HeapBlock::HeapBlockType::MediumFinalizableBlockType:
- case HeapBlock::HeapBlockType::MediumNormalBlockType:
- #ifdef RECYCLER_WRITE_BARRIER
- case HeapBlock::HeapBlockType::MediumFinalizableBlockWithBarrierType:
- case HeapBlock::HeapBlockType::MediumNormalBlockWithBarrierType:
- #endif
- case HeapBlock::HeapBlockType::MediumLeafBlockType:
- {
- MediumHeapBlock* mediumHeapBlock = static_cast<MediumHeapBlock*>(heapBlock);
- memoryAddress = mediumHeapBlock->GetAddress();
- blockSize = (ULONG)(mediumHeapBlock->GetEndAddress() - memoryAddress);
- objectSize = mediumHeapBlock->GetObjectSize();
- }
- case HeapBlock::HeapBlockType::LargeBlockType:
- {
- LargeHeapBlock* largeHeapBlock = static_cast<LargeHeapBlock*>(heapBlock);
- memoryAddress = largeHeapBlock->GetBeginAddress();
- blockSize = (ULONG)(largeHeapBlock->GetEndAddress() - memoryAddress);
- objectSize = blockSize;
- }
- break;
- default:
- AssertMsg(FALSE, "invalid heapblock type");
- }
- EventWriteJSCRIPT_RECYCLER_FREE_MEMORY_BLOCK(memoryAddress, blockSize, objectSize);
- }
- return S_OK;
- }
void Recycler::FlushFreeRecord()
{
    // Emits the buffered free-memory ETW records in one bulk event and
    // resets the buffer cursor.
    Assert(bulkFreeMemoryWrittenCount <= Recycler::BulkFreeMemoryCount);
    JS_ETW(EventWriteJSCRIPT_RECYCLER_FREE_MEMORY(bulkFreeMemoryWrittenCount, sizeof(Recycler::ETWFreeRecord), etwFreeRecords));
    bulkFreeMemoryWrittenCount = 0;
}
void Recycler::AppendFreeMemoryETWRecord(__in char *address, size_t size)
{
    // Buffers one freed-object record; flushes the batch when the buffer
    // reaches BulkFreeMemoryCount entries.
    Assert(bulkFreeMemoryWrittenCount < Recycler::BulkFreeMemoryCount);
    __analysis_assume(bulkFreeMemoryWrittenCount < Recycler::BulkFreeMemoryCount);
    etwFreeRecords[bulkFreeMemoryWrittenCount].memoryAddress = address;
    // TODO: change to size_t or uint64?
    etwFreeRecords[bulkFreeMemoryWrittenCount].objectSize = (uint)size;
    bulkFreeMemoryWrittenCount++;
    if (bulkFreeMemoryWrittenCount == Recycler::BulkFreeMemoryCount)
    {
        FlushFreeRecord();
        Assert(bulkFreeMemoryWrittenCount == 0);
    }
}
- #endif
- #ifdef PROFILE_EXEC
ArenaAllocator *
Recycler::AddBackgroundProfilerArena()
{
    // Creates an arena for background-GC profiling data, prepended to
    // backgroundProfilerArena; OOM raises via Js::Throw::OutOfMemory.
    return this->backgroundProfilerArena.PrependNode(&HeapAllocator::Instance,
        _u("BgGCProfiler"), &this->backgroundProfilerPageAllocator, Js::Throw::OutOfMemory);
}
void
Recycler::ReleaseBackgroundProfilerArena(ArenaAllocator * arena)
{
    // Removes an arena previously created by AddBackgroundProfilerArena.
    this->backgroundProfilerArena.RemoveElement(&HeapAllocator::Instance, arena);
}
void
Recycler::SetProfiler(Js::Profiler * profiler, Js::Profiler * backgroundProfiler)
{
    // Installs the foreground and background execution profilers.
    this->profiler = profiler;
    this->backgroundProfiler = backgroundProfiler;
}
- #endif
void Recycler::SetObjectBeforeCollectCallback(void* object,
    ObjectBeforeCollectCallback callback,
    void* callbackState,
    ObjectBeforeCollectCallbackWrapper callbackWrapper,
    void* threadContext)
{
    // Registers (or, with callback == nullptr, replaces) the single
    // before-collect callback for `object`. Registering from inside a
    // callback revives the object by re-marking it.
    if (objectBeforeCollectCallbackState == ObjectBeforeCollectCallback_Shutdown)
    {
        return; // NOP at shutdown
    }

    if (objectBeforeCollectCallbackMap == nullptr)
    {
        // Nothing to clear and no map yet: nothing to do.
        if (callback == nullptr) return;
        objectBeforeCollectCallbackMap = HeapNew(ObjectBeforeCollectCallbackMap, &HeapAllocator::Instance);
    }

    // only allow 1 callback per object
    objectBeforeCollectCallbackMap->Item(object, ObjectBeforeCollectCallbackData(callbackWrapper, callback, callbackState, threadContext));

    if (callback != nullptr && this->IsInObjectBeforeCollectCallback()) // revive
    {
        // Scan the slot holding `object` and process the resulting marks so
        // the object survives the in-progress collection.
        this->ScanMemory<false>(&object, sizeof(object));
        this->ProcessMark(/*background*/false);
    }
}
bool Recycler::ProcessObjectBeforeCollectCallbacks(bool atShutdown/*= false*/)
{
    // Invokes registered before-collect callbacks for objects that are about
    // to be collected (or for all objects at shutdown). Returns true when the
    // map existed, i.e. callbacks may have been called.
    if (this->objectBeforeCollectCallbackMap == nullptr)
    {
        return false; // no callbacks
    }

    Assert(atShutdown || this->IsMarkState());
    Assert(!this->IsInObjectBeforeCollectCallback());
    AutoRestoreValue<ObjectBeforeCollectCallbackState> autoInObjectBeforeCollectCallback(&objectBeforeCollectCallbackState,
        atShutdown ? ObjectBeforeCollectCallback_Shutdown: ObjectBeforeCollectCallback_Normal);

    // The callbacks may register/unregister callbacks while we are enumerating the current map. To avoid
    // conflicting usage of the callback map, we swap it out. New registration will go to a new map.
    AutoAllocatorObjectPtr<ObjectBeforeCollectCallbackMap, HeapAllocator> oldCallbackMap(
        this->objectBeforeCollectCallbackMap, &HeapAllocator::Instance);
    this->objectBeforeCollectCallbackMap = nullptr;

    bool hasRemainingCallbacks = false;
    oldCallbackMap->MapAndRemoveIf([&](const ObjectBeforeCollectCallbackMap::EntryType& entry)
    {
        const ObjectBeforeCollectCallbackData& data = entry.Value();
        if (data.callback != nullptr)
        {
            void* object = entry.Key();
            // Fire the callback only when the object is actually going away:
            // always at shutdown, otherwise only when it is unmarked.
            if (atShutdown || !this->IsObjectMarked(object))
            {
                if (data.callbackWrapper != nullptr)
                {
                    data.callbackWrapper(data.callback, object, data.callbackState, data.threadContext);
                }
                else
                {
                    data.callback(object, data.callbackState);
                }
            }
            else
            {
                hasRemainingCallbacks = true;
                return false; // Do not remove this entry, remaining callback for future
            }
        }
        return true; // Remove this entry
    });

    // Merge back remaining callbacks if any
    if (hasRemainingCallbacks)
    {
        if (this->objectBeforeCollectCallbackMap == nullptr)
        {
            // No new registrations happened; just restore the old map.
            this->objectBeforeCollectCallbackMap = oldCallbackMap.Detach();
        }
        else
        {
            if (oldCallbackMap->Count() > this->objectBeforeCollectCallbackMap->Count())
            {
                // Swap so that oldCallbackMap is the smaller one
                ObjectBeforeCollectCallbackMap* tmp = oldCallbackMap.Detach();
                *&oldCallbackMap = this->objectBeforeCollectCallbackMap;
                this->objectBeforeCollectCallbackMap = tmp;
            }
            // Copy the (smaller) remaining set into the live map.
            oldCallbackMap->Map([&](void* object, const ObjectBeforeCollectCallbackData& data)
            {
                this->objectBeforeCollectCallbackMap->Item(object, data);
            });
        }
    }

    return true; // maybe called callbacks
}
void Recycler::ClearObjectBeforeCollectCallbacks()
{
    // This is called at shutting down. All objects will be gone. Invoke each registered callback if any.
    ProcessObjectBeforeCollectCallbacks(/*atShutdown*/true);
    // At shutdown every callback fires, so the map must have been consumed.
    Assert(objectBeforeCollectCallbackMap == nullptr);
}
- #ifdef RECYCLER_TEST_SUPPORT
void Recycler::SetCheckFn(BOOL(*checkFn)(char* addr, size_t size))
{
    // Installs a test-support hook invoked from NotifyFree for each swept
    // object. Only valid in recycler-test builds, and not during a collection.
    Assert(BinaryFeatureControl::RecyclerTest());
    this->EnsureNotCollecting();
    this->checkFn = checkFn;
}
- #endif
void
Recycler::NotifyFree(__in char *address, size_t size)
{
    // Central notification that one object of `size` bytes at `address` is
    // being swept: updates ETW, memory tracking, perf counters, verification,
    // allocation-profile records, and collection stats.
    RecyclerVerboseTrace(GetRecyclerFlagsTable(), _u("Sweeping object %p\n"), address);
#ifdef RECYCLER_TEST_SUPPORT
    if (BinaryFeatureControl::RecyclerTest())
    {
        // Test hook installed via SetCheckFn.
        if (checkFn != NULL)
            checkFn(address, size);
    }
#endif

#ifdef ENABLE_JS_ETW
    if (EventEnabledJSCRIPT_RECYCLER_FREE_MEMORY())
    {
        AppendFreeMemoryETWRecord(address, (UINT)size);
    }
#endif
    RecyclerMemoryTracking::ReportFree(this, address, size);
    RECYCLER_PERF_COUNTER_DEC(LiveObject);
    RECYCLER_PERF_COUNTER_SUB(LiveObjectSize, size);
    RECYCLER_PERF_COUNTER_ADD(FreeObjectSize, size);

    // Attribute the free to the small- or large-block counter family based
    // on the aligned size category.
    if (HeapInfo::IsSmallBlockAllocation(HeapInfo::GetAlignedSizeNoCheck(size)))
    {
        RECYCLER_PERF_COUNTER_DEC(SmallHeapBlockLiveObject);
        RECYCLER_PERF_COUNTER_SUB(SmallHeapBlockLiveObjectSize, size);
        RECYCLER_PERF_COUNTER_ADD(SmallHeapBlockFreeObjectSize, size);
    }
    else
    {
        RECYCLER_PERF_COUNTER_DEC(LargeHeapBlockLiveObject);
        RECYCLER_PERF_COUNTER_SUB(LargeHeapBlockLiveObjectSize, size);
        RECYCLER_PERF_COUNTER_ADD(LargeHeapBlockFreeObjectSize, size);
    }
#ifdef RECYCLER_MEMORY_VERIFY
    if (this->VerifyEnabled())
    {
        VerifyCheckPad(address, size);
    }
#endif

#ifdef PROFILE_RECYCLER_ALLOC
    // With KeepRecyclerTrackData, the record is cleared lazily on the next
    // TrackAllocCore for this address instead of here.
    if (!CONFIG_FLAG(KeepRecyclerTrackData))
    {
        TrackFree(address, size);
    }
#endif
#ifdef RECYCLER_STATS
    collectionStats.objectSweptCount++;
    collectionStats.objectSweptBytes += size;
    if (!isForceSweeping)
    {
        // Non-force sweeps return the object to a free list.
        collectionStats.objectSweptFreeListCount++;
        collectionStats.objectSweptFreeListBytes += size;
    }
#endif
}
- #if GLOBAL_ENABLE_WRITE_BARRIER
void
Recycler::RegisterPendingWriteBarrierBlock(void* address, size_t bytes)
{
    // Records a block whose write barrier must stay pending, and sets the
    // software write barrier for the whole range. Active only under the
    // ForceSoftwareWriteBarrier flag.
    if (CONFIG_FLAG(ForceSoftwareWriteBarrier))
    {
#if DBG
        // Mark the verification bits for every pointer-sized slot in range.
        WBSetBitRange((char*)address, (uint)bytes/sizeof(void*));
#endif
        pendingWriteBarrierBlockMap.Item(address, bytes);
        RecyclerWriteBarrierManager::WriteBarrier(address, bytes);
    }
}
void
Recycler::UnRegisterPendingWriteBarrierBlock(void* address)
{
    // Drops a block registered via RegisterPendingWriteBarrierBlock.
    if (CONFIG_FLAG(ForceSoftwareWriteBarrier))
    {
        pendingWriteBarrierBlockMap.Remove(address);
    }
}
- #endif
- #if DBG && GLOBAL_ENABLE_WRITE_BARRIER
void
Recycler::WBVerifyBitIsSet(char* addr, char* target)
{
    // Walks every live recycler to find the heap block owning `addr` and asks
    // it to verify the write-barrier bit. Static: holds recyclerListLock.
    AutoCriticalSection lock(&recyclerListLock);
    Recycler* recycler = Recycler::recyclerList;
    while (recycler)
    {
        // Align the address down to the object alignment before lookup.
        auto heapBlock = recycler->FindHeapBlock((void*)((UINT_PTR)addr&~HeapInfo::ObjectAlignmentMask));
        if (heapBlock)
        {
            heapBlock->WBVerifyBitIsSet(addr);
            break;
        }
        recycler = recycler->next;
    }
}
void
Recycler::WBSetBit(char* addr)
{
    // Sets the write-barrier verification bit for `addr` on whichever live
    // recycler owns it. Only active when both ForceSoftwareWriteBarrier and
    // VerifyBarrierBit are enabled.
    if (CONFIG_FLAG(ForceSoftwareWriteBarrier) && CONFIG_FLAG(VerifyBarrierBit))
    {
        AutoCriticalSection lock(&recyclerListLock);
        Recycler* recycler = Recycler::recyclerList;
        while (recycler)
        {
            // Align the address down to the object alignment before lookup.
            auto heapBlock = recycler->FindHeapBlock((void*)((UINT_PTR)addr&~HeapInfo::ObjectAlignmentMask));
            if (heapBlock)
            {
                heapBlock->WBSetBit(addr);
                break;
            }
            recycler = recycler->next;
        }
    }
}
void
Recycler::WBSetBitRange(char* addr, uint count)
{
    // Sets write-barrier verification bits for `count` pointer-sized slots
    // starting at `addr`, on whichever live recycler owns the address.
    if (CONFIG_FLAG(ForceSoftwareWriteBarrier) && CONFIG_FLAG(VerifyBarrierBit))
    {
        AutoCriticalSection lock(&recyclerListLock);
        Recycler* recycler = Recycler::recyclerList;
        while (recycler)
        {
            // Align the address down to the object alignment before lookup.
            auto heapBlock = recycler->FindHeapBlock((void*)((UINT_PTR)addr&~HeapInfo::ObjectAlignmentMask));
            if (heapBlock)
            {
                heapBlock->WBSetBitRange(addr, count);
                break;
            }
            recycler = recycler->next;
        }
    }
}
- bool
- Recycler::WBCheckIsRecyclerAddress(char* addr)
- {
- AutoCriticalSection lock(&recyclerListLock);
- Recycler* recycler = Recycler::recyclerList;
- while (recycler)
- {
- auto heapBlock = recycler->FindHeapBlock((void*)((UINT_PTR)addr&~HeapInfo::ObjectAlignmentMask));
- if (heapBlock)
- {
- return true;
- }
- recycler = recycler->next;
- }
- return false;
- }
- #endif
- #ifdef RECYCLER_FINALIZE_CHECK
- void
- Recycler::VerifyFinalize()
- {
- // We can't check this if we are marking
- Assert(!this->IsMarkState());
- size_t currentFinalizableObjectCount = this->autoHeap.GetFinalizeCount();
- #if DBG
- Assert(currentFinalizableObjectCount == this->collectionStats.finalizeCount);
- #else
- if (currentFinalizableObjectCount != >this->collectionStats.finalizeCount)
- {
- Output::Print(_u("ERROR: Recycler dropped some finalizable objects"));
- DebugBreak();
- }
- #endif
- }
- #endif
size_t
RecyclerHeapObjectInfo::GetSize() const
{
    // Returns the object's size, taken from the large-block header or the
    // small block's uniform object size, minus the verification pad when
    // memory verification is enabled.
    // NOTE: the if/else below is intentionally spliced across the
    // LARGEHEAPBLOCK_ENCODING #ifdef — exactly one `if` head is compiled in.
    Assert(m_heapBlock);

    size_t size;
#if LARGEHEAPBLOCK_ENCODING
    if (isUsingLargeHeapBlock)
    {
        size = m_largeHeapBlockHeader->objectSize;
    }
#else
    if (m_heapBlock->IsLargeHeapBlock())
    {
        size = ((LargeHeapBlock*)m_heapBlock)->GetObjectSize(m_address);
    }
#endif
    else
    {
        // All small heap block types have the same layout for the object size field.
        size = ((SmallHeapBlock*)m_heapBlock)->GetObjectSize();
    }

#ifdef RECYCLER_MEMORY_VERIFY
    if (m_recycler->VerifyEnabled())
    {
        // The pad length is stored in the object's trailing size_t.
        size -= *(size_t *)(((char *)m_address) + size - sizeof(size_t));
    }
#endif
    return size;
}
// Explicit template instantiations so these allocator specializations are
// emitted in this translation unit for external callers.
template char* Recycler::AllocWithAttributesInlined<(Memory::ObjectInfoBits)32, false>(size_t);
#ifdef RECYCLER_VISITED_HOST
template char* Recycler::AllocZeroWithAttributesInlined<RecyclerVisitedHostTracedFinalizableBits, /* nothrow = */true>(size_t);
template char* Recycler::AllocZeroWithAttributesInlined<RecyclerVisitedHostFinalizableBits, /* nothrow = */true>(size_t);
template char* Recycler::AllocZeroWithAttributesInlined<RecyclerVisitedHostTracedBits, /* nothrow = */true>(size_t);
template char* Recycler::AllocZeroWithAttributesInlined<LeafBit, /* nothrow = */true>(size_t);
#endif
|