//-------------------------------------------------------------------------------------------------------
// Copyright (C) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE.txt file in the project root for full license information.
//-------------------------------------------------------------------------------------------------------
#include "CommonMemoryPch.h"
#ifdef _M_AMD64
#include "amd64.h"
#endif
#ifdef _M_ARM
#include "arm.h"
#endif
#ifdef _M_ARM64
#include "arm64.h"
#endif
#include "Core/BinaryFeatureControl.h"
#include "Common/ThreadService.h"
#include "Memory/AutoAllocatorObjectPtr.h"

DEFINE_RECYCLER_TRACKER_PERF_COUNTER(RecyclerWeakReferenceBase);

#ifdef PROFILE_RECYCLER_ALLOC
struct UnallocatedPortionOfBumpAllocatedBlock
{
};
struct ExplicitFreeListedObject
{
};
Recycler::TrackerData Recycler::TrackerData::EmptyData(&typeid(UnallocatedPortionOfBumpAllocatedBlock), false);
Recycler::TrackerData Recycler::TrackerData::ExplicitFreeListObjectData(&typeid(ExplicitFreeListedObject), false);
#endif

enum ETWEventGCActivationKind : unsigned
{
    ETWEvent_GarbageCollect = 0,        // force in-thread GC
    ETWEvent_ThreadCollect = 1,         // thread GC with wait
    ETWEvent_ConcurrentCollect = 2,
    ETWEvent_PartialCollect = 3,
    ETWEvent_ConcurrentMark = 11,
    ETWEvent_ConcurrentRescan = 12,
    ETWEvent_ConcurrentSweep = 13,
    ETWEvent_ConcurrentTransferSwept = 14,
    ETWEvent_ConcurrentFinishMark = 15,
};
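
// DefaultRecyclerCollectionWrapper is the default, host-neutral collection
// wrapper (Recycler::collectionWrapper is initialized to this instance). It
// forwards collection and dispose requests straight to the Recycler, bailing
// out when collection is disabled (heap enumeration, projection ref-count
// tracking).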
DefaultRecyclerCollectionWrapper DefaultRecyclerCollectionWrapper::Instance;

inline bool
DefaultRecyclerCollectionWrapper::IsCollectionDisabled(Recycler * recycler)
{
    // GC shouldn't be triggered during heap enum, unless we missed a case where it allocates memory (which
    // shouldn't happen during heap enum) or where we explicitly allow allocation.
    // REVIEW: isHeapEnumInProgress should have been a collection state, checked earlier to avoid a check here.
    // Collection is also disabled in VarDispEx because it can be called from projection re-entrance, as ASTA
    // allows QI/AddRef/Release to come back.
    bool collectionDisabled = recycler->IsCollectionDisabled();
#if DBG
    if (collectionDisabled)
    {
        // disabled collection should only happen if we allowed allocation during heap enum
        if (recycler->IsHeapEnumInProgress())
        {
            Assert(recycler->AllowAllocationDuringHeapEnum());
        }
        else
        {
#ifdef ENABLE_PROJECTION
            Assert(recycler->IsInRefCountTrackingForProjection());
#else
            Assert(false);
#endif
        }
    }
#endif
    return collectionDisabled;
}

BOOL DefaultRecyclerCollectionWrapper::ExecuteRecyclerCollectionFunction(Recycler * recycler, CollectionFunction function, CollectionFlags flags)
{
    if (IsCollectionDisabled(recycler))
    {
        return FALSE;
    }
    BOOL ret = FALSE;
    BEGIN_NO_EXCEPTION
    {
        ret = (recycler->*(function))(flags);
    }
    END_NO_EXCEPTION;
    return ret;
}

void
DefaultRecyclerCollectionWrapper::DisposeObjects(Recycler * recycler)
{
    if (IsCollectionDisabled(recycler))
    {
        return;
    }
    BEGIN_NO_EXCEPTION
    {
        recycler->DisposeObjects();
    }
    END_NO_EXCEPTION;
}

static void* GetStackBase();
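
// Explicit template instantiations: emit the force-inlined hot allocation
// paths in this translation unit so these specializations are available to
// callers at link time.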
template _ALWAYSINLINE char * Recycler::AllocWithAttributesInlined<NoBit, false>(size_t size);
template _ALWAYSINLINE char* Recycler::RealAlloc<NoBit, false>(HeapInfo* heap, size_t size);
template _ALWAYSINLINE _Ret_notnull_ void * __cdecl operator new<Recycler>(size_t byteSize, Recycler * alloc, char * (Recycler::*AllocFunc)(size_t));

Recycler::Recycler(AllocationPolicyManager * policyManager, IdleDecommitPageAllocator * pageAllocator, void (*outOfMemoryFunc)(), Js::ConfigFlagsTable& configFlagsTable) :
    collectionState(CollectionStateNotCollecting),
    recyclerFlagsTable(configFlagsTable),
    recyclerPageAllocator(this, policyManager, configFlagsTable, RecyclerHeuristic::Instance.DefaultMaxFreePageCount, RecyclerHeuristic::Instance.DefaultMaxAllocPageCount),
    recyclerLargeBlockPageAllocator(this, policyManager, configFlagsTable, RecyclerHeuristic::Instance.DefaultMaxFreePageCount),
    threadService(nullptr),
#ifdef RECYCLER_WRITE_BARRIER_ALLOC_SEPARATE_PAGE
    recyclerWithBarrierPageAllocator(this, policyManager, configFlagsTable, RecyclerHeuristic::Instance.DefaultMaxFreePageCount, PageAllocator::DefaultMaxAllocPageCount, true),
#endif
    threadPageAllocator(pageAllocator),
    markPagePool(configFlagsTable),
    parallelMarkPagePool1(configFlagsTable),
    parallelMarkPagePool2(configFlagsTable),
    parallelMarkPagePool3(configFlagsTable),
    markContext(this, &this->markPagePool),
    parallelMarkContext1(this, &this->parallelMarkPagePool1),
    parallelMarkContext2(this, &this->parallelMarkPagePool2),
    parallelMarkContext3(this, &this->parallelMarkPagePool3),
#if ENABLE_PARTIAL_GC
    clientTrackedObjectAllocator(_u("CTO-List"), GetPageAllocator(), Js::Throw::OutOfMemory),
#endif
    outOfMemoryFunc(outOfMemoryFunc),
#ifdef RECYCLER_TEST_SUPPORT
    checkFn(NULL),
#endif
    externalRootMarker(NULL),
    externalRootMarkerContext(NULL),
    recyclerSweep(nullptr),
    inEndMarkOnLowMemory(false),
    enableScanInteriorPointers(CUSTOM_CONFIG_FLAG(configFlagsTable, RecyclerForceMarkInterior)),
    enableScanImplicitRoots(false),
    disableCollectOnAllocationHeuristics(false),
    skipStack(false),
    mainThreadHandle(NULL),
#if ENABLE_CONCURRENT_GC
    backgroundFinishMarkCount(0),
    hasPendingUnpinnedObject(false),
    hasPendingConcurrentFindRoot(false),
    queueTrackedObject(false),
    enableConcurrentMark(false), // Default to non-concurrent
    enableParallelMark(false),
    enableConcurrentSweep(false),
    concurrentThread(NULL),
    concurrentWorkReadyEvent(NULL),
    concurrentWorkDoneEvent(NULL),
    parallelThread1(this, &Recycler::ParallelWorkFunc<0>),
    parallelThread2(this, &Recycler::ParallelWorkFunc<1>),
    priorityBoost(false),
    isAborting(false),
#if DBG
    concurrentThreadExited(true),
    isProcessingTrackedObjects(false),
    hasIncompleteDoCollect(false),
    isConcurrentGCOnIdle(false),
    isFinishGCOnIdle(false),
#endif
#ifdef IDLE_DECOMMIT_ENABLED
    concurrentIdleDecommitEvent(nullptr),
#endif
#endif
#if DBG
    isExternalStackSkippingGC(false),
    isProcessingRescan(false),
#endif
#if ENABLE_PARTIAL_GC
    inPartialCollectMode(false),
    scanPinnedObjectMap(false),
    partialUncollectedAllocBytes(0),
    uncollectedNewPageCountPartialCollect((size_t)-1),
#if ENABLE_CONCURRENT_GC
    partialConcurrentNextCollection(false),
#endif
#ifdef RECYCLER_STRESS
    forcePartialScanStack(false),
#endif
#endif
#if defined(RECYCLER_DUMP_OBJECT_GRAPH) || defined(LEAK_REPORT) || defined(CHECK_MEMORY_LEAK)
    isPrimaryMarkContextInitialized(false),
#endif
    allowDispose(false),
    inDisposeWrapper(false),
    hasDisposableObject(false),
    tickCountNextDispose(0),
    hasPendingTransferDisposedObjects(false),
    transientPinnedObject(nullptr),
    pinnedObjectMap(1024, HeapAllocator::GetNoMemProtectInstance()),
    weakReferenceMap(1024, HeapAllocator::GetNoMemProtectInstance()),
    weakReferenceCleanupId(0),
    collectionWrapper(&DefaultRecyclerCollectionWrapper::Instance),
    isScriptActive(false),
    isInScript(false),
    isShuttingDown(false),
    inExhaustiveCollection(false),
    hasExhaustiveCandidate(false),
    inDecommitNowCollection(false),
    inCacheCleanupCollection(false),
    hasPendingDeleteGuestArena(false),
    needOOMRescan(false),
#if ENABLE_CONCURRENT_GC && ENABLE_PARTIAL_GC
    hasBackgroundFinishPartial(false),
#endif
    decommitOnFinish(false)
#ifdef PROFILE_EXEC
    , profiler(nullptr)
    , backgroundProfiler(nullptr)
    , backgroundProfilerPageAllocator(nullptr, configFlagsTable, PageAllocatorType_GCThread)
    , backgroundProfilerArena()
#endif
#ifdef PROFILE_MEM
    , memoryData(nullptr)
#endif
#ifdef RECYCLER_DUMP_OBJECT_GRAPH
    , objectGraphDumper(nullptr)
    , dumpObjectOnceOnCollect(false)
#endif
#ifdef PROFILE_RECYCLER_ALLOC
    , trackerDictionary(nullptr)
#endif
#ifdef HEAP_ENUMERATION_VALIDATION
    , pfPostHeapEnumScanCallback(nullptr)
#endif
#ifdef NTBUILD
    , telemetryBlock(&localTelemetryBlock)
#endif
#ifdef ENABLE_JS_ETW
    , bulkFreeMemoryWrittenCount(0)
#endif
#ifdef RECYCLER_PAGE_HEAP
    , isPageHeapEnabled(false)
    , capturePageHeapAllocStack(false)
    , capturePageHeapFreeStack(false)
#endif
    , objectBeforeCollectCallbackMap(nullptr)
    , objectBeforeCollectCallbackState(ObjectBeforeCollectCallback_None)
{
#ifdef RECYCLER_MARK_TRACK
    this->markMap = NoCheckHeapNew(MarkMap, &NoCheckHeapAllocator::Instance, 163, &markMapCriticalSection);
    markContext.SetMarkMap(markMap);
    parallelMarkContext1.SetMarkMap(markMap);
    parallelMarkContext2.SetMarkMap(markMap);
    parallelMarkContext3.SetMarkMap(markMap);
#endif
#ifdef RECYCLER_MEMORY_VERIFY
    verifyPad = GetRecyclerFlagsTable().RecyclerVerifyPadSize;
    verifyEnabled = GetRecyclerFlagsTable().IsEnabled(Js::RecyclerVerifyFlag);
    if (verifyEnabled)
    {
        ForEachPageAllocator([](IdleDecommitPageAllocator* pageAlloc)
        {
            pageAlloc->EnableVerify();
        });
    }
#endif
#ifdef RECYCLER_NO_PAGE_REUSE
    if (GetRecyclerFlagsTable().IsEnabled(Js::RecyclerNoPageReuseFlag))
    {
        ForEachPageAllocator([](IdleDecommitPageAllocator* pageAlloc)
        {
            pageAlloc->DisablePageReuse();
        });
    }
#endif
    this->inDispose = false;
#if DBG
    this->heapBlockCount = 0;
    this->collectionCount = 0;
    this->disableThreadAccessCheck = false;
#if ENABLE_CONCURRENT_GC
    this->disableConcurrentThreadExitedCheck = false;
#endif
#endif
#if DBG || defined RECYCLER_TRACE
    this->inResolveExternalWeakReferences = false;
#endif
#if DBG || defined(RECYCLER_STATS)
    isForceSweeping = false;
#endif
#ifdef RECYCLER_FINALIZE_CHECK
    collectionStats.finalizeCount = 0;
#endif
    RecyclerMemoryTracking::ReportRecyclerCreate(this);
#if DBG_DUMP
    forceTraceMark = false;
    recyclerPageAllocator.debugName = _u("Recycler");
    recyclerLargeBlockPageAllocator.debugName = _u("RecyclerLargeBlock");
#ifdef RECYCLER_WRITE_BARRIER_ALLOC_SEPARATE_PAGE
    recyclerWithBarrierPageAllocator.debugName = _u("RecyclerWithBarrier");
#endif
#endif
    isHeapEnumInProgress = false;
    isCollectionDisabled = false;
#if DBG
    allowAllocationDuringRenentrance = false;
    allowAllocationDuringHeapEnum = false;
#ifdef ENABLE_PROJECTION
    isInRefCountTrackingForProjection = false;
#endif
#endif
    ScheduleNextCollection();
#if defined(RECYCLER_DUMP_OBJECT_GRAPH) || defined(LEAK_REPORT) || defined(CHECK_MEMORY_LEAK)
    this->inDllCanUnloadNow = false;
    this->inDetachProcess = false;
#endif
#ifdef NTBUILD
    memset(&localTelemetryBlock, 0, sizeof(localTelemetryBlock));
#endif
#ifdef ENABLE_DEBUG_CONFIG_OPTIONS
    // The recycler requires at least Recycler::PrimaryMarkStackReservedPageCount pages on the main mark context to function properly.
    this->markContext.SetMaxPageCount(max(static_cast<size_t>(GetRecyclerFlagsTable().MaxMarkStackPageCount), static_cast<size_t>(Recycler::PrimaryMarkStackReservedPageCount)));
    this->parallelMarkContext1.SetMaxPageCount(GetRecyclerFlagsTable().MaxMarkStackPageCount);
    this->parallelMarkContext2.SetMaxPageCount(GetRecyclerFlagsTable().MaxMarkStackPageCount);
    this->parallelMarkContext3.SetMaxPageCount(GetRecyclerFlagsTable().MaxMarkStackPageCount);
    if (GetRecyclerFlagsTable().IsEnabled(Js::GCMemoryThresholdFlag))
    {
        // Note, we can't do this in the constructor for RecyclerHeuristic::Instance because it runs before config is processed
        RecyclerHeuristic::Instance.ConfigureBaseFactor(GetRecyclerFlagsTable().GCMemoryThreshold);
    }
#endif
}

#if DBG
void
Recycler::SetDisableThreadAccessCheck()
{
    recyclerPageAllocator.SetDisableThreadAccessCheck();
    recyclerLargeBlockPageAllocator.SetDisableThreadAccessCheck();
#ifdef RECYCLER_WRITE_BARRIER_ALLOC_SEPARATE_PAGE
    recyclerWithBarrierPageAllocator.SetDisableThreadAccessCheck();
#endif
    disableThreadAccessCheck = true;
}
#endif
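
// MemProtect mode configures the recycler for conservative, externally rooted
// use: interior pointers and implicit roots are scanned, allocation-based
// collection heuristics are turned off, and (under RECYCLER_STRESS) the
// MemProtectHeap* stress flags drive the stress behavior instead.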
void
Recycler::SetMemProtectMode()
{
    this->enableScanInteriorPointers = true;
    this->enableScanImplicitRoots = true;
    this->disableCollectOnAllocationHeuristics = true;
#ifdef RECYCLER_STRESS
    this->recyclerStress = GetRecyclerFlagsTable().MemProtectHeapStress;
#if ENABLE_CONCURRENT_GC
    this->recyclerBackgroundStress = GetRecyclerFlagsTable().MemProtectHeapBackgroundStress;
    this->recyclerConcurrentStress = GetRecyclerFlagsTable().MemProtectHeapConcurrentStress;
    this->recyclerConcurrentRepeatStress = GetRecyclerFlagsTable().MemProtectHeapConcurrentRepeatStress;
#endif
#if ENABLE_PARTIAL_GC
    this->recyclerPartialStress = GetRecyclerFlagsTable().MemProtectHeapPartialStress;
#endif
#endif
}
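
// Reports the combined footprint of the recycler page allocators (used,
// reserved, and committed bytes, plus segment count) through the
// MEMPROTECT_GC_HEAP_SIZE ETW event, when that event is enabled.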
void
Recycler::LogMemProtectHeapSize(bool fromGC)
{
    Assert(IsMemProtectMode());
#ifdef ENABLE_JS_ETW
    if (IS_JS_ETW(EventEnabledMEMPROTECT_GC_HEAP_SIZE()))
    {
        IdleDecommitPageAllocator* recyclerPageAllocator = GetRecyclerPageAllocator();
        IdleDecommitPageAllocator* recyclerLeafPageAllocator = GetRecyclerLeafPageAllocator();
        IdleDecommitPageAllocator* recyclerLargeBlockPageAllocator = GetRecyclerLargeBlockPageAllocator();
#ifdef RECYCLER_WRITE_BARRIER_ALLOC_SEPARATE_PAGE
        IdleDecommitPageAllocator* recyclerWithBarrierPageAllocator = GetRecyclerWithBarrierPageAllocator();
#endif
        size_t usedBytes = (recyclerPageAllocator->usedBytes + recyclerLeafPageAllocator->usedBytes +
            recyclerLargeBlockPageAllocator->usedBytes);
        size_t reservedBytes = (recyclerPageAllocator->reservedBytes + recyclerLeafPageAllocator->reservedBytes +
            recyclerLargeBlockPageAllocator->reservedBytes);
        size_t committedBytes = (recyclerPageAllocator->committedBytes + recyclerLeafPageAllocator->committedBytes +
            recyclerLargeBlockPageAllocator->committedBytes);
        size_t numberOfSegments = (recyclerPageAllocator->numberOfSegments +
            recyclerLeafPageAllocator->numberOfSegments +
            recyclerLargeBlockPageAllocator->numberOfSegments);
#ifdef RECYCLER_WRITE_BARRIER_ALLOC_SEPARATE_PAGE
        usedBytes += recyclerWithBarrierPageAllocator->usedBytes;
        reservedBytes += recyclerWithBarrierPageAllocator->reservedBytes;
        committedBytes += recyclerWithBarrierPageAllocator->committedBytes;
        numberOfSegments += recyclerWithBarrierPageAllocator->numberOfSegments;
#endif
        JS_ETW(EventWriteMEMPROTECT_GC_HEAP_SIZE(this, usedBytes, reservedBytes, committedBytes, numberOfSegments, fromGC));
    }
#endif
}

#if DBG
void
Recycler::SetDisableConcurrentThreadExitedCheck()
{
#if ENABLE_CONCURRENT_GC
    disableConcurrentThreadExitedCheck = true;
#endif
#ifdef RECYCLER_STRESS
    this->recyclerStress = false;
#if ENABLE_CONCURRENT_GC
    this->recyclerBackgroundStress = false;
    this->recyclerConcurrentStress = false;
    this->recyclerConcurrentRepeatStress = false;
#endif
#if ENABLE_PARTIAL_GC
    this->recyclerPartialStress = false;
#endif
#endif
}
#endif

#if DBG
void
Recycler::ResetThreadId()
{
    // Transfer all the page allocators to the current thread id
    ForEachPageAllocator([](IdleDecommitPageAllocator* pageAlloc)
    {
        pageAlloc->ClearConcurrentThreadId();
    });
#if ENABLE_CONCURRENT_GC
    if (this->IsConcurrentEnabled())
    {
        markContext.GetPageAllocator()->ClearConcurrentThreadId();
    }
#endif
#if defined(DBG) && defined(PROFILE_EXEC)
    this->backgroundProfilerPageAllocator.ClearConcurrentThreadId();
#endif
}
#endif
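
// Destructor teardown order: stop further collection, run leak reporting,
// clean up any state left by an aborted concurrent sweep, close the page
// allocators, release the mark contexts, and finally null out all weak
// references so finalizers that run during shutdown see their targets as
// already collected.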
Recycler::~Recycler()
{
#if ENABLE_CONCURRENT_GC
    Assert(!this->isAborting);
#endif
#if DBG
    if (CONFIG_FLAG(ForceSoftwareWriteBarrier))
    {
        if (recyclerList == this)
        {
            recyclerList = this->next;
        }
        else
        {
            Recycler* list = recyclerList;
            while (list->next != this)
            {
                list = list->next;
            }
            list->next = this->next;
        }
    }
#endif
    // Stop any further collection
    this->isShuttingDown = true;
#if DBG
    this->ResetThreadId();
#endif
#ifdef ENABLE_JS_ETW
    FlushFreeRecord();
#endif
    ClearObjectBeforeCollectCallbacks();
#ifdef RECYCLER_DUMP_OBJECT_GRAPH
    if (GetRecyclerFlagsTable().DumpObjectGraphOnExit)
    {
        // Always skip stack here, as we may be running the dtor on another thread.
        RecyclerObjectGraphDumper::Param param = { 0 };
        param.skipStack = true;
        this->DumpObjectGraph(&param);
    }
#endif
    AUTO_LEAK_REPORT_SECTION(this->GetRecyclerFlagsTable(), _u("Recycler (%p): %s"), this, this->IsInDllCanUnloadNow()? _u("DllCanUnloadNow") :
        this->IsInDetachProcess()? _u("DetachProcess") : _u("Destructor"));
#ifdef LEAK_REPORT
    ReportLeaks();
#endif
#ifdef CHECK_MEMORY_LEAK
    CheckLeaks(this->IsInDllCanUnloadNow()? _u("DllCanUnloadNow") : this->IsInDetachProcess()? _u("DetachProcess") : _u("Destructor"));
#endif
    AUTO_LEAK_REPORT_SECTION_0(this->GetRecyclerFlagsTable(), _u("Skipped finalizers"));
#if ENABLE_CONCURRENT_GC
    Assert(concurrentThread == nullptr);
    // We only sometimes clean up the state after aborting a concurrent collection,
    // so we still need to delete any heap blocks held by the recyclerSweep.
    if (recyclerSweep != nullptr)
    {
        recyclerSweep->ShutdownCleanup();
        recyclerSweep = nullptr;
    }
    if (mainThreadHandle != nullptr)
    {
        CloseHandle(mainThreadHandle);
    }
#endif
    recyclerPageAllocator.Close();
    recyclerLargeBlockPageAllocator.Close();
#ifdef RECYCLER_WRITE_BARRIER_ALLOC_SEPARATE_PAGE
    recyclerWithBarrierPageAllocator.Close();
#endif
    markContext.Release();
    parallelMarkContext1.Release();
    parallelMarkContext2.Release();
    parallelMarkContext3.Release();
    // Clean up the weak reference map so that
    // objects being finalized can safely refer to weak references
    // (this could otherwise become a problem for weak references held
    // to large objects since their block would be destroyed before
    // the finalizer was run)
    // When the recycler is shutting down, all objects are going to be reclaimed
    // so null out the weak references so that anyone relying on weak
    // references simply thinks the object has been reclaimed
    weakReferenceMap.Map([](RecyclerWeakReferenceBase * weakRef) -> bool
    {
        weakRef->strongRef = nullptr;
        // Put in a dummy heap block so that we can still do the isPendingConcurrentSweep check first.
        weakRef->strongRefHeapBlock = &CollectedRecyclerWeakRefHeapBlock::Instance;
        // Remove
        return false;
    });
#if ENABLE_PARTIAL_GC
    clientTrackedObjectList.Clear(&this->clientTrackedObjectAllocator);
#endif
#ifdef PROFILE_RECYCLER_ALLOC
    if (trackerDictionary != nullptr)
    {
        this->trackerDictionary->Map([](type_info const *, TrackerItem * item)
        {
            NoCheckHeapDelete(item);
        });
        NoCheckHeapDelete(this->trackerDictionary);
        this->trackerDictionary = nullptr;
        ::DeleteCriticalSection(&trackerCriticalSection);
    }
#endif
#ifdef RECYCLER_MARK_TRACK
    NoCheckHeapDelete(this->markMap);
    this->markMap = nullptr;
#endif
#if DBG
    // Disable idle decommit asserts
    ForEachPageAllocator([](IdleDecommitPageAllocator* pageAlloc)
    {
        pageAlloc->ShutdownIdleDecommit();
    });
#endif
    Assert(this->collectionState == CollectionStateExit || this->collectionState == CollectionStateNotCollecting);
#if ENABLE_CONCURRENT_GC
    Assert(this->disableConcurrentThreadExitedCheck || this->concurrentThreadExited == true);
#endif
}
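
// Binds the recycler to the current thread: duplicates the thread handle into
// mainThreadHandle and records the stack base used when scanning this
// thread's stack for roots.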
void
Recycler::SetIsThreadBound()
{
    Assert(mainThreadHandle == nullptr);
    ::DuplicateHandle(::GetCurrentProcess(), ::GetCurrentThread(), ::GetCurrentProcess(), &mainThreadHandle,
        0, FALSE, DUPLICATE_SAME_ACCESS);
    stackBase = GetStackBase();
}
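
// Pinning: RootAddRef/RootRelease maintain an external ref count per object
// in pinnedObjectMap; pinned objects are treated as roots during marking. As
// an optimization, the most recently RootAddRef'ed object is held in
// transientPinnedObject and is only committed to the map by the *next*
// RootAddRef call (or dropped cheaply if RootRelease arrives first).
//
// A minimal usage sketch (hypothetical host code; 'recycler' and 'obj' are
// assumed to be a live Recycler and a recycler-allocated object):
//
//     uint count;
//     recycler->RootAddRef(obj, &count);   // obj now survives collections
//     /* ... obj may be referenced from outside the GC heap ... */
//     recycler->RootRelease(obj, &count);  // collectible again once count hits 0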
void
Recycler::RootAddRef(void* obj, uint *count)
{
    Assert(this->IsValidObject(obj));
    if (transientPinnedObject)
    {
        PinRecord& refCount = pinnedObjectMap.GetReference(transientPinnedObject);
        ++refCount;
        if (refCount == 1)
        {
            this->scanPinnedObjectMap = true;
            RECYCLER_PERF_COUNTER_INC(PinnedObject);
        }
#ifdef STACK_BACK_TRACE
#if defined(CHECK_MEMORY_LEAK) || defined(LEAK_REPORT)
        if (GetRecyclerFlagsTable().LeakStackTrace)
        {
            StackBackTraceNode::Prepend(&NoCheckHeapAllocator::Instance, refCount.stackBackTraces,
                transientPinnedObjectStackBackTrace);
        }
#endif
#endif
    }
    if (count != nullptr)
    {
        PinRecord* refCount = pinnedObjectMap.TryGetReference(obj);
        *count = (refCount != nullptr) ? (*refCount + 1) : 1;
    }
    transientPinnedObject = obj;
#ifdef STACK_BACK_TRACE
#if defined(CHECK_MEMORY_LEAK) || defined(LEAK_REPORT)
    if (GetRecyclerFlagsTable().LeakStackTrace)
    {
        transientPinnedObjectStackBackTrace = StackBackTrace::Capture(&NoCheckHeapAllocator::Instance);
    }
#endif
#endif
}
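
// Unpin one reference. The fast path catches the object still sitting in
// transientPinnedObject (never committed to pinnedObjectMap); otherwise the
// map entry's count is decremented and the entry is removed once it reaches
// zero (deferred when a concurrent find-root pass is in flight).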
void
Recycler::RootRelease(void* obj, uint *count)
{
    Assert(this->IsValidObject(obj));
    if (transientPinnedObject == obj)
    {
        transientPinnedObject = nullptr;
        if (count != nullptr)
        {
            PinRecord *refCount = pinnedObjectMap.TryGetReference(obj);
            *count = (refCount != nullptr) ? *refCount : 0;
        }
#ifdef STACK_BACK_TRACE
#if defined(CHECK_MEMORY_LEAK) || defined(LEAK_REPORT)
        if (GetRecyclerFlagsTable().LeakStackTrace)
        {
            transientPinnedObjectStackBackTrace->Delete(&NoCheckHeapAllocator::Instance);
        }
#endif
#endif
    }
    else
    {
        PinRecord *refCount = pinnedObjectMap.TryGetReference(obj);
        if (refCount == nullptr)
        {
            if (count != nullptr)
            {
                *count = (uint)-1;
            }
            // REVIEW: throw if not found
            Assert(false);
            return;
        }
        uint newRefCount = (--(*refCount));
        if (count != nullptr)
        {
            *count = newRefCount;
        }
        if (newRefCount != 0)
        {
#ifdef STACK_BACK_TRACE
#if defined(CHECK_MEMORY_LEAK) || defined(LEAK_REPORT)
            if (GetRecyclerFlagsTable().LeakStackTrace)
            {
                StackBackTraceNode::Prepend(&NoCheckHeapAllocator::Instance, refCount->stackBackTraces,
                    StackBackTrace::Capture(&NoCheckHeapAllocator::Instance));
            }
#endif
#endif
            return;
        }
#ifdef STACK_BACK_TRACE
#if defined(CHECK_MEMORY_LEAK) || defined(LEAK_REPORT)
        StackBackTraceNode::DeleteAll(&NoCheckHeapAllocator::Instance, refCount->stackBackTraces);
        refCount->stackBackTraces = nullptr;
#endif
#endif
#if ENABLE_CONCURRENT_GC
  648. // Don't delete the entry if we are in concurrent find root state
  649. // We will delete it later on in-thread find root
  650. if (this->hasPendingConcurrentFindRoot)
  651. {
  652. this->hasPendingUnpinnedObject = true;
  653. }
  654. else
  655. #endif
  656. {
  657. pinnedObjectMap.Remove(obj);
  658. }
  659. RECYCLER_PERF_COUNTER_DEC(PinnedObject);
  660. }
  661. // Not a real collection. This doesn't activate GC.
  662. // This tell the GC that we have an exhaustive candidate, and should trigger
  663. // another GC if there is an exhaustive GC going on.
  664. this->CollectNow<CollectExhaustiveCandidate>();
  665. }
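// Illustrative sketch (excluded from the build): how a host might pair
// RootAddRef/RootRelease to keep an object alive while native code holds the
// only reference to it. The names "HostKeepAliveExample" and "HostCallback"
// are assumptions for this example. Note the transient-pin behavior above:
// the object most recently passed to RootAddRef is held as
// transientPinnedObject, and its pin count is only recorded in
// pinnedObjectMap once another object is root-added after it.
#if 0
void HostKeepAliveExample(Recycler * recycler, void * obj)
{
    uint pinCount = 0;
    recycler->RootAddRef(obj, &pinCount);   // pin: reported count is at least 1
    HostCallback(obj);                      // obj can't be collected here, even if
                                            // no GC-visible pointer to it remains
    recycler->RootRelease(obj, &pinCount);  // unpin: entry removed when count hits 0
}
#endif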
#if DBG
Recycler* Recycler::recyclerList = nullptr;
#endif
void
Recycler::Initialize(const bool forceInThread, JsUtil::ThreadService *threadService, const bool deferThreadStartup
#ifdef RECYCLER_PAGE_HEAP
    , PageHeapMode pageheapmode
    , bool captureAllocCallStack
    , bool captureFreeCallStack
#endif
    )
{
#ifdef PROFILE_RECYCLER_ALLOC
    this->InitializeProfileAllocTracker();
#endif
#ifdef ENABLE_DEBUG_CONFIG_OPTIONS
    this->disableCollection = CUSTOM_PHASE_OFF1(GetRecyclerFlagsTable(), Js::RecyclerPhase);
#endif
#if ENABLE_CONCURRENT_GC
    this->skipStack = false;
#endif
#if ENABLE_PARTIAL_GC
#if ENABLE_DEBUG_CONFIG_OPTIONS
    this->enablePartialCollect = !CUSTOM_PHASE_OFF1(GetRecyclerFlagsTable(), Js::PartialCollectPhase);
#else
    this->enablePartialCollect = true;
#endif
#endif
#ifdef PROFILE_MEM
    this->memoryData = MemoryProfiler::GetRecyclerMemoryData();
#endif
#if DBG || DBG_DUMP || defined(RECYCLER_TRACE)
    mainThreadId = GetCurrentThreadContextId();
#endif
#ifdef RECYCLER_TRACE
    collectionParam.domCollect = false;
#endif

#if defined(PROFILE_RECYCLER_ALLOC) || defined(RECYCLER_MEMORY_VERIFY) || defined(MEMSPECT_TRACKING) || defined(ETW_MEMORY_TRACKING)
    bool dontNeedDetailedTracking = false;
#if defined(PROFILE_RECYCLER_ALLOC)
    dontNeedDetailedTracking = dontNeedDetailedTracking || this->trackerDictionary == nullptr;
#endif
#if defined(RECYCLER_MEMORY_VERIFY)
    dontNeedDetailedTracking = dontNeedDetailedTracking || !this->verifyEnabled;
#endif
    // If we need detailed tracking, we force the allocation fast path in the JIT to fail and go
    // to the helper, so there is no need for the TrackNativeAllocatedMemoryBlock callback.
    if (dontNeedDetailedTracking)
    {
        autoHeap.Initialize(this, TrackNativeAllocatedMemoryBlock
#ifdef RECYCLER_PAGE_HEAP
            , pageheapmode
            , captureAllocCallStack
            , captureFreeCallStack
#endif
            );
    }
    else
    {
        autoHeap.Initialize(this
#ifdef RECYCLER_PAGE_HEAP
            , pageheapmode
            , captureAllocCallStack
            , captureFreeCallStack
#endif
            );
    }
#else
    autoHeap.Initialize(this
#ifdef RECYCLER_PAGE_HEAP
        , pageheapmode
        , captureAllocCallStack
        , captureFreeCallStack
#endif
        );
#endif

    markContext.Init(Recycler::PrimaryMarkStackReservedPageCount);
#if defined(RECYCLER_DUMP_OBJECT_GRAPH) || defined(LEAK_REPORT) || defined(CHECK_MEMORY_LEAK)
    isPrimaryMarkContextInitialized = true;
#endif

#ifdef RECYCLER_PAGE_HEAP
    isPageHeapEnabled = autoHeap.IsPageHeapEnabled();
    if (IsPageHeapEnabled())
    {
        capturePageHeapAllocStack = autoHeap.captureAllocCallStack;
        capturePageHeapFreeStack = autoHeap.captureFreeCallStack;
    }
#endif

#ifdef RECYCLER_STRESS
#if ENABLE_PARTIAL_GC
    if (GetRecyclerFlagsTable().RecyclerTrackStress)
    {
        // Disable partial if we are doing track stress, since partial relies on ClientTracked processing
        // and track stress doesn't support this.
        this->enablePartialCollect = false;
    }
#endif
    this->recyclerStress = GetRecyclerFlagsTable().RecyclerStress;
#if ENABLE_CONCURRENT_GC
    this->recyclerBackgroundStress = GetRecyclerFlagsTable().RecyclerBackgroundStress;
    this->recyclerConcurrentStress = GetRecyclerFlagsTable().RecyclerConcurrentStress;
    this->recyclerConcurrentRepeatStress = GetRecyclerFlagsTable().RecyclerConcurrentRepeatStress;
#endif
#if ENABLE_PARTIAL_GC
    this->recyclerPartialStress = GetRecyclerFlagsTable().RecyclerPartialStress;
#endif
#endif

#ifdef RECYCLER_WRITE_WATCH
    bool needWriteWatch = false;
#endif
#if ENABLE_CONCURRENT_GC
    // Default to non-concurrent
    uint numProcs = (uint)AutoSystemInfo::Data.GetNumberOfPhysicalProcessors();
    this->maxParallelism = (numProcs > 4) || CUSTOM_PHASE_FORCE1(GetRecyclerFlagsTable(), Js::ParallelMarkPhase) ? 4 : numProcs;

    if (forceInThread)
    {
        // Requested a non-concurrent recycler
        this->disableConcurrent = true;
    }
#if ENABLE_DEBUG_CONFIG_OPTIONS
    else if (CUSTOM_PHASE_OFF1(GetRecyclerFlagsTable(), Js::ConcurrentCollectPhase))
    {
        // Concurrent collection disabled
        this->disableConcurrent = true;
    }
    else if (CUSTOM_PHASE_OFF1(GetRecyclerFlagsTable(), Js::ConcurrentMarkPhase) &&
        CUSTOM_PHASE_OFF1(GetRecyclerFlagsTable(), Js::ParallelMarkPhase) &&
        CUSTOM_PHASE_OFF1(GetRecyclerFlagsTable(), Js::ConcurrentSweepPhase))
    {
        // All concurrent collection phases disabled
        this->disableConcurrent = true;
    }
#endif
    else
    {
        this->disableConcurrent = false;
        if (deferThreadStartup || EnableConcurrent(threadService, false))
        {
#ifdef RECYCLER_WRITE_WATCH
            needWriteWatch = true;
#endif
        }
    }
#endif // ENABLE_CONCURRENT_GC

#if ENABLE_PARTIAL_GC
    if (this->enablePartialCollect)
    {
#ifdef RECYCLER_WRITE_WATCH
        needWriteWatch = true;
#endif
    }
#endif

#if ENABLE_CONCURRENT_GC
#ifdef RECYCLER_WRITE_WATCH
    if (!CONFIG_FLAG(ForceSoftwareWriteBarrier))
    {
        if (needWriteWatch)
        {
            // Need write watch to support concurrent and/or partial collection
            recyclerPageAllocator.EnableWriteWatch();
            recyclerLargeBlockPageAllocator.EnableWriteWatch();
        }
    }
#endif
#else
    Assert(!needWriteWatch);
#endif

#if DBG
    if (CONFIG_FLAG(ForceSoftwareWriteBarrier))
    {
        this->next = recyclerList;
        recyclerList = this;
    }
#endif
}
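// Worked example (illustration only) of the maxParallelism heuristic above:
// since ?: binds less tightly than ||, the expression reads
//     ((numProcs > 4) || CUSTOM_PHASE_FORCE1(..., Js::ParallelMarkPhase)) ? 4 : numProcs
// so an 8-processor machine gets maxParallelism == 4, a 2-processor machine
// gets 2, and forcing Js::ParallelMarkPhase raises the 2-processor case to 4 as well.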
BOOL
Recycler::CollectionInProgress() const
{
    return collectionState != CollectionStateNotCollecting;
}

BOOL
Recycler::IsExiting() const
{
    return (collectionState == Collection_Exit);
}

BOOL
Recycler::IsSweeping() const
{
    return ((collectionState & Collection_Sweep) == Collection_Sweep);
}
void
Recycler::SetIsScriptActive(bool isScriptActive)
{
    Assert(this->isInScript);
    Assert(this->isScriptActive != isScriptActive);
    this->isScriptActive = isScriptActive;
    if (isScriptActive)
    {
        this->tickCountNextDispose = ::GetTickCount() + RecyclerHeuristic::TickCountFinishCollection;
    }
}

void
Recycler::SetIsInScript(bool isInScript)
{
    Assert(this->isInScript != isInScript);
    this->isInScript = isInScript;
}

bool
Recycler::NeedOOMRescan() const
{
    return this->needOOMRescan;
}

void
Recycler::SetNeedOOMRescan()
{
    this->needOOMRescan = true;
}

void
Recycler::ClearNeedOOMRescan()
{
    this->needOOMRescan = false;
    markContext.GetPageAllocator()->ResetDisableAllocationOutOfMemory();
    parallelMarkContext1.GetPageAllocator()->ResetDisableAllocationOutOfMemory();
    parallelMarkContext2.GetPageAllocator()->ResetDisableAllocationOutOfMemory();
    parallelMarkContext3.GetPageAllocator()->ResetDisableAllocationOutOfMemory();
}
bool
Recycler::IsMemProtectMode()
{
    return this->enableScanImplicitRoots;
}

size_t
Recycler::GetUsedBytes()
{
    size_t usedBytes = threadPageAllocator->usedBytes;
#ifdef RECYCLER_WRITE_BARRIER_ALLOC_SEPARATE_PAGE
    usedBytes += recyclerWithBarrierPageAllocator.usedBytes;
#endif
    usedBytes += recyclerPageAllocator.usedBytes;
    usedBytes += recyclerLargeBlockPageAllocator.usedBytes;
#if GLOBAL_ENABLE_WRITE_BARRIER
    if (CONFIG_FLAG(ForceSoftwareWriteBarrier))
    {
        Assert(recyclerPageAllocator.usedBytes == 0);
    }
#endif
    return usedBytes;
}
IdleDecommitPageAllocator*
Recycler::GetRecyclerPageAllocator()
{
    // TODO: SWB: this is for finalizable leaf allocation, for which we haven't implemented
    // a leaf bucket yet. Remove this once the finalizable leaf bucket is implemented.
#if GLOBAL_ENABLE_WRITE_BARRIER
    if (CONFIG_FLAG(ForceSoftwareWriteBarrier))
    {
        return &this->recyclerWithBarrierPageAllocator;
    }
    else
#endif
    {
#ifdef RECYCLER_WRITE_WATCH
        return &this->recyclerPageAllocator;
#else
        return &this->recyclerWithBarrierPageAllocator;
#endif
    }
}
IdleDecommitPageAllocator*
Recycler::GetRecyclerLargeBlockPageAllocator()
{
    return &this->recyclerLargeBlockPageAllocator;
}

IdleDecommitPageAllocator*
Recycler::GetRecyclerLeafPageAllocator()
{
    return this->threadPageAllocator;
}

#ifdef RECYCLER_WRITE_BARRIER_ALLOC_SEPARATE_PAGE
IdleDecommitPageAllocator*
Recycler::GetRecyclerWithBarrierPageAllocator()
{
    return &this->recyclerWithBarrierPageAllocator;
}
#endif
#if DBG
BOOL
Recycler::IsFreeObject(void * candidate)
{
    HeapBlock * heapBlock = this->FindHeapBlock(candidate);
    if (heapBlock != NULL)
    {
        return heapBlock->IsFreeObject(candidate);
    }
    return false;
}
#endif

BOOL
Recycler::IsValidObject(void* candidate, size_t minimumSize)
{
    HeapBlock * heapBlock = this->FindHeapBlock(candidate);
    if (heapBlock != NULL)
    {
        return heapBlock->IsValidObject(candidate) && (minimumSize == 0 || heapBlock->GetObjectSize(candidate) >= minimumSize);
    }
    return false;
}
void
Recycler::Prime()
{
#ifdef ENABLE_DEBUG_CONFIG_OPTIONS
    if (GetRecyclerFlagsTable().IsEnabled(Js::ForceFragmentAddressSpaceFlag))
    {
        // Never prime the recycler if we are forced to fragment address space
        return;
    }
#endif
    ForEachPageAllocator([](IdleDecommitPageAllocator* pageAlloc)
    {
        pageAlloc->Prime(RecyclerPageAllocator::DefaultPrimePageCount);
    });
}
void
Recycler::AddExternalMemoryUsage(size_t size)
{
    this->autoHeap.uncollectedAllocBytes += size;
    this->autoHeap.uncollectedExternalBytes += size;

    // Generally a normal GC can clean up the uncollectedAllocBytes. But if external components
    // do fast large allocations in a row, a normal GC might not kick in. Force the GC
    // here if we need to collect anyhow.
    CollectNow<CollectOnAllocation>();
}

BOOL Recycler::ReportExternalMemoryAllocation(size_t size)
{
    return recyclerPageAllocator.RequestAlloc(size);
}

void Recycler::ReportExternalMemoryFailure(size_t size)
{
    recyclerPageAllocator.ReportFailure(size);
}

void Recycler::ReportExternalMemoryFree(size_t size)
{
    recyclerPageAllocator.ReportFree(size);
}
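// Illustrative sketch (excluded from the build) of the external memory
// accounting protocol above: request quota first, undo the reservation on
// failure, and report the release when the memory goes away. The helpers
// AllocateHostBuffer/FreeHostBuffer are assumed host-side functions.
#if 0
void * HostExternalAllocExample(Recycler * recycler, size_t size)
{
    if (!recycler->ReportExternalMemoryAllocation(size))
    {
        return nullptr;                               // over budget; don't allocate
    }
    void * buffer = AllocateHostBuffer(size);
    if (buffer == nullptr)
    {
        recycler->ReportExternalMemoryFailure(size);  // give the reserved quota back
        return nullptr;
    }
    recycler->AddExternalMemoryUsage(size);           // bias GC heuristics toward collecting
    return buffer;
}

void HostExternalFreeExample(Recycler * recycler, void * buffer, size_t size)
{
    FreeHostBuffer(buffer);
    recycler->ReportExternalMemoryFree(size);         // return the quota
}
#endif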
/*------------------------------------------------------------------------------------------------
 * Idle Decommit
 *------------------------------------------------------------------------------------------------*/
void
Recycler::EnterIdleDecommit()
{
    ForEachPageAllocator([](IdleDecommitPageAllocator* pageAlloc)
    {
        pageAlloc->EnterIdleDecommit();
    });
#ifdef IDLE_DECOMMIT_ENABLED
    ::InterlockedCompareExchange(&needIdleDecommitSignal, IdleDecommitSignal_None, IdleDecommitSignal_NeedTimer);
#endif
}
void
Recycler::LeaveIdleDecommit()
{
#ifdef IDLE_DECOMMIT_ENABLED
    bool allowTimer = (this->concurrentIdleDecommitEvent != nullptr);
    IdleDecommitSignal idleDecommitSignalRecycler = recyclerPageAllocator.LeaveIdleDecommit(allowTimer);
    IdleDecommitSignal idleDecommitSignalRecyclerLargeBlock = recyclerLargeBlockPageAllocator.LeaveIdleDecommit(allowTimer);
    IdleDecommitSignal idleDecommitSignal = max(idleDecommitSignalRecycler, idleDecommitSignalRecyclerLargeBlock);
    IdleDecommitSignal idleDecommitSignalThread = threadPageAllocator->LeaveIdleDecommit(allowTimer);
    idleDecommitSignal = max(idleDecommitSignal, idleDecommitSignalThread);
#ifdef RECYCLER_WRITE_BARRIER_ALLOC_SEPARATE_PAGE
    IdleDecommitSignal idleDecommitSignalRecyclerWithBarrier = recyclerWithBarrierPageAllocator.LeaveIdleDecommit(allowTimer);
    idleDecommitSignal = max(idleDecommitSignal, idleDecommitSignalRecyclerWithBarrier);
#endif
    if (idleDecommitSignal != IdleDecommitSignal_None)
    {
        Assert(allowTimer);

        // Reduce the number of times we need to signal the background thread
        // by detecting whether the thread is waiting on a timeout or not
        if (idleDecommitSignal == IdleDecommitSignal_NeedSignal ||
            ::InterlockedCompareExchange(&needIdleDecommitSignal, IdleDecommitSignal_NeedTimer, IdleDecommitSignal_None) == IdleDecommitSignal_NeedSignal)
        {
#if DBG
            if (GetRecyclerFlagsTable().Trace.IsEnabled(Js::IdleDecommitPhase))
            {
                Output::Print(_u("Recycler Thread IdleDecommit Need Signal\n"));
                Output::Flush();
            }
#endif
            SetEvent(this->concurrentIdleDecommitEvent);
        }
    }
#else
    ForEachPageAllocator([](IdleDecommitPageAllocator* pageAlloc)
    {
        pageAlloc->LeaveIdleDecommit(false);
    });
#endif
}
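// Illustrative sketch (excluded from the build): EnterIdleDecommit and
// LeaveIdleDecommit are meant to bracket a host idle window, during which the
// page allocators may decommit unused pages; LeaveIdleDecommit then wakes the
// concurrent thread only when one of the allocators asked for a timer or a
// signal. The host-side shape shown here is an assumption.
#if 0
void HostIdleCallbackExample(Recycler * recycler)
{
    recycler->EnterIdleDecommit();
    // ... other idle-time work while idle-decommit bookkeeping is active ...
    recycler->LeaveIdleDecommit();  // may SetEvent(concurrentIdleDecommitEvent)
}
#endif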
/*------------------------------------------------------------------------------------------------
 * Freeing
 *------------------------------------------------------------------------------------------------*/
bool Recycler::ExplicitFreeLeaf(void* buffer, size_t size)
{
    return ExplicitFreeInternalWrapper<ObjectInfoBits::LeafBit>(buffer, size);
}

bool Recycler::ExplicitFreeNonLeaf(void* buffer, size_t size)
{
    return ExplicitFreeInternalWrapper<ObjectInfoBits::NoBit>(buffer, size);
}
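// Illustrative sketch (excluded from the build): explicit free is an
// opportunistic optimization, so a false return means "the recycler kept the
// buffer", not an error. Here deadLeafBuffer is assumed to be a 64-byte leaf
// allocation from this recycler that the caller knows is no longer reachable.
#if 0
void ExplicitFreeExample(Recycler * recycler, char * deadLeafBuffer)
{
    bool freed = recycler->ExplicitFreeLeaf(deadLeafBuffer, 64);
    // If !freed (e.g. during sweep, at shutdown, or in page heap mode), the
    // buffer simply stays garbage until a later collection reclaims it.
}
#endif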
size_t Recycler::GetAllocSize(size_t size)
{
    size_t allocSize = size;
#ifdef RECYCLER_MEMORY_VERIFY
    if (this->VerifyEnabled())
    {
        allocSize += verifyPad + sizeof(size_t);
        Assert(allocSize > size);
    }
#endif
    return allocSize;
}
template <typename TBlockAttributes>
void Recycler::SetExplicitFreeBitOnSmallBlock(HeapBlock* heapBlock, size_t sizeCat, void* buffer, ObjectInfoBits attributes)
{
    Assert(!heapBlock->IsLargeHeapBlock());
    Assert(heapBlock->GetObjectSize(buffer) == sizeCat);
    SmallHeapBlockT<TBlockAttributes>* smallBlock = (SmallHeapBlockT<TBlockAttributes>*)heapBlock;
    if ((attributes & ObjectInfoBits::LeafBit) == LeafBit)
    {
        Assert(smallBlock->IsLeafBlock());
    }
    else
    {
        Assert(smallBlock->IsAnyNormalBlock());
    }
#ifdef RECYCLER_MEMORY_VERIFY
    smallBlock->SetExplicitFreeBitForObject(buffer);
#endif
}
template <ObjectInfoBits attributes>
bool Recycler::ExplicitFreeInternalWrapper(void* buffer, size_t size)
{
    Assert(buffer != nullptr);
    Assert(size > 0);
#ifdef ENABLE_DEBUG_CONFIG_OPTIONS
    if (CUSTOM_PHASE_OFF1(GetRecyclerFlagsTable(), Js::ExplicitFreePhase))
    {
        return false;
    }
#endif
    size_t allocSize = GetAllocSize(size);
    if (HeapInfo::IsSmallObject(allocSize))
    {
        return ExplicitFreeInternal<attributes, SmallAllocationBlockAttributes>(buffer, size, HeapInfo::GetAlignedSizeNoCheck(allocSize));
    }
    if (HeapInfo::IsMediumObject(allocSize))
    {
        return ExplicitFreeInternal<attributes, MediumAllocationBlockAttributes>(buffer, size, HeapInfo::GetMediumObjectAlignedSizeNoCheck(allocSize));
    }
    return false;
}
template <ObjectInfoBits attributes, typename TBlockAttributes>
bool Recycler::ExplicitFreeInternal(void* buffer, size_t size, size_t sizeCat)
{
    // If the GC is in sweep state while FreeInternal is called, we might be executing a finalizer
    // which called Free, which would cause a "sweepable" buffer to be free-listed. Don't allow this.
    // Also don't allow freeing while we're shutting down the recycler, since finalizers get executed
    // at this stage too.
    if (this->IsSweeping() || this->IsExiting())
    {
        return false;
    }

#if ENABLE_CONCURRENT_GC
    // We shouldn't be freeing objects while we are running GC in thread
    Assert(this->IsConcurrentState() || !this->CollectionInProgress() || this->collectionState == CollectionStatePostCollectionCallback);
#else
    Assert(!this->CollectionInProgress() || this->collectionState == CollectionStatePostCollectionCallback);
#endif

    DebugOnly(RecyclerHeapObjectInfo info);
    Assert(this->FindHeapObject(buffer, FindHeapObjectFlags_NoFreeBitVerify, info));
    Assert((info.GetAttributes() & ~ObjectInfoBits::LeafBit) == 0); // Only NoBit or LeafBit

#if DBG || defined(RECYCLER_MEMORY_VERIFY) || defined(RECYCLER_PAGE_HEAP)
    // Either the mainThreadHandle is null (we're not thread bound)
    // or we should be calling this function on the main script thread
    Assert(this->mainThreadHandle == NULL ||
        ::GetCurrentThreadId() == ::GetThreadId(this->mainThreadHandle));

    HeapBlock* heapBlock = this->FindHeapBlock(buffer);
    Assert(heapBlock != nullptr);

#ifdef RECYCLER_PAGE_HEAP
    if (this->IsPageHeapEnabled())
    {
#ifdef STACK_BACK_TRACE
        if (this->ShouldCapturePageHeapFreeStack())
        {
            if (heapBlock->IsLargeHeapBlock())
            {
                LargeHeapBlock* largeHeapBlock = (LargeHeapBlock*)heapBlock;
                if (largeHeapBlock->InPageHeapMode())
                {
                    largeHeapBlock->CapturePageHeapFreeStack();
                }
            }
        }
#endif
        // Don't do actual explicit free in page heap mode
        return false;
    }
#endif

    SetExplicitFreeBitOnSmallBlock<TBlockAttributes>(heapBlock, sizeCat, buffer, attributes);
#endif

    if (TBlockAttributes::IsMediumBlock)
    {
        autoHeap.FreeMediumObject<attributes>(buffer, sizeCat);
    }
    else
    {
        autoHeap.FreeSmallObject<attributes>(buffer, sizeCat);
    }

    if (size > sizeof(FreeObject) || TBlockAttributes::IsMediumBlock)
    {
        // Do this on the background somehow?
        byte expectedFill = 0;
        size_t fillSize = size - sizeof(FreeObject);
#ifdef RECYCLER_MEMORY_VERIFY
        if (this->VerifyEnabled())
        {
            expectedFill = Recycler::VerifyMemFill;
        }
#endif
        memset(((char*)buffer) + sizeof(FreeObject), expectedFill, fillSize);
    }

#ifdef PROFILE_RECYCLER_ALLOC
    if (this->trackerDictionary != nullptr)
    {
        this->SetTrackerData(buffer, &TrackerData::ExplicitFreeListObjectData);
    }
#endif
    return true;
}
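// Worked example (illustration only) of the fill above, assuming
// sizeof(FreeObject) == 8: an explicit free of a 32-byte object keeps bytes
// [0, 8) intact for the free-list link that FreeSmallObject/FreeMediumObject
// installed, and memsets bytes [8, 32) to 0, or to Recycler::VerifyMemFill
// when RECYCLER_MEMORY_VERIFY verification is enabled, so later consistency
// checks can recognize the expected fill pattern.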
/*------------------------------------------------------------------------------------------------
 * Allocation
 *------------------------------------------------------------------------------------------------*/
char *
Recycler::TryLargeAlloc(HeapInfo * heap, size_t size, ObjectInfoBits attributes, bool nothrow)
{
    Assert((attributes & InternalObjectInfoBitMask) == attributes);
    Assert(size != 0);
    size_t sizeCat = HeapInfo::GetAlignedSizeNoCheck(size);
    if (sizeCat == 0)
    {
        // Overflow scenario:
        // if nothrow is false, throw out of memory;
        // otherwise, return null.
        if (nothrow == false)
        {
            this->OutOfMemory();
        }
        return nullptr;
    }

    char * memBlock;
    if (heap->largeObjectBucket.largeBlockList != nullptr)
    {
        memBlock = heap->largeObjectBucket.largeBlockList->Alloc(sizeCat, attributes);
        if (memBlock != nullptr)
        {
#ifdef RECYCLER_ZERO_MEM_CHECK
            VerifyZeroFill(memBlock, sizeCat);
#endif
            return memBlock;
        }
    }

    // We don't care whether a GC happened here or not, because we are not reusing freed
    // large objects. We might try to allocate from an existing block if we implement
    // large object reuse.
    if (!this->disableCollectOnAllocationHeuristics)
    {
        CollectNow<CollectOnAllocation>();
    }

#ifdef RECYCLER_PAGE_HEAP
    if (IsPageHeapEnabled())
    {
        if (heap->largeObjectBucket.IsPageHeapEnabled(attributes))
        {
            memBlock = heap->largeObjectBucket.PageHeapAlloc(this, sizeCat, size, (ObjectInfoBits)attributes, autoHeap.pageHeapMode, nothrow);
            if (memBlock != nullptr)
            {
#ifdef RECYCLER_ZERO_MEM_CHECK
                VerifyZeroFill(memBlock, size);
#endif
                return memBlock;
            }
        }
    }
#endif

    LargeHeapBlock * heapBlock = heap->AddLargeHeapBlock(sizeCat);
    if (heapBlock == nullptr)
    {
        return nullptr;
    }
    memBlock = heapBlock->Alloc(sizeCat, attributes);
    Assert(memBlock != nullptr);
#ifdef RECYCLER_ZERO_MEM_CHECK
    VerifyZeroFill(memBlock, sizeCat);
#endif
    return memBlock;
}
template <bool nothrow>
char*
Recycler::LargeAlloc(HeapInfo* heap, size_t size, ObjectInfoBits attributes)
{
    Assert((attributes & InternalObjectInfoBitMask) == attributes);
    char * addr = TryLargeAlloc(heap, size, attributes, nothrow);
    if (addr == nullptr)
    {
        // Force a collection and try to allocate again.
        this->CollectNow<CollectNowForceInThread>();
        addr = TryLargeAlloc(heap, size, attributes, nothrow);
        if (addr == nullptr)
        {
            if (nothrow == false)
            {
                // Still fails, we are out of memory.
                // Since nothrow is false, it's okay to throw here.
                this->OutOfMemory();
            }
            else
            {
                return nullptr;
            }
        }
    }
    autoHeap.uncollectedAllocBytes += size;
    return addr;
}

// Explicitly instantiate both versions of LargeAlloc
template char* Recycler::LargeAlloc<true>(HeapInfo* heap, size_t size, ObjectInfoBits attributes);
template char* Recycler::LargeAlloc<false>(HeapInfo* heap, size_t size, ObjectInfoBits attributes);
void
Recycler::OutOfMemory()
{
    outOfMemoryFunc();
}
void Recycler::GetNormalHeapBlockAllocatorInfoForNativeAllocation(void* recyclerAddr, size_t allocSize, void*& allocatorAddress, uint32& endAddressOffset, uint32& freeListOffset, bool allowBumpAllocation, bool isOOPJIT)
{
    Assert(recyclerAddr);
    return ((Recycler*)recyclerAddr)->GetNormalHeapBlockAllocatorInfoForNativeAllocation(allocSize, allocatorAddress, endAddressOffset, freeListOffset, allowBumpAllocation, isOOPJIT);
}

void Recycler::GetNormalHeapBlockAllocatorInfoForNativeAllocation(size_t allocSize, void*& allocatorAddress, uint32& endAddressOffset, uint32& freeListOffset, bool allowBumpAllocation, bool isOOPJIT)
{
    Assert(HeapInfo::IsAlignedSize(allocSize));
    Assert(HeapInfo::IsSmallObject(allocSize));

    allocatorAddress = (char*)this + offsetof(Recycler, autoHeap) + offsetof(HeapInfo, heapBuckets) +
        sizeof(HeapBucketGroup<SmallAllocationBlockAttributes>) * ((uint)(allocSize >> HeapConstants::ObjectAllocationShift) - 1)
        + HeapBucketGroup<SmallAllocationBlockAttributes>::GetHeapBucketOffset()
        + HeapBucketT<SmallNormalHeapBlockT<SmallAllocationBlockAttributes>>::GetAllocatorHeadOffset();

    endAddressOffset = SmallHeapBlockAllocator<SmallNormalHeapBlockT<SmallAllocationBlockAttributes>>::GetEndAddressOffset();
    freeListOffset = SmallHeapBlockAllocator<SmallNormalHeapBlockT<SmallAllocationBlockAttributes>>::GetFreeObjectListOffset();

    if (!isOOPJIT)
    {
        Assert(allocatorAddress == GetAddressOfAllocator<NoBit>(allocSize));
        Assert(endAddressOffset == GetEndAddressOffset<NoBit>(allocSize));
        Assert(freeListOffset == GetFreeObjectListOffset<NoBit>(allocSize));
        Assert(allowBumpAllocation == AllowNativeCodeBumpAllocation());
    }

    if (!allowBumpAllocation)
    {
        freeListOffset = endAddressOffset;
    }
}
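// Worked example (illustration only) of the bucket arithmetic above, assuming
// HeapConstants::ObjectAllocationShift == 4 (16-byte allocation granularity):
// for allocSize == 48, the bucket index is (48 >> 4) - 1 == 2, so
// allocatorAddress lands on the allocator head of the third
// HeapBucketGroup<SmallAllocationBlockAttributes> inside autoHeap.heapBuckets.
// When bump allocation is disallowed, freeListOffset is aliased to
// endAddressOffset, which, per AllowNativeCodeBumpAllocation below, makes the
// native-code fast path fail and fall back to the helper.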
bool Recycler::AllowNativeCodeBumpAllocation()
{
    // In debug builds, if we need to track allocation info, we pretend there is no pointer-bump-allocation space
    // on this page, so that we always fail the check in native code and go to the helper, which does the tracking.
#ifdef PROFILE_RECYCLER_ALLOC
    if (this->trackerDictionary != nullptr)
    {
        return false;
    }
#endif
#ifdef RECYCLER_MEMORY_VERIFY
    if (this->verifyEnabled)
    {
        return false;
    }
#endif
#ifdef RECYCLER_PAGE_HEAP
    // Don't allow bump allocation in the JIT when page heap is turned on
    if (this->IsPageHeapEnabled())
    {
        return false;
    }
#endif
    return true;
}
void Recycler::TrackNativeAllocatedMemoryBlock(Recycler * recycler, void * memBlock, size_t sizeCat)
{
    Assert(HeapInfo::IsAlignedSize(sizeCat));
    Assert(HeapInfo::IsSmallObject(sizeCat));
#ifdef PROFILE_RECYCLER_ALLOC
    AssertMsg(!Recycler::DoProfileAllocTracker(), "Why did we register allocation tracking callback if all allocations are forced to slow path?");
#endif
    RecyclerMemoryTracking::ReportAllocation(recycler, memBlock, sizeCat);
    RECYCLER_PERF_COUNTER_INC(LiveObject);
    RECYCLER_PERF_COUNTER_ADD(LiveObjectSize, sizeCat);
    RECYCLER_PERF_COUNTER_SUB(FreeObjectSize, sizeCat);
#ifdef RECYCLER_MEMORY_VERIFY
    AssertMsg(!recycler->VerifyEnabled(), "Why did we register allocation tracking callback if all allocations are forced to slow path?");
#endif
}
/*------------------------------------------------------------------------------------------------
 * FindRoots
 *------------------------------------------------------------------------------------------------*/
// xplat-todo: Unify these two variants of GetStackBase
#ifdef _WIN32
static void* GetStackBase()
{
    return ((NT_TIB *)NtCurrentTeb())->StackBase;
}
#else
static void* GetStackBase()
{
    ULONG_PTR highLimit = 0;
    ULONG_PTR lowLimit = 0;
    ::GetCurrentThreadStackLimits(&lowLimit, &highLimit);
    return (void*) highLimit;
}
#endif
#if _M_IX86
// REVIEW: For x86, do we care about scanning esp/ebp?
// At GC time, they shouldn't be pointing to GC memory.
#define SAVE_THREAD_CONTEXT() \
    void** targetBuffer = this->savedThreadContext.GetRegisters(); \
    __asm { push eax } \
    __asm { mov eax, targetBuffer } \
    __asm { mov [eax], esp } \
    __asm { mov [eax+0x4], eax } \
    __asm { mov [eax+0x8], ebx } \
    __asm { mov [eax+0xc], ecx } \
    __asm { mov [eax+0x10], edx } \
    __asm { mov [eax+0x14], ebp } \
    __asm { mov [eax+0x18], esi } \
    __asm { mov [eax+0x1c], edi } \
    __asm { pop eax }
#elif _M_ARM
#define SAVE_THREAD_CONTEXT() arm_SAVE_REGISTERS(this->savedThreadContext.GetRegisters());
#elif _M_ARM64
#define SAVE_THREAD_CONTEXT() arm64_SAVE_REGISTERS(this->savedThreadContext.GetRegisters());
#elif _M_AMD64
#define SAVE_THREAD_CONTEXT() amd64_SAVE_REGISTERS(this->savedThreadContext.GetRegisters());
#else
#error Unexpected architecture
#endif
size_t
Recycler::ScanArena(ArenaData * alloc, bool background)
{
#if DBG_DUMP
    if (GetRecyclerFlagsTable().Trace.IsEnabled(Js::MarkPhase)
        || GetRecyclerFlagsTable().Trace.IsEnabled(Js::FindRootPhase))
    {
        this->forceTraceMark = true;
        Output::Print(_u("Scanning Guest Arena %p: "), alloc);
    }
#endif
    size_t scanRootBytes = 0;
    BEGIN_DUMP_OBJECT_ADDRESS(_u("Guest Arena"), alloc);
#if ENABLE_PARTIAL_GC || ENABLE_CONCURRENT_GC
    // The new write watch batching logic broke the write watch handling here.
    // For now, just disable write watch for guest arenas.
    // TODO: Re-enable this in the future.
#if FALSE
    // Note, guest arenas are allocated out of the large block page allocator.
    bool writeWatch = alloc->GetPageAllocator() == &this->recyclerLargeBlockPageAllocator;

    // Only use write watch when we are doing rescan (partial collect or finish concurrent)
    if (writeWatch && this->collectionState == CollectionStateRescanFindRoots)
    {
        scanRootBytes += TryMarkBigBlockListWithWriteWatch(alloc->GetBigBlocks(background));
        scanRootBytes += TryMarkBigBlockListWithWriteWatch(alloc->GetFullBlocks());
    }
    else
#endif
#endif
    {
        scanRootBytes += TryMarkBigBlockList(alloc->GetBigBlocks(background));
        scanRootBytes += TryMarkBigBlockList(alloc->GetFullBlocks());
    }

    scanRootBytes += TryMarkArenaMemoryBlockList(alloc->GetMemoryBlocks());
    END_DUMP_OBJECT(this);
#if DBG_DUMP
    if (GetRecyclerFlagsTable().Trace.IsEnabled(Js::MarkPhase)
        || GetRecyclerFlagsTable().Trace.IsEnabled(Js::FindRootPhase))
    {
        this->forceTraceMark = false;
        Output::Print(_u("\n"));
        Output::Flush();
    }
#endif

    // The arena has been scanned so the full blocks can be rearranged at this point
#if ENABLE_DEBUG_CONFIG_OPTIONS
    if (background || !GetRecyclerFlagsTable().RecyclerProtectPagesOnRescan)
#endif
    {
        alloc->SetLockBlockList(false);
    }
    return scanRootBytes;
}
#if DBG
bool
Recycler::ExpectStackSkip() const
{
    // Okay to skip the stack scan if we're in leak check mode
    bool expectStackSkip = false;
#ifdef LEAK_REPORT
    expectStackSkip = expectStackSkip || GetRecyclerFlagsTable().IsEnabled(Js::LeakReportFlag);
#endif
#ifdef CHECK_MEMORY_LEAK
    expectStackSkip = expectStackSkip || GetRecyclerFlagsTable().CheckMemoryLeak;
#endif
#ifdef RECYCLER_DUMP_OBJECT_GRAPH
    expectStackSkip = expectStackSkip || (this->objectGraphDumper != nullptr);
#endif
#if defined(INTERNAL_MEM_PROTECT_HEAP_ALLOC)
    expectStackSkip = expectStackSkip || GetRecyclerFlagsTable().MemProtectHeap;
#endif
    return expectStackSkip || isExternalStackSkippingGC;
}
#endif
#pragma warning(push)
#pragma warning(disable:4731) // 'pointer' : frame pointer register 'register' modified by inline assembly code
size_t
Recycler::ScanStack()
{
    if (this->skipStack)
    {
#ifdef RECYCLER_TRACE
        CUSTOM_PHASE_PRINT_VERBOSE_TRACE1(GetRecyclerFlagsTable(), Js::ScanStackPhase, _u("[%04X] Skipping the stack scan\n"), ::GetCurrentThreadId());
#endif
#if ENABLE_CONCURRENT_GC
        Assert(this->isFinishGCOnIdle || this->isConcurrentGCOnIdle || this->ExpectStackSkip());
#else
        Assert(this->ExpectStackSkip());
#endif
        return 0;
    }

#ifdef RECYCLER_STATS
    size_t lastMarkCount = this->collectionStats.markData.markCount;
#endif
    GCETW(GC_SCANSTACK_START, (this));
    RECYCLER_PROFILE_EXEC_BEGIN(this, Js::ScanStackPhase);

    SAVE_THREAD_CONTEXT();
    void * stackTop = this->savedThreadContext.GetStackTop();
    void * stackStart = GetStackBase();
    Assert(stackStart > stackTop);
    size_t stackScanned = (size_t)((char *)stackStart - (char *)stackTop);

#if DBG_DUMP
    if (GetRecyclerFlagsTable().Trace.IsEnabled(Js::MarkPhase)
        || GetRecyclerFlagsTable().Trace.IsEnabled(Js::ScanStackPhase))
    {
        this->forceTraceMark = true;
        Output::Print(_u("Scanning Stack %p(%8d): "), stackTop, (char *)stackStart - (char *)stackTop);
    }
#endif

    bool doSpecialMark = collectionWrapper->DoSpecialMarkOnScanStack();

    BEGIN_DUMP_OBJECT(this, _u("Registers"));
    if (doSpecialMark)
    {
        ScanMemoryInline<true>(this->savedThreadContext.GetRegisters(), sizeof(void*) * SavedRegisterState::NumRegistersToSave);
    }
    else
    {
        ScanMemoryInline<false>(this->savedThreadContext.GetRegisters(), sizeof(void*) * SavedRegisterState::NumRegistersToSave);
    }
    END_DUMP_OBJECT(this);

    BEGIN_DUMP_OBJECT(this, _u("Stack"));
    if (doSpecialMark)
    {
        ScanMemoryInline<true>((void**) stackTop, stackScanned);
    }
    else
    {
        ScanMemoryInline<false>((void**) stackTop, stackScanned);
    }
    END_DUMP_OBJECT(this);

#if DBG_DUMP
    if (GetRecyclerFlagsTable().Trace.IsEnabled(Js::MarkPhase)
        || GetRecyclerFlagsTable().Trace.IsEnabled(Js::ScanStackPhase))
    {
        this->forceTraceMark = false;
        Output::Print(_u("\n"));
        Output::Flush();
    }
#endif

    RECYCLER_PROFILE_EXEC_END(this, Js::ScanStackPhase);
    RECYCLER_STATS_ADD(this, stackCount, this->collectionStats.markData.markCount - lastMarkCount);
    GCETW(GC_SCANSTACK_STOP, (this));

    return stackScanned;
}
#pragma warning(pop)
template <bool background>
size_t Recycler::ScanPinnedObjects()
{
    size_t scanRootBytes = 0;

    BEGIN_DUMP_OBJECT(this, _u("Pinned"));
    {
        this->TryMarkNonInterior(transientPinnedObject, &transientPinnedObject /* parentReference */);

        if (this->scanPinnedObjectMap)
        {
            // We are scanning the pinned object map now; we don't need to rescan unless
            // we reset marks or we add entries to the map in Recycler::AddRef
            this->scanPinnedObjectMap = false;

            pinnedObjectMap.MapAndRemoveIf([this, &scanRootBytes](void * obj, PinRecord const& refCount)
            {
                if (refCount == 0)
                {
#ifdef STACK_BACK_TRACE
#if defined(CHECK_MEMORY_LEAK) || defined(LEAK_REPORT)
                    Assert(refCount.stackBackTraces == nullptr);
#endif
#endif
                    // Only remove if we are not doing this in the background.
                    return !background;
                }

                this->TryMarkNonInterior(obj, static_cast<void*>(const_cast<PinRecord*>(&refCount)) /* parentReference */);
                scanRootBytes += sizeof(void *);
                return false;
            });

            if (!background)
            {
                this->hasPendingUnpinnedObject = false;
            }
        }
    }
    END_DUMP_OBJECT(this);

    if (background)
    {
        // Re-enable resize now that we are done
        pinnedObjectMap.EnableResize();
    }
    return scanRootBytes;
}
void
RecyclerScanMemoryCallback::operator()(void** obj, size_t byteCount)
{
    this->recycler->ScanMemoryInline<false>(obj, byteCount);
}
size_t
Recycler::FindRoots()
{
    size_t scanRootBytes = 0;
#ifdef RECYCLER_STATS
    size_t lastMarkCount = this->collectionStats.markData.markCount;
#endif
    GCETW(GC_SCANROOTS_START, (this));
    RECYCLER_PROFILE_EXEC_BEGIN(this, Js::FindRootPhase);

#ifdef ENABLE_PROJECTION
    {
        AUTO_TIMESTAMP(externalWeakReferenceObjectResolve);
        BEGIN_DUMP_OBJECT(this, _u("External Weak Referenced Roots"));
        Assert(!this->IsInRefCountTrackingForProjection());
#if DBG
        AutoIsInRefCountTrackingForProjection autoIsInRefCountTrackingForProjection(this);
#endif
        collectionWrapper->MarkExternalWeakReferencedObjects(this->inPartialCollectMode);
        END_DUMP_OBJECT(this);
    }
#endif

    // Go through the ITracker* stuff. We don't need to do it for a partial collection,
    // as we keep track of and mark all trackable objects.
    // Do this first because the host might unpin stuff in the process.
    if (externalRootMarker != NULL)
    {
#if ENABLE_PARTIAL_GC
        if (!this->inPartialCollectMode)
#endif
        {
            RECYCLER_PROFILE_EXEC_BEGIN(this, Js::FindRootExtPhase);
#if DBG_DUMP
            if (GetRecyclerFlagsTable().Trace.IsEnabled(Js::MarkPhase)
                || GetRecyclerFlagsTable().Trace.IsEnabled(Js::FindRootPhase))
            {
                this->forceTraceMark = true;
                Output::Print(_u("Scanning External Roots: "));
            }
#endif
            BEGIN_DUMP_OBJECT(this, _u("External Roots"));
            // PARTIALGC-TODO: How do we count external roots?
            externalRootMarker(externalRootMarkerContext);
            END_DUMP_OBJECT(this);
#if DBG_DUMP
            if (GetRecyclerFlagsTable().Trace.IsEnabled(Js::MarkPhase)
                || GetRecyclerFlagsTable().Trace.IsEnabled(Js::FindRootPhase))
            {
                this->forceTraceMark = false;
                Output::Print(_u("\n"));
                Output::Flush();
            }
#endif
            RECYCLER_PROFILE_EXEC_END(this, Js::FindRootExtPhase);
        }
    }

#if DBG_DUMP
    if (GetRecyclerFlagsTable().Trace.IsEnabled(Js::MarkPhase)
        || GetRecyclerFlagsTable().Trace.IsEnabled(Js::FindRootPhase))
    {
        this->forceTraceMark = true;
        Output::Print(_u("Scanning Pinned Objects: "));
    }
#endif

    scanRootBytes += this->ScanPinnedObjects</*background = */false>();

#if DBG_DUMP
    if (GetRecyclerFlagsTable().Trace.IsEnabled(Js::MarkPhase)
        || GetRecyclerFlagsTable().Trace.IsEnabled(Js::FindRootPhase))
    {
        this->forceTraceMark = false;
        Output::Print(_u("\n"));
        Output::Flush();
    }
#endif

#if ENABLE_CONCURRENT_GC
    Assert(!this->hasPendingConcurrentFindRoot);
#endif

    RECYCLER_PROFILE_EXEC_BEGIN(this, Js::FindRootArenaPhase);
    DListBase<GuestArenaAllocator>::EditingIterator guestArenaIter(&guestArenaList);
    while (guestArenaIter.Next())
    {
        GuestArenaAllocator& allocator = guestArenaIter.Data();
#if ENABLE_CONCURRENT_GC
        if (allocator.pendingDelete)
        {
            Assert(this->hasPendingDeleteGuestArena);
            allocator.SetLockBlockList(false);
            guestArenaIter.RemoveCurrent(&HeapAllocator::Instance);
        }
        else if (this->backgroundFinishMarkCount == 0)
#endif
        {
            // Only scan the arena if we haven't finished mark in the background
            // (which is always the case if concurrent GC is disabled)
            scanRootBytes += ScanArena(&allocator, false);
        }
    }
    this->hasPendingDeleteGuestArena = false;

    DList<ArenaData *, HeapAllocator>::Iterator externalGuestArenaIter(&externalGuestArenaList);
    while (externalGuestArenaIter.Next())
    {
        scanRootBytes += ScanArena(externalGuestArenaIter.Data(), false);
    }
    RECYCLER_PROFILE_EXEC_END(this, Js::FindRootArenaPhase);

    this->ScanImplicitRoots();

    RECYCLER_PROFILE_EXEC_END(this, Js::FindRootPhase);
    GCETW(GC_SCANROOTS_STOP, (this));
    RECYCLER_STATS_ADD(this, rootCount, this->collectionStats.markData.markCount - lastMarkCount);
    return scanRootBytes;
}
void
Recycler::ScanImplicitRoots()
{
    if (this->enableScanImplicitRoots)
    {
        RECYCLER_PROFILE_EXEC_BEGIN(this, Js::FindImplicitRootPhase);
        if (!this->hasScannedInitialImplicitRoots)
        {
            this->ScanInitialImplicitRoots();
            this->hasScannedInitialImplicitRoots = true;
        }
        else
        {
            this->ScanNewImplicitRoots();
        }
        RECYCLER_PROFILE_EXEC_END(this, Js::FindImplicitRootPhase);
    }
}
size_t
Recycler::TryMarkArenaMemoryBlockList(ArenaMemoryBlock * memoryBlocks)
{
    size_t scanRootBytes = 0;
    ArenaMemoryBlock * blockp = memoryBlocks;
    while (blockp != NULL)
    {
        void ** base = (void**)blockp->GetBytes();
        size_t byteCount = blockp->nbytes;
        scanRootBytes += byteCount;
        this->ScanMemory<false>(base, byteCount);
        blockp = blockp->next;
    }
    return scanRootBytes;
}
#if ENABLE_CONCURRENT_GC
#if FALSE
size_t
Recycler::TryMarkBigBlockListWithWriteWatch(BigBlock * memoryBlocks)
{
    DWORD pageSize = AutoSystemInfo::PageSize;
    size_t scanRootBytes = 0;
    BigBlock * blockp = memoryBlocks;

    // Reset the write watch bit if we are scanning this in the background thread
    DWORD const writeWatchFlags = this->IsConcurrentFindRootState() ? WRITE_WATCH_FLAG_RESET : 0;
    while (blockp != NULL)
    {
        char * currentAddress = (char *)blockp->GetBytes();
        char * endAddress = currentAddress + blockp->currentByte;
        char * currentPageStart = (char *)blockp->allocation;
        while (currentAddress < endAddress)
        {
            void * written;
            ULONG_PTR count = 1;
            if (::GetWriteWatch(writeWatchFlags, currentPageStart, AutoSystemInfo::PageSize, &written, &count, &pageSize) != 0 || count == 1)
            {
                char * currentEnd = min(currentPageStart + pageSize, endAddress);
                size_t byteCount = (size_t)(currentEnd - currentAddress);
                scanRootBytes += byteCount;
                this->ScanMemory<false>((void **)currentAddress, byteCount);
            }
            currentPageStart += pageSize;
            currentAddress = currentPageStart;
        }
        blockp = blockp->nextBigBlock;
    }
    return scanRootBytes;
}
#endif
#endif
size_t
Recycler::TryMarkBigBlockList(BigBlock * memoryBlocks)
{
    size_t scanRootBytes = 0;
    BigBlock * blockp = memoryBlocks;
    while (blockp != NULL)
    {
        void ** base = (void**)blockp->GetBytes();
        size_t byteCount = blockp->currentByte;
        scanRootBytes += byteCount;
        this->ScanMemory<false>(base, byteCount);
        blockp = blockp->nextBigBlock;
    }
    return scanRootBytes;
}
void
Recycler::ScanInitialImplicitRoots()
{
    autoHeap.ScanInitialImplicitRoots();
}

void
Recycler::ScanNewImplicitRoots()
{
    autoHeap.ScanNewImplicitRoots();
}
/*------------------------------------------------------------------------------------------------
 * Mark
 *------------------------------------------------------------------------------------------------*/
void
Recycler::ResetMarks(ResetMarkFlags flags)
{
    Assert(!this->CollectionInProgress());
    collectionState = CollectionStateResetMarks;
    RecyclerVerboseTrace(GetRecyclerFlagsTable(), _u("Reset marks\n"));
    GCETW(GC_RESETMARKS_START, (this));
    RECYCLER_PROFILE_EXEC_BEGIN(this, Js::ResetMarksPhase);
    Assert(IsMarkStackEmpty());

    this->scanPinnedObjectMap = true;
    this->hasScannedInitialImplicitRoots = false;
    heapBlockMap.ResetMarks();
    autoHeap.ResetMarks(flags);

    RECYCLER_PROFILE_EXEC_END(this, Js::ResetMarksPhase);
    GCETW(GC_RESETMARKS_STOP, (this));
#ifdef RECYCLER_MARK_TRACK
    this->ClearMarkMap();
#endif
}
#ifdef RECYCLER_MARK_TRACK
void Recycler::ClearMarkMap()
{
    this->markMap->Clear();
}

void Recycler::PrintMarkMap()
{
    this->markMap->Map([](void* key, void* value)
    {
        Output::Print(_u("0x%P => 0x%P\n"), key, value);
    });
}
#endif
#if DBG
void
Recycler::CheckAllocExternalMark() const
{
    Assert(!disableThreadAccessCheck);
    Assert(GetCurrentThreadContextId() == mainThreadId);
#if ENABLE_CONCURRENT_GC
#ifdef HEAP_ENUMERATION_VALIDATION
    Assert((this->IsMarkState() || this->IsPostEnumHeapValidationInProgress()) && collectionState != CollectionStateConcurrentMark);
#else
    Assert(this->IsMarkState() && collectionState != CollectionStateConcurrentMark);
#endif
#else
    Assert(this->IsMarkState());
#endif
}
#endif
void
Recycler::TryMarkNonInterior(void* candidate, void* parentReference)
{
#ifdef HEAP_ENUMERATION_VALIDATION
    Assert(!isHeapEnumInProgress || this->IsPostEnumHeapValidationInProgress());
#else
    Assert(!isHeapEnumInProgress);
#endif
    Assert(this->collectionState != CollectionStateParallelMark);
    markContext.Mark</* parallel */ false, /* interior */ false, /* doSpecialMark */ false>(candidate, parentReference);
}

void
Recycler::TryMarkInterior(void* candidate, void* parentReference)
{
#ifdef HEAP_ENUMERATION_VALIDATION
    Assert(!isHeapEnumInProgress || this->IsPostEnumHeapValidationInProgress());
#else
    Assert(!isHeapEnumInProgress);
#endif
    Assert(this->collectionState != CollectionStateParallelMark);
    markContext.Mark</* parallel */ false, /* interior */ true, /* doSpecialMark */ false>(candidate, parentReference);
}
template <bool parallel, bool interior>
void
Recycler::ProcessMarkContext(MarkContext * markContext)
{
#if ENABLE_CONCURRENT_GC
    // Copying the markContext onto the stack messes up tracked object handling, because
    // the tracked object will call TryMark[Non]Interior to report its references.
    // These functions implicitly use the main markContext on the Recycler, but this will
    // be overridden if we're processing the main markContext here.
    // So, don't do this if we are going to process tracked objects.
    // (This will be the case if we're not queuing and we're not in partial mode, which ignores tracked objects.)
    // In this case we shouldn't be parallel anyway, so we don't need to worry about cache behavior.
    // We should revisit how we manage markContexts in general in the future, and clean this up
    // by passing the MarkContext through to the tracked object's Mark method.
#if ENABLE_PARTIAL_GC
    if (this->inPartialCollectMode || DoQueueTrackedObject())
#else
    if (DoQueueTrackedObject())
#endif
    {
        // The markContext as passed is one of the markContexts that lives on the Recycler.
        // Copy it locally for processing. This serves two purposes:
        // (1) Allow for better codegen, because the markContext is local and we don't need to track
        //     the this pointer separately (because all the key processing is inlined into this function).
        // (2) Ensure we don't have weird cache behavior because we're accidentally writing to the
        //     same cache line from multiple threads during parallel marking.
        MarkContext localMarkContext = *markContext;

        // Do the actual marking.
        localMarkContext.ProcessMark<parallel, interior>();

        // Copy back to the original location.
        *markContext = localMarkContext;

        // Clear the local mark context.
        localMarkContext.Clear();
    }
    else
#endif
    {
        Assert(!parallel);
        markContext->ProcessMark<parallel, interior>();
    }
}
void
Recycler::ProcessMark(bool background)
{
    if (background)
    {
        GCETW(GC_BACKGROUNDMARK_START, (this, backgroundRescanCount));
    }
    else
    {
        GCETW(GC_MARK_START, (this));
    }
    RECYCLER_PROFILE_EXEC_THREAD_BEGIN(background, this, Js::MarkPhase);

    if (this->enableScanInteriorPointers)
    {
        this->ProcessMarkContext</* parallel */ false, /* interior */ true>(&markContext);
    }
    else
    {
        this->ProcessMarkContext</* parallel */ false, /* interior */ false>(&markContext);
    }

    RECYCLER_PROFILE_EXEC_THREAD_END(background, this, Js::MarkPhase);
    if (background)
    {
        GCETW(GC_BACKGROUNDMARK_STOP, (this, backgroundRescanCount));
    }
    else
    {
        GCETW(GC_MARK_STOP, (this));
    }

    DebugOnly(this->markContext.VerifyPostMarkState());
}
void
Recycler::ProcessParallelMark(bool background, MarkContext * markContext)
{
    if (background)
    {
        GCETW(GC_BACKGROUNDPARALLELMARK_START, (this, backgroundRescanCount));
    }
    else
    {
        GCETW(GC_PARALLELMARK_START, (this));
    }
    RECYCLER_PROFILE_EXEC_THREAD_BEGIN(background, this, Js::MarkPhase);

    if (this->enableScanInteriorPointers)
    {
        this->ProcessMarkContext</* parallel */ true, /* interior */ true>(markContext);
    }
    else
    {
        this->ProcessMarkContext</* parallel */ true, /* interior */ false>(markContext);
    }

    RECYCLER_PROFILE_EXEC_THREAD_END(background, this, Js::MarkPhase);
    if (background)
    {
        GCETW(GC_BACKGROUNDPARALLELMARK_STOP, (this, backgroundRescanCount));
    }
    else
    {
        GCETW(GC_PARALLELMARK_STOP, (this));
    }
}
void
Recycler::Mark()
{
    // Marking in thread, we can just pre-mark them
    ResetMarks(this->enableScanImplicitRoots ? ResetMarkFlags_InThreadImplicitRoots : ResetMarkFlags_InThread);
    collectionState = CollectionStateFindRoots;
    RootMark(CollectionStateMark);
}
#if ENABLE_CONCURRENT_GC
void
Recycler::StartQueueTrackedObject()
{
    Assert(!this->queueTrackedObject);
    Assert(!this->HasPendingTrackObjects());
#if ENABLE_PARTIAL_GC
    Assert(this->clientTrackedObjectList.Empty());
    Assert(!this->inPartialCollectMode);
#endif
    this->queueTrackedObject = true;
}

bool
Recycler::DoQueueTrackedObject() const
{
    Assert(this->queueTrackedObject || !this->IsConcurrentMarkState());
    Assert(this->queueTrackedObject || this->isProcessingTrackedObjects || !this->HasPendingTrackObjects());
#if ENABLE_PARTIAL_GC
    Assert(this->queueTrackedObject || this->inPartialCollectMode || !(this->collectionState == CollectionStateParallelMark));
    Assert(!this->queueTrackedObject || (this->clientTrackedObjectList.Empty() && !this->inPartialCollectMode));
#else
    Assert(this->queueTrackedObject || !(this->collectionState == CollectionStateParallelMark));
#endif
    return this->queueTrackedObject;
}
#endif
void
Recycler::ResetCollectionState()
{
    Assert(IsMarkStackEmpty());
    this->collectionState = CollectionStateNotCollecting;
#if ENABLE_CONCURRENT_GC
    this->backgroundFinishMarkCount = 0;
#endif
    this->inExhaustiveCollection = false;
    this->inDecommitNowCollection = false;
#if ENABLE_CONCURRENT_GC
    CleanupPendingUnroot();
#endif
#if ENABLE_PARTIAL_GC
    if (inPartialCollectMode)
    {
        FinishPartialCollect();
    }
#endif
#if ENABLE_CONCURRENT_GC
    Assert(!this->DoQueueTrackedObject());
#endif
#ifdef RECYCLER_FINALIZE_CHECK
    // Reset the collection stats.
    this->collectionStats.finalizeCount = this->autoHeap.liveFinalizableObjectCount - this->autoHeap.newFinalizableObjectCount - this->autoHeap.pendingDisposableObjectCount;
#endif
}
void
Recycler::ResetMarkCollectionState()
{
    // If we aborted after doing a background rescan, there will be entries in the markContext.
    // Abort these entries and reset the markContext state.
    markContext.Abort();

    // If we aborted after doing a background parallel mark, we wouldn't have cleaned up the
    // parallel markContexts yet. Clean these up now.
    // Note parallelMarkContext1 is not used in background parallel (see DoBackgroundParallelMark)
    parallelMarkContext2.Cleanup();
    parallelMarkContext3.Cleanup();

    this->ClearNeedOOMRescan();
    DebugOnly(this->isProcessingRescan = false);

#if ENABLE_CONCURRENT_GC
    // If we're resetting the mark collection state, we need to unlock the block list
    DListBase<GuestArenaAllocator>::EditingIterator guestArenaIter(&guestArenaList);
    while (guestArenaIter.Next())
    {
        GuestArenaAllocator& allocator = guestArenaIter.Data();
        allocator.SetLockBlockList(false);
    }
    this->queueTrackedObject = false;
#endif

    ResetCollectionState();
}
void
Recycler::ResetHeuristicCounters()
{
    autoHeap.lastUncollectedAllocBytes = autoHeap.uncollectedAllocBytes;
    autoHeap.uncollectedAllocBytes = 0;
    autoHeap.uncollectedExternalBytes = 0;
    ResetPartialHeuristicCounters();
}

void Recycler::ResetPartialHeuristicCounters()
{
#if ENABLE_PARTIAL_GC
    autoHeap.uncollectedNewPageCount = 0;
#endif
}
  2088. void
  2089. Recycler::ScheduleNextCollection()
  2090. {
  2091. this->tickCountNextCollection = ::GetTickCount() + RecyclerHeuristic::TickCountCollection;
  2092. this->tickCountNextFinishCollection = ::GetTickCount() + RecyclerHeuristic::TickCountFinishCollection;
  2093. }
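
// Illustrative note (an assumption for clarity, not part of the build): the deadlines
// computed above deliberately use unsigned tick arithmetic. ::GetTickCount() wraps
// around roughly every 49.7 days (2^32 milliseconds), so the due-time checks later in
// this file compare via a signed difference rather than with `now >= deadline`, which
// stays correct across the wrap:
//
//     uint32_t deadline = ::GetTickCount() + delayMs;   // may wrap past zero
//     ...
//     uint32_t now = ::GetTickCount();
//     if ((int32_t)(deadline - now) >= 0)
//     {
//         // Not due yet. The subtraction is taken modulo 2^32, so the sign of the
//         // difference is meaningful even if `deadline` wrapped and `now` did not.
//     }
//
// `delayMs` is a hypothetical stand-in for RecyclerHeuristic::TickCountCollection;
// see the tickCountNextCollection check in CollectWithHeuristic below.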
#if ENABLE_CONCURRENT_GC
void
Recycler::PrepareSweep()
{
    autoHeap.PrepareSweep();
}
#endif

size_t
Recycler::RescanMark(DWORD waitTime)
{
    bool const onLowMemory = this->NeedOOMRescan();

    // REVIEW: Why are we asserting for DoQueueTrackedObject here?
    // Should we split this into different asserts depending on whether
    // concurrent or partial is enabled?
#if ENABLE_CONCURRENT_GC
#if ENABLE_PARTIAL_GC
    Assert(this->inPartialCollectMode || DoQueueTrackedObject());
#else
    Assert(DoQueueTrackedObject());
#endif
#endif

    {
        // We are about to do a rescan mark, which for consistency requires the runtime to stop any additional mutator threads
        AUTO_NO_EXCEPTION_REGION;
        collectionWrapper->PreRescanMarkCallback();
    }

    // Always called in-thread
    Assert(collectionState == CollectionStateRescanFindRoots);

#if ENABLE_CONCURRENT_GC
    if (!onLowMemory && // Don't do background finish mark if we are low on memory
        // Only do background finish mark if we have a time limit or it is forced
        (CUSTOM_PHASE_FORCE1(GetRecyclerFlagsTable(), Js::BackgroundFinishMarkPhase) || waitTime != INFINITE) &&
        // Don't do background finish mark if we failed to finish mark too many times
        (this->backgroundFinishMarkCount < RecyclerHeuristic::MaxBackgroundFinishMarkCount(this->GetRecyclerFlagsTable())))
    {
        this->PrepareBackgroundFindRoots();
        if (StartConcurrent(CollectionStateConcurrentFinishMark))
        {
            this->backgroundFinishMarkCount++;
            this->PrepareSweep();
            GCETW(GC_RESCANMARKWAIT_START, (this, waitTime));
            const BOOL waited = WaitForConcurrentThread(waitTime);
            GCETW(GC_RESCANMARKWAIT_STOP, (this, !waited));
            if (!waited)
            {
                CUSTOM_PHASE_PRINT_TRACE1(GetRecyclerFlagsTable(), Js::BackgroundFinishMarkPhase, _u("Finish mark timed out\n"));
                {
                    // We timed out doing the finish mark, notify the runtime
                    AUTO_NO_EXCEPTION_REGION;
                    collectionWrapper->RescanMarkTimeoutCallback();
                }
                return Recycler::InvalidScanRootBytes;
            }
            Assert(collectionState == CollectionStateRescanWait);
            collectionState = CollectionStateRescanFindRoots;
#ifdef RECYCLER_WRITE_WATCH
            if (!CONFIG_FLAG(ForceSoftwareWriteBarrier))
            {
                Assert(recyclerPageAllocator.GetWriteWatchPageCount() == 0);
                Assert(recyclerLargeBlockPageAllocator.GetWriteWatchPageCount() == 0);
            }
#endif
            return this->backgroundRescanRootBytes;
        }
        this->RevertPrepareBackgroundFindRoots();
    }
#endif

#if ENABLE_CONCURRENT_GC
    this->backgroundFinishMarkCount = 0;
#endif
    return FinishMarkRescan(false) * AutoSystemInfo::PageSize;
}
size_t
Recycler::FinishMark(DWORD waitTime)
{
    size_t scannedRootBytes = RescanMark(waitTime);
    Assert(waitTime != INFINITE || scannedRootBytes != Recycler::InvalidScanRootBytes);
    if (scannedRootBytes != Recycler::InvalidScanRootBytes)
    {
#if DBG && ENABLE_PARTIAL_GC
        RecyclerVerboseTrace(GetRecyclerFlagsTable(), _u("CTO: %d\n"), this->clientTrackedObjectList.Count());
#endif
#if ENABLE_PARTIAL_GC
        if (this->inPartialCollectMode)
        {
            RecyclerVerboseTrace(GetRecyclerFlagsTable(), _u("Processing client tracked objects\n"));
            ProcessClientTrackedObjects();
        }
        else
#endif
#if ENABLE_CONCURRENT_GC
        if (DoQueueTrackedObject())
        {
            RecyclerVerboseTrace(GetRecyclerFlagsTable(), _u("Processing regular tracked objects\n"));
            ProcessTrackedObjects();
#ifdef RECYCLER_WRITE_WATCH
            if (!CONFIG_FLAG(ForceSoftwareWriteBarrier))
            {
                Assert(this->backgroundFinishMarkCount == 0 ||
                    (this->recyclerPageAllocator.GetWriteWatchPageCount() == 0 &&
                     this->recyclerLargeBlockPageAllocator.GetWriteWatchPageCount() == 0));
            }
#endif
        }
#endif
        // Continue to mark from the roots one more time
        scannedRootBytes += RootMark(CollectionStateRescanMark);
    }
    return scannedRootBytes;
}
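
// Illustrative note (an assumption, not part of the build): RescanMark and FinishMark
// use a sentinel-return protocol for the bounded wait. A finite waitTime may time out,
// in which case InvalidScanRootBytes comes back and the caller backs off; an INFINITE
// wait must always produce a real byte count, as the assert above encodes:
//
//     size_t bytes = RescanMark(waitTime);
//     if (bytes == Recycler::InvalidScanRootBytes)
//     {
//         // Only reachable when waitTime != INFINITE: the background finish mark
//         // did not complete in time, so finish the work later in-thread.
//     }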
#if ENABLE_CONCURRENT_GC
void
Recycler::DoParallelMark()
{
    Assert(this->enableParallelMark);
    Assert(this->maxParallelism > 1 && this->maxParallelism <= 4);

    // Split the mark stack into [this->maxParallelism] equal pieces.
    // The actual # of splits is returned, in case the stack was too small to split that many ways.
    MarkContext * splitContexts[3] = { &parallelMarkContext1, &parallelMarkContext2, &parallelMarkContext3 };
    uint actualSplitCount = markContext.Split(this->maxParallelism - 1, splitContexts);
    Assert(actualSplitCount <= 3);

    // If we failed to split at all, just mark in thread with no parallelism.
    if (actualSplitCount == 0)
    {
        this->ProcessMark(false);
        return;
    }

    // We need to queue tracked objects while we mark in parallel.
    // (Unless it's a partial collect, in which case we don't process tracked objects at all)
#if ENABLE_PARTIAL_GC
    if (!this->inPartialCollectMode)
#endif
    {
        StartQueueTrackedObject();
    }

    // Kick off marking on the background thread
    bool concurrentSuccess = StartConcurrent(CollectionStateParallelMark);

    // If there's enough work to split, then kick off marking on parallel threads too.
    // If the threads haven't been created yet, this will create them (or fail).
    bool parallelSuccess1 = false;
    bool parallelSuccess2 = false;
    if (concurrentSuccess && actualSplitCount >= 2)
    {
        parallelSuccess1 = parallelThread1.StartConcurrent();
        if (parallelSuccess1 && actualSplitCount == 3)
        {
            parallelSuccess2 = parallelThread2.StartConcurrent();
        }
    }

    // Process our portion of the split.
    this->ProcessParallelMark(false, &parallelMarkContext1);

    // If we successfully launched parallel work, wait for it to complete.
    // If we failed, then process the work in-thread now.
    if (concurrentSuccess)
    {
        WaitForConcurrentThread(INFINITE);
    }
    else
    {
        this->ProcessParallelMark(false, &markContext);
    }
    if (actualSplitCount >= 2)
    {
        if (parallelSuccess1)
        {
            parallelThread1.WaitForConcurrent();
        }
        else
        {
            this->ProcessParallelMark(false, &parallelMarkContext2);
        }
        if (actualSplitCount == 3)
        {
            if (parallelSuccess2)
            {
                parallelThread2.WaitForConcurrent();
            }
            else
            {
                this->ProcessParallelMark(false, &parallelMarkContext3);
            }
        }
    }

    this->collectionState = CollectionStateMark;

    // Process tracked objects, if any, then do one final mark phase in case they marked any new objects.
    // (Unless it's a partial collect, in which case we don't process tracked objects at all)
#if ENABLE_PARTIAL_GC
    if (!this->inPartialCollectMode)
#endif
    {
        this->ProcessTrackedObjects();
        this->ProcessMark(false);
    }
#if ENABLE_PARTIAL_GC
    else
    {
        Assert(!this->HasPendingTrackObjects());
    }
#endif
}
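
// Illustrative sketch (an assumption, not part of the build): DoParallelMark follows a
// fork/join pattern in which every forked unit of work keeps an in-thread fallback, so a
// failure to start any helper thread degrades to sequential marking instead of dropping
// work. Schematically:
//
//     // Fork: try to hand each split to a helper; remember what actually started.
//     bool started[N];
//     for (int i = 0; i < N; i++) { started[i] = TryStartHelper(i); }
//     ProcessOwnShare();
//     // Join: wait for what started; run the rest ourselves.
//     for (int i = 0; i < N; i++)
//     {
//         if (started[i]) { WaitForHelper(i); } else { ProcessShare(i); }
//     }
//
// TryStartHelper/ProcessShare are hypothetical names; the real code uses
// StartConcurrent/WaitForConcurrentThread plus the parallelThread1/2 pair.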
void
Recycler::DoBackgroundParallelMark()
{
    // Split the mark stack into [this->maxParallelism - 1] equal pieces (thus, "- 2" below).
    // The actual # of splits is returned, in case the stack was too small to split that many ways.
    // The parallel threads are hardwired to use parallelMarkContext2/3, so we split using those.
    uint actualSplitCount = 0;
    MarkContext * splitContexts[2] = { &parallelMarkContext2, &parallelMarkContext3 };
    if (this->enableParallelMark)
    {
        Assert(this->maxParallelism > 1 && this->maxParallelism <= 4);
        if (this->maxParallelism > 2)
        {
            actualSplitCount = markContext.Split(this->maxParallelism - 2, splitContexts);
        }
    }
    Assert(actualSplitCount <= 2);

    // If we failed to split at all, just mark in thread with no parallelism.
    if (actualSplitCount == 0)
    {
        this->ProcessMark(true);
        return;
    }

#if ENABLE_PARTIAL_GC
    // We should already be set up to queue tracked objects, unless this is a partial collect
    Assert(this->DoQueueTrackedObject() || this->inPartialCollectMode);
#else
    Assert(this->DoQueueTrackedObject());
#endif

    this->collectionState = CollectionStateBackgroundParallelMark;

    // Kick off marking on parallel threads too, if there is work for them.
    // If the threads haven't been created yet, this will create them (or fail).
    bool parallelSuccess1 = false;
    bool parallelSuccess2 = false;
    parallelSuccess1 = parallelThread1.StartConcurrent();
    if (parallelSuccess1 && actualSplitCount == 2)
    {
        parallelSuccess2 = parallelThread2.StartConcurrent();
    }

    // Process our portion of the split.
    this->ProcessParallelMark(true, &markContext);

    // If we successfully launched parallel work, wait for it to complete.
    // If we failed, then process the work in-thread now.
    if (parallelSuccess1)
    {
        parallelThread1.WaitForConcurrent();
    }
    else
    {
        this->ProcessParallelMark(true, &parallelMarkContext2);
    }
    if (actualSplitCount == 2)
    {
        if (parallelSuccess2)
        {
            parallelThread2.WaitForConcurrent();
        }
        else
        {
            this->ProcessParallelMark(true, &parallelMarkContext3);
        }
    }

    this->collectionState = CollectionStateConcurrentMark;
}
#endif
size_t
Recycler::RootMark(CollectionState markState)
{
    size_t scannedRootBytes = 0;
    Assert(!this->NeedOOMRescan() || markState == CollectionStateRescanMark);
#if ENABLE_PARTIAL_GC
    RecyclerVerboseTrace(GetRecyclerFlagsTable(), _u("PreMark done, partial collect: %d\n"), this->inPartialCollectMode);
#else
    RecyclerVerboseTrace(GetRecyclerFlagsTable(), _u("PreMark done, partial collect not available\n"));
#endif
    Assert(collectionState == (markState == CollectionStateMark ? CollectionStateFindRoots : CollectionStateRescanFindRoots));

    BOOL stacksScannedByRuntime = FALSE;
    {
        // We are about to scan roots in thread, notify the runtime first so it can stop threads if necessary and also provide additional roots
        AUTO_NO_EXCEPTION_REGION;
        RecyclerScanMemoryCallback scanMemory(this);
        scannedRootBytes += collectionWrapper->RootMarkCallback(scanMemory, &stacksScannedByRuntime);
    }
    scannedRootBytes += FindRoots();
    if (!stacksScannedByRuntime)
    {
        // The runtime did not scan the stack(s) for us, so we use the normal Recycler code.
        scannedRootBytes += ScanStack();
    }

    this->collectionState = markState;
#if ENABLE_CONCURRENT_GC
    if (this->enableParallelMark)
    {
        this->DoParallelMark();
    }
    else
#endif
    {
        this->ProcessMark(false);
    }

    if (this->EndMark())
    {
        // REVIEW: This heuristic doesn't apply when partial is off, so there's no need
        // to modify scannedRootBytes here, correct?
#if ENABLE_PARTIAL_GC
        // Return a large root scanned byte count so we don't get into partial mode when we are low on memory
        scannedRootBytes = RecyclerSweep::MaxPartialCollectRescanRootBytes + 1;
#endif
    }

    return scannedRootBytes;
}
bool
Recycler::EndMarkCheckOOMRescan()
{
    bool oomRescan = false;
    if (this->NeedOOMRescan())
    {
#ifdef RECYCLER_DUMP_OBJECT_GRAPH
        if (this->objectGraphDumper)
        {
            // Do not complete the mark if we are just dumping the object graph.
            // Just report out of memory.
            this->objectGraphDumper->isOutOfMemory = true;
            this->ClearNeedOOMRescan();
        }
        else
#endif
        {
            EndMarkOnLowMemory();
            oomRescan = true;
        }
    }

    // Done with the mark stack, it should be empty.
    // Release pages it is holding.
    Assert(!HasPendingMarkObjects());
    Assert(!HasPendingTrackObjects());
    return oomRescan;
}
bool
Recycler::EndMark()
{
#if ENABLE_CONCURRENT_GC
    Assert(!this->DoQueueTrackedObject());
#endif
#if ENABLE_PARTIAL_GC
    Assert(this->clientTrackedObjectList.Empty());
#endif
    {
        // We have finished marking
        AUTO_NO_EXCEPTION_REGION;
        collectionWrapper->EndMarkCallback();
    }

    bool oomRescan = EndMarkCheckOOMRescan();
    if (ProcessObjectBeforeCollectCallbacks())
    {
        // Callbacks may trigger additional marking; need to check OOMRescan again
        oomRescan |= EndMarkCheckOOMRescan();
    }

    // GC-CONSIDER: Consider keeping some pages around
    GCETW(GC_DECOMMIT_CONCURRENT_COLLECT_PAGE_ALLOCATOR_START, (this));

    // Clean up mark contexts, which will release held free pages.
    // Do this for all contexts before we decommit, to make sure all pages are freed.
    markContext.Cleanup();
    parallelMarkContext1.Cleanup();
    parallelMarkContext2.Cleanup();
    parallelMarkContext3.Cleanup();

    // Decommit all pages
    markContext.DecommitPages();
    parallelMarkContext1.DecommitPages();
    parallelMarkContext2.DecommitPages();
    parallelMarkContext3.DecommitPages();

    GCETW(GC_DECOMMIT_CONCURRENT_COLLECT_PAGE_ALLOCATOR_STOP, (this));
    return oomRescan;
}
void
Recycler::EndMarkOnLowMemory()
{
    GCETW(GC_ENDMARKONLOWMEMORY_START, (this));
    Assert(this->NeedOOMRescan());
    this->inEndMarkOnLowMemory = true;

    // Treat this as a concurrent mark reset so that we don't invalidate the allocators
    RecyclerVerboseTrace(GetRecyclerFlagsTable(), _u("OOM during mark - rerunning mark\n"));

    // Try to release as much memory as possible
    ForEachPageAllocator([](IdleDecommitPageAllocator* pageAlloc)
    {
        pageAlloc->DecommitNow();
    });

#ifdef ENABLE_DEBUG_CONFIG_OPTIONS
    uint iterations = 0;
#endif
    do
    {
#if ENABLE_PARTIAL_GC
        Assert(this->clientTrackedObjectList.Empty());
#endif
#if ENABLE_CONCURRENT_GC
        // Always queue tracked objects during rescan, to avoid changes to mark state.
        // (Unless we're in a partial collect, in which case we ignore tracked objects)
        Assert(!this->DoQueueTrackedObject());
#if ENABLE_PARTIAL_GC
        if (!this->inPartialCollectMode)
#endif
        {
            this->StartQueueTrackedObject();
        }
#endif
        this->collectionState = CollectionStateRescanFindRoots;
        this->ClearNeedOOMRescan();
#if DBG
        Assert(!this->isProcessingRescan);
        this->isProcessingRescan = true;
#endif
        if (!heapBlockMap.OOMRescan(this))
        {
            // Kill the process - we couldn't even rescan a single block,
            // so we are in a pretty low memory state at this point.
            // The fail-fast is present for two reasons:
            // 1) Defense-in-depth for cases we hadn't thought about
            // 2) Deal with cases like -MaxMarkStackPageCount:1 which can still hang without the fail-fast
            MarkStack_OOM_fatal_error();
        }
        autoHeap.Rescan(RescanFlags_None);
        DebugOnly(this->isProcessingRescan = false);

        this->ProcessMark(false);
#if ENABLE_CONCURRENT_GC
        // Process any tracked objects we found
#if ENABLE_PARTIAL_GC
        if (!this->inPartialCollectMode)
#endif
        {
            ProcessTrackedObjects();
        }
#endif
        // Drain the mark stack
        ProcessMark(false);
#ifdef ENABLE_DEBUG_CONFIG_OPTIONS
        iterations++;
#endif
    }
    while (this->NeedOOMRescan());

    Assert(!markContext.GetPageAllocator()->DisableAllocationOutOfMemory());
    Assert(!parallelMarkContext1.GetPageAllocator()->DisableAllocationOutOfMemory());
    Assert(!parallelMarkContext2.GetPageAllocator()->DisableAllocationOutOfMemory());
    Assert(!parallelMarkContext3.GetPageAllocator()->DisableAllocationOutOfMemory());

    CUSTOM_PHASE_PRINT_TRACE1(GetRecyclerFlagsTable(), Js::RecyclerPhase, _u("EndMarkOnLowMemory iterations: %d\n"), iterations);
#if ENABLE_PARTIAL_GC
    Assert(this->clientTrackedObjectList.Empty());
#endif
#if ENABLE_CONCURRENT_GC
    Assert(!this->DoQueueTrackedObject());
#endif
    this->inEndMarkOnLowMemory = false;

#if ENABLE_PARTIAL_GC
    if (this->inPartialCollectMode)
    {
        this->FinishPartialCollect();
    }
#endif
    GCETW(GC_ENDMARKONLOWMEMORY_STOP, (this));
}
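
// Illustrative note (an assumption, not part of the build): the loop above is a
// fixed-point iteration. Each pass rescans whatever could not be marked because the
// mark stack ran out of pages, marks again, and repeats until a full pass completes
// without re-setting the OOM-rescan flag:
//
//     do
//     {
//         ClearNeedOOMRescan();
//         RescanEverythingThatOverflowed();   // hypothetical name for the rescan step
//         DrainMarkStack();
//     } while (NeedOOMRescan());              // a pass re-overflowed; go again
//
// Termination relies on marking being monotonic: objects only go from unmarked to
// marked, so each pass strictly shrinks the remaining work.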
#if DBG
bool
Recycler::IsMarkStackEmpty()
{
    return (markContext.IsEmpty() && parallelMarkContext1.IsEmpty() && parallelMarkContext2.IsEmpty() && parallelMarkContext3.IsEmpty());
}
#endif

#ifdef HEAP_ENUMERATION_VALIDATION
void
Recycler::PostHeapEnumScan(PostHeapEnumScanCallback callback, void *data)
{
    this->pfPostHeapEnumScanCallback = callback;
    this->postHeapEnunScanData = data;
    FindRoots();
    ProcessMark(false);
    this->pfPostHeapEnumScanCallback = NULL;
    this->postHeapEnunScanData = NULL;
}
#endif

#if ENABLE_CONCURRENT_GC
bool
Recycler::QueueTrackedObject(FinalizableObject * trackableObject)
{
    return markContext.AddTrackedObject(trackableObject);
}
#endif
bool
Recycler::FindImplicitRootObject(void* candidate, RecyclerHeapObjectInfo& heapObject)
{
    HeapBlock* heapBlock = FindHeapBlock(candidate);
    if (heapBlock == nullptr)
    {
        return false;
    }

    if (heapBlock->GetHeapBlockType() < HeapBlock::HeapBlockType::SmallAllocBlockTypeCount)
    {
        return ((SmallHeapBlock*)heapBlock)->FindImplicitRootObject(candidate, this, heapObject);
    }
    else if (!heapBlock->IsLargeHeapBlock())
    {
        return ((MediumHeapBlock*)heapBlock)->FindImplicitRootObject(candidate, this, heapObject);
    }
    else
    {
        return ((LargeHeapBlock*)heapBlock)->FindImplicitRootObject(candidate, this, heapObject);
    }
}

bool
Recycler::FindHeapObject(void* candidate, FindHeapObjectFlags flags, RecyclerHeapObjectInfo& heapObject)
{
    HeapBlock* heapBlock = FindHeapBlock(candidate);
    return heapBlock && heapBlock->FindHeapObject(candidate, this, flags, heapObject);
}

bool
Recycler::FindHeapObjectWithClearedAllocators(void* candidate, RecyclerHeapObjectInfo& heapObject)
{
    // Heap enum has some cases where it allocates, so we can't assert unconditionally
    Assert(autoHeap.AllocatorsAreEmpty() || this->isHeapEnumInProgress);
    return FindHeapObject(candidate, FindHeapObjectFlags_ClearedAllocators, heapObject);
}

void*
Recycler::GetRealAddressFromInterior(void* candidate)
{
    HeapBlock * heapBlock = heapBlockMap.GetHeapBlock(candidate);
    if (heapBlock == NULL)
    {
        return NULL;
    }
    return heapBlock->GetRealAddressFromInterior(candidate);
}
/*------------------------------------------------------------------------------------------------
 * Sweep
 *------------------------------------------------------------------------------------------------*/
#if ENABLE_PARTIAL_GC
bool
Recycler::Sweep(size_t rescanRootBytes, bool concurrent, bool adjustPartialHeuristics)
#else
bool
Recycler::Sweep(bool concurrent)
#endif
{
#if ENABLE_PARTIAL_GC && ENABLE_CONCURRENT_GC
    Assert(!this->hasBackgroundFinishPartial);
#endif
#if ENABLE_CONCURRENT_GC
    if (!this->enableConcurrentSweep)
#endif
    {
        concurrent = false;
    }

    RECYCLER_PROFILE_EXEC_BEGIN(this, concurrent ? Js::ConcurrentSweepPhase : Js::SweepPhase);
#if ENABLE_PARTIAL_GC
    recyclerSweepInstance.BeginSweep(this, rescanRootBytes, adjustPartialHeuristics);
#else
    recyclerSweepInstance.BeginSweep(this);
#endif
    this->SweepHeap(concurrent, *recyclerSweep);
#if ENABLE_CONCURRENT_GC
    if (concurrent)
    {
        // If we finished mark in the background, all the relevant write watches should already be reset.
        // Only reset write watch if we didn't finish mark in the background.
        if (this->backgroundFinishMarkCount == 0)
        {
#if ENABLE_PARTIAL_GC
            if (this->inPartialCollectMode)
            {
#ifdef RECYCLER_WRITE_WATCH
                if (!CONFIG_FLAG(ForceSoftwareWriteBarrier))
                {
                    RECYCLER_PROFILE_EXEC_BEGIN(this, Js::ResetWriteWatchPhase);
                    if (!recyclerPageAllocator.ResetWriteWatch() || !recyclerLargeBlockPageAllocator.ResetWriteWatch())
                    {
                        // Shouldn't happen
                        Assert(false);

                        // Disable partial collect
                        this->enablePartialCollect = false;

                        // We haven't done any partial collection yet, just get out of partial collect mode
                        this->inPartialCollectMode = false;
                    }
                    RECYCLER_PROFILE_EXEC_END(this, Js::ResetWriteWatchPhase);
                }
#endif
            }
#endif
        }
    }
    else
#endif
    {
        recyclerSweep->FinishSweep();
        recyclerSweep->EndSweep();
    }
    RECYCLER_PROFILE_EXEC_END(this, concurrent ? Js::ConcurrentSweepPhase : Js::SweepPhase);

#if ENABLE_CONCURRENT_GC
    if (concurrent)
    {
        if (!StartConcurrent(CollectionStateConcurrentSweep))
        {
            // Failed to spawn the concurrent sweep.
            // Instead, force the concurrent sweep to happen right here in thread.
            this->collectionState = CollectionStateConcurrentSweep;
            DoBackgroundWork(true);

            // Continue as if the concurrent sweep were executing.
            // Next time we check for completion, we will finish the sweep just as if it had happened out of thread.
        }
        return true;
    }
#endif
    return false;
}
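
// Illustrative note (an assumption, not part of the build): the fallback above keeps the
// collection state machine oblivious to thread-start failure. When StartConcurrent fails,
// Sweep still enters CollectionStateConcurrentSweep and runs DoBackgroundWork(true) on the
// current thread, so callers observe a "concurrent" sweep that merely happened to complete
// synchronously, and the normal finish-collection path picks it up unchanged:
//
//     if (!StartHelper()) { state = ConcurrentSweep; RunHelperWorkHere(); }
//     return true;   // either way, report an in-flight concurrent sweep
//
// StartHelper/RunHelperWorkHere are hypothetical stand-ins for
// StartConcurrent/DoBackgroundWork.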
#ifdef ENABLE_DEBUG_CONFIG_OPTIONS
void Recycler::DisplayMemStats()
{
#ifdef PERF_COUNTERS
#if DBG_DUMP
    printf("Recycler Live Object Count %u\n", PerfCounter::RecyclerCounterSet::GetLiveObjectCounter().GetValue());
    printf("Recycler Live Object Size %u\n", PerfCounter::RecyclerCounterSet::GetLiveObjectSizeCounter().GetValue());
#endif
    printf("Recycler Used Page Size %u\n", PerfCounter::PageAllocatorCounterSet::GetUsedSizeCounter(PageAllocatorType::PageAllocatorType_Recycler).GetValue());
#endif
}
#endif
CollectedRecyclerWeakRefHeapBlock CollectedRecyclerWeakRefHeapBlock::Instance;

void
Recycler::SweepWeakReference()
{
    RECYCLER_PROFILE_EXEC_BEGIN(this, Js::SweepWeakPhase);
    GCETW(GC_SWEEP_WEAKREF_START, (this));

    // REVIEW: Clean up the weak reference map concurrently?
    bool hasCleanup = false;
    weakReferenceMap.Map([&hasCleanup](RecyclerWeakReferenceBase * weakRef) -> bool
    {
        // The weak reference handle itself is dead; drop the entry.
        if (!weakRef->weakRefHeapBlock->TestObjectMarkedBit(weakRef))
        {
            hasCleanup = true;

            // Remove
            return false;
        }

        // The target is dead; clear the strong reference and drop the entry.
        if (!weakRef->strongRefHeapBlock->TestObjectMarkedBit(weakRef->strongRef))
        {
            hasCleanup = true;
            weakRef->strongRef = nullptr;

            // Put in a dummy heap block so that we can still do the isPendingConcurrentSweep check first.
            weakRef->strongRefHeapBlock = &CollectedRecyclerWeakRefHeapBlock::Instance;

            // Remove
            return false;
        }

        // Keep
        return true;
    });
    this->weakReferenceCleanupId += hasCleanup;

    GCETW(GC_SWEEP_WEAKREF_STOP, (this));
    RECYCLER_PROFILE_EXEC_END(this, Js::SweepWeakPhase);
}
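
// Illustrative sketch (an assumption, not part of the build): the Map callback above
// doubles as a filter - returning true keeps the entry, returning false removes it -
// the same shape as a generic filtered walk over an intrusive table:
//
//     table.Map([](Entry * e) -> bool
//     {
//         if (IsDead(e))         { return false; }                      // drop the entry itself
//         if (IsDead(e->target)) { e->target = nullptr; return false; } // drop dead targets
//         return true;                                                  // keep live entries
//     });
//
// Entry/IsDead are hypothetical placeholders for RecyclerWeakReferenceBase and
// TestObjectMarkedBit in the real code.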
void
Recycler::SweepHeap(bool concurrent, RecyclerSweep& recyclerSweep)
{
    Assert(!this->hasPendingDeleteGuestArena);
    Assert(!this->isHeapEnumInProgress);
#if ENABLE_CONCURRENT_GC
    Assert(!this->DoQueueTrackedObject());
    if (concurrent)
    {
        collectionState = CollectionStateSetupConcurrentSweep;
#if ENABLE_BACKGROUND_PAGE_ZEROING
        if (CONFIG_FLAG(EnableBGFreeZero))
        {
            // Only queue up non-leaf pages - leaf pages don't need to be zeroed out
            recyclerPageAllocator.StartQueueZeroPage();
            recyclerLargeBlockPageAllocator.StartQueueZeroPage();
#ifdef RECYCLER_WRITE_BARRIER_ALLOC_SEPARATE_PAGE
            recyclerWithBarrierPageAllocator.StartQueueZeroPage();
#endif
        }
#endif
    }
    else
#endif
    {
        Assert(!concurrent);
        collectionState = CollectionStateSweep;
    }

    this->SweepWeakReference();

#if ENABLE_CONCURRENT_GC
    if (concurrent)
    {
        GCETW(GC_SETUPBACKGROUNDSWEEP_START, (this));
    }
    else
#endif
    {
        GCETW(GC_SWEEP_START, (this));
    }

    recyclerPageAllocator.SuspendIdleDecommit();
#ifdef RECYCLER_WRITE_BARRIER_ALLOC_SEPARATE_PAGE
    recyclerWithBarrierPageAllocator.SuspendIdleDecommit();
#endif
    recyclerLargeBlockPageAllocator.SuspendIdleDecommit();

    autoHeap.Sweep(recyclerSweep, concurrent);

#ifdef RECYCLER_WRITE_BARRIER_ALLOC_SEPARATE_PAGE
    recyclerWithBarrierPageAllocator.ResumeIdleDecommit();
#endif
    recyclerPageAllocator.ResumeIdleDecommit();
    recyclerLargeBlockPageAllocator.ResumeIdleDecommit();

#if ENABLE_CONCURRENT_GC
    if (concurrent)
    {
#if ENABLE_BACKGROUND_PAGE_ZEROING
        if (CONFIG_FLAG(EnableBGFreeZero))
        {
            recyclerPageAllocator.StopQueueZeroPage();
            recyclerLargeBlockPageAllocator.StopQueueZeroPage();
#ifdef RECYCLER_WRITE_BARRIER_ALLOC_SEPARATE_PAGE
            recyclerWithBarrierPageAllocator.StopQueueZeroPage();
#endif
        }
#endif
        GCETW(GC_SETUPBACKGROUNDSWEEP_STOP, (this));
    }
    else
    {
#if ENABLE_BACKGROUND_PAGE_ZEROING
        if (CONFIG_FLAG(EnableBGFreeZero))
        {
            Assert(!recyclerPageAllocator.HasZeroQueuedPages());
            Assert(!recyclerLargeBlockPageAllocator.HasZeroQueuedPages());
#ifdef RECYCLER_WRITE_BARRIER_ALLOC_SEPARATE_PAGE
            Assert(!recyclerWithBarrierPageAllocator.HasZeroQueuedPages());
#endif
        }
#endif
        uint sweptBytes = 0;
#ifdef RECYCLER_STATS
        sweptBytes = (uint)collectionStats.objectSweptBytes;
#endif
        GCETW(GC_SWEEP_STOP, (this, sweptBytes));
    }
#endif
}
#if ENABLE_PARTIAL_GC && ENABLE_CONCURRENT_GC
void
Recycler::BackgroundFinishPartialCollect(RecyclerSweep * recyclerSweep)
{
    Assert(this->inPartialCollectMode);
    Assert(recyclerSweep != nullptr && recyclerSweep->IsBackground());

    this->hasBackgroundFinishPartial = true;
    this->autoHeap.FinishPartialCollect(recyclerSweep);
    this->inPartialCollectMode = false;
}
#endif
void
Recycler::DisposeObjects()
{
    Assert(this->allowDispose && this->hasDisposableObject && !this->inDispose);
    Assert(!isHeapEnumInProgress);
    GCETW(GC_DISPOSE_START, (this));
    ASYNC_HOST_OPERATION_START(collectionWrapper);

    this->inDispose = true;

#ifdef PROFILE_RECYCLER_ALLOC
    // The finalizer may allocate memory, and disposing objects can happen in the middle of an allocation;
    // save and restore the tracked object info
    TrackAllocData oldAllocData = { 0 };
    if (trackerDictionary != nullptr)
    {
        oldAllocData = nextAllocData;
        nextAllocData.Clear();
    }
#endif
#ifdef ENABLE_DEBUG_CONFIG_OPTIONS
    if (GetRecyclerFlagsTable().Trace.IsEnabled(Js::RecyclerPhase))
    {
        Output::Print(_u("Disposing objects\n"));
    }
#endif

    // Disable dispose within this method, restore it when we're done
    AutoRestoreValue<bool> disableDispose(&this->allowDispose, false);

#ifdef FAULT_INJECTION
    this->collectionWrapper->DisposeScriptContextByFaultInjectionCallBack();
#endif

    // Scope the timestamp to just the dispose
    {
        AUTO_TIMESTAMP(dispose);
        autoHeap.DisposeObjects();
    }

#ifdef PROFILE_RECYCLER_ALLOC
    if (trackerDictionary != nullptr)
    {
        Assert(nextAllocData.IsEmpty());
        nextAllocData = oldAllocData;
    }
#endif

#ifdef ENABLE_PROJECTION
    {
        Assert(!this->inResolveExternalWeakReferences);
        Assert(!this->allowDispose);
#if DBG || defined RECYCLER_TRACE
        AutoRestoreValue<bool> inResolveExternalWeakReferencedObjects(&this->inResolveExternalWeakReferences, true);
#endif
        AUTO_TIMESTAMP(externalWeakReferenceObjectResolve);

        // This is where it is safe to resolve external weak references, as they can lead to new script entry
        collectionWrapper->ResolveExternalWeakReferencedObjects();
    }
#endif
    Assert(!this->inResolveExternalWeakReferences);

    Assert(this->inDispose);
    this->inDispose = false;
    ASYNC_HOST_OPERATION_END(collectionWrapper);

    uint sweptBytes = 0;
#ifdef RECYCLER_STATS
    sweptBytes = (uint)collectionStats.objectSweptBytes;
#endif
    GCETW(GC_DISPOSE_STOP, (this, sweptBytes));
}
bool
Recycler::FinishDisposeObjects()
{
    CUSTOM_PHASE_PRINT_TRACE1(GetRecyclerFlagsTable(), Js::DisposePhase, _u("[Dispose] AllowDispose in FinishDisposeObject: %d\n"), this->allowDispose);

    if (this->hasDisposableObject && this->allowDispose)
    {
        CUSTOM_PHASE_PRINT_TRACE1(GetRecyclerFlagsTable(), Js::DisposePhase, _u("[Dispose] FinishDisposeObject, calling Dispose: %d\n"), this->allowDispose);
#ifdef RECYCLER_TRACE
        CollectionParam savedCollectionParam = collectionParam;
#endif
        DisposeObjects();
#ifdef RECYCLER_TRACE
        collectionParam = savedCollectionParam;
#endif
        // FinishDisposeObjects is always called either during a collection
        // or from a path that will check NeedExhaustiveRepeatCollect(), so no need to check it here
        return true;
    }

#ifdef RECYCLER_TRACE
    if (!this->inDispose && this->hasDisposableObject
        && GetRecyclerFlagsTable().Trace.IsEnabled(Js::RecyclerPhase))
    {
        Output::Print(_u("%04X> RC(%p): %s\n"), this->mainThreadId, this, _u("Dispose object delayed"));
    }
#endif
    return false;
}
template bool Recycler::FinishDisposeObjectsNow<FinishDispose>();
template bool Recycler::FinishDisposeObjectsNow<FinishDisposeTimed>();

template <CollectionFlags flags>
bool
Recycler::FinishDisposeObjectsNow()
{
    if (inDisposeWrapper)
    {
        return false;
    }
    return FinishDisposeObjectsWrapped<flags>();
}

template <CollectionFlags flags>
inline
bool
Recycler::FinishDisposeObjectsWrapped()
{
    const BOOL allowDisposeFlag = flags & CollectOverride_AllowDispose;
    if (allowDisposeFlag && this->NeedDispose())
    {
        if ((flags & CollectHeuristic_TimeIfScriptActive) == CollectHeuristic_TimeIfScriptActive)
        {
            if (!this->NeedDisposeTimed())
            {
                return false;
            }
        }

        this->allowDispose = true;
        this->inDisposeWrapper = true;
#ifdef RECYCLER_TRACE
        if (GetRecyclerFlagsTable().Trace.IsEnabled(Js::RecyclerPhase))
        {
            Output::Print(_u("%04X> RC(%p): %s\n"), this->mainThreadId, this, _u("Process delayed dispose object"));
        }
#endif
        collectionWrapper->DisposeObjects(this);

        // Dispose may pump the message loop and cause a reentrant GC. If reentrancy isn't allowed,
        // the request gets recorded as a pending collect request.
        // FinishDisposeObjectsWrapped/DisposeObjectsWrapped is called at a place that might not be during a collection
        // and won't check NeedExhaustiveRepeatCollect(), so we need to check it here to honor those requests.
        if (!this->CollectionInProgress() && NeedExhaustiveRepeatCollect() && ((flags & CollectOverride_NoExhaustiveCollect) != CollectOverride_NoExhaustiveCollect))
        {
#ifdef RECYCLER_TRACE
            CaptureCollectionParam((CollectionFlags)(flags & ~CollectMode_Partial), true);
#endif
            DoCollectWrapped((CollectionFlags)(flags & ~CollectMode_Partial));
        }
        this->inDisposeWrapper = false;
        return true;
    }
    return false;
}
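
// Illustrative note (an assumption, not part of the build): the guard pattern in
// FinishDisposeObjectsNow/FinishDisposeObjectsWrapped is a plain reentrancy latch:
//
//     if (inDisposeWrapper) { return false; }  // already inside; refuse reentry
//     inDisposeWrapper = true;
//     // ... run dispose, which may re-enter through host callbacks or the message loop ...
//     inDisposeWrapper = false;
//
// Because dispose can execute arbitrary host code, any GC requested while the latch is
// held is deferred, and the NeedExhaustiveRepeatCollect() check above picks it up.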
/*------------------------------------------------------------------------------------------------
 * Collect
 *------------------------------------------------------------------------------------------------*/
BOOL
Recycler::CollectOnAllocatorThread()
{
#if ENABLE_PARTIAL_GC
    Assert(!inPartialCollectMode);
#endif
#ifdef RECYCLER_TRACE
    PrintCollectTrace(Js::GarbageCollectPhase);
#endif
    this->CollectionBegin<Js::GarbageCollectPhase>();
    this->Mark();

    // Partial collect mode is not re-enabled after a non-partial in-thread GC because partial GC heuristics are not adjusted
    // after a full in-thread GC. Enabling partial collect mode causes partial GC heuristics to be reset before the next full
    // in-thread GC, thereby allowing partial GC to kick in more easily without being able to adjust heuristics after the full
    // GCs. Until we have a way of adjusting partial GC heuristics after a full in-thread GC, once partial collect mode is
    // turned off, it will remain off until a concurrent GC happens.
    this->Sweep();
    this->CollectionEnd<Js::GarbageCollectPhase>();

    FinishCollection();
    return true;
}
// Explicitly instantiate all possible modes
template BOOL Recycler::CollectNow<CollectOnScriptIdle>();
template BOOL Recycler::CollectNow<CollectOnScriptExit>();
template BOOL Recycler::CollectNow<CollectOnAllocation>();
template BOOL Recycler::CollectNow<CollectOnTypedArrayAllocation>();
template BOOL Recycler::CollectNow<CollectOnScriptCloseNonPrimary>();
template BOOL Recycler::CollectNow<CollectExhaustiveCandidate>();
template BOOL Recycler::CollectNow<CollectNowConcurrent>();
template BOOL Recycler::CollectNow<CollectNowExhaustive>();
template BOOL Recycler::CollectNow<CollectNowDecommitNowExplicit>();
template BOOL Recycler::CollectNow<CollectNowPartial>();
template BOOL Recycler::CollectNow<CollectNowConcurrentPartial>();
template BOOL Recycler::CollectNow<CollectNowForceInThread>();
template BOOL Recycler::CollectNow<CollectNowForceInThreadExternal>();
template BOOL Recycler::CollectNow<CollectNowForceInThreadExternalNoStack>();
template BOOL Recycler::CollectNow<CollectOnRecoverFromOutOfMemory>();
template BOOL Recycler::CollectNow<CollectNowDefault>();
template BOOL Recycler::CollectNow<CollectOnSuspendCleanup>();
template BOOL Recycler::CollectNow<CollectNowDefaultLSCleanup>();
#if defined(CHECK_MEMORY_LEAK) || defined(LEAK_REPORT)
template BOOL Recycler::CollectNow<CollectNowFinalGC>();
#endif
#ifdef ENABLE_DEBUG_CONFIG_OPTIONS
template BOOL Recycler::CollectNow<CollectNowExhaustiveSkipStack>();
#endif
template <CollectionFlags flags>
BOOL
Recycler::CollectNow()
{
    // Force-in-thread cannot be concurrent or partial
    CompileAssert((flags & CollectOverride_ForceInThread) == 0 || (flags & (CollectMode_Concurrent | CollectMode_Partial)) == 0);

    // Collections are not allowed while the recycler is currently executing the PostCollectionCallback
    if (collectionState == CollectionStatePostCollectionCallback)
    {
        return false;
    }

#if ENABLE_DEBUG_CONFIG_OPTIONS
    if ((disableCollection && (flags & CollectOverride_Explicit) == 0) || isShuttingDown)
#else
    if (isShuttingDown)
#endif
    {
        Assert(collectionState == CollectionStateNotCollecting
            || collectionState == CollectionStateExit
            || this->isShuttingDown);
        return false;
    }

    if (flags & CollectOverride_ExhaustiveCandidate)
    {
        return CollectWithExhaustiveCandidate<flags>();
    }

    return CollectInternal<flags>();
}
template <CollectionFlags flags>
BOOL
Recycler::GetPartialFlag()
{
#if ENABLE_PARTIAL_GC
#pragma prefast(suppress:6313, "flags is a template parameter and can be 0")
    return (flags & CollectMode_Partial) && inPartialCollectMode;
#else
    return false;
#endif
}
template <CollectionFlags flags>
BOOL
Recycler::CollectWithExhaustiveCandidate()
{
    Assert(flags & CollectOverride_ExhaustiveCandidate);

    // Currently we don't have any exhaustive candidate that has a heuristic
    Assert((flags & CollectHeuristic_Mask & ~CollectHeuristic_Never) == 0);
    this->hasExhaustiveCandidate = true;
    if (flags & CollectHeuristic_Never)
    {
        // This is just an exhaustive candidate notification. Don't trigger a GC.
        return false;
    }

    // Continue with the GC heuristic
    return CollectInternal<flags>();
}
template <CollectionFlags flags>
BOOL
Recycler::CollectInternal()
{
    // The CollectHeuristic_Never flag should only be used with an exhaustive candidate
    Assert((flags & CollectHeuristic_Never) == 0);

    // If we're in a re-entrant state, we want to allow GC to be triggered only
    // from allocation (or from trigger points with AllowReentrant). This is to minimize
    // the number of reentrant GCs.
    if ((flags & CollectOverride_AllowReentrant) == 0 && this->inDispose)
    {
        return false;
    }

#ifdef RECYCLER_TRACE
    CaptureCollectionParam(flags);
#endif

#if ENABLE_CONCURRENT_GC
    const BOOL concurrent = flags & CollectMode_Concurrent;
    const BOOL finishConcurrent = flags & CollectOverride_FinishConcurrent;

    // If we priority boosted, we should try to finish the concurrent GC every chance we get.
    // Otherwise, we should finish it if we are not doing a concurrent GC,
    // or if the flags tell us to always try to finish a concurrent GC (CollectOverride_FinishConcurrent).
    if ((!concurrent || finishConcurrent || priorityBoost) && this->CollectionInProgress())
    {
        return TryFinishConcurrentCollect<flags>();
    }
#endif

    if (flags & CollectHeuristic_Mask)
    {
        // Check some heuristics first before starting a collection
        return CollectWithHeuristic<flags>();
    }

    // Start a collection now.
    return Collect<flags>();
}
template <CollectionFlags flags>
BOOL
Recycler::CollectWithHeuristic()
{
    // The CollectHeuristic_Never flag should only be used with an exhaustive candidate
    Assert((flags & CollectHeuristic_Never) == 0);

    BOOL isScriptContextCloseGCPending = FALSE;
    const BOOL allocSize = flags & CollectHeuristic_AllocSize;
    const BOOL timedIfScriptActive = flags & CollectHeuristic_TimeIfScriptActive;
    const BOOL timedIfInScript = flags & CollectHeuristic_TimeIfInScript;
    const BOOL timed = (timedIfScriptActive && isScriptActive) || (timedIfInScript && isInScript) || (flags & CollectHeuristic_Time);

    if ((flags & CollectOverride_CheckScriptContextClose) != 0)
    {
        isScriptContextCloseGCPending = this->collectionWrapper->GetIsScriptContextCloseGCPending();
    }

    // If there is a script context close GC pending, we need to do a GC regardless.
    // Otherwise, we should check the heuristics to see if a GC is necessary.
    if (!isScriptContextCloseGCPending)
    {
#if ENABLE_PARTIAL_GC
        if (GetPartialFlag<flags>())
        {
            Assert(enablePartialCollect);
            Assert(allocSize);
            Assert(this->uncollectedNewPageCountPartialCollect >= RecyclerSweep::MinPartialUncollectedNewPageCount
                && this->uncollectedNewPageCountPartialCollect <= RecyclerHeuristic::Instance.MaxPartialUncollectedNewPageCount);

            // PARTIAL-GC-REVIEW: For now, we have only the alloc size heuristic.
            // Maybe improve this heuristic by looking at how many free pages are in the page allocator.
            if (autoHeap.uncollectedNewPageCount > this->uncollectedNewPageCountPartialCollect)
            {
                return Collect<flags>();
            }
        }
#endif
        // Allocation byte count heuristic: collect once 1 MB has been allocated
        if (allocSize && (autoHeap.uncollectedAllocBytes < RecyclerHeuristic::UncollectedAllocBytesCollection()))
        {
            return FinishDisposeObjectsWrapped<flags>();
        }

        // Time heuristic: collect every 1000 clock ticks, or when 64 MB has been allocated in a short time
        if (timed && (autoHeap.uncollectedAllocBytes < RecyclerHeuristic::Instance.MaxUncollectedAllocBytes))
        {
            uint currentTickCount = GetTickCount();
#ifdef RECYCLER_TRACE
            collectionParam.timeDiff = currentTickCount - tickCountNextCollection;
#endif
            if ((int)(tickCountNextCollection - currentTickCount) >= 0)
            {
                return FinishDisposeObjectsWrapped<flags>();
            }
        }
#ifdef RECYCLER_TRACE
        else
        {
            uint currentTickCount = GetTickCount();
            collectionParam.timeDiff = currentTickCount - tickCountNextCollection;
        }
#endif
    }

    // Passed all the heuristics; do some GC work, maybe
    return Collect<(CollectionFlags)(flags & ~CollectMode_Partial)>();
}
template <CollectionFlags flags>
BOOL
Recycler::Collect()
{
#if ENABLE_CONCURRENT_GC
    if (this->CollectionInProgress())
    {
        // If we are forced in thread, we can't be concurrent.
        // If we are not concurrent, we should have been handled before in CollectInternal and we shouldn't be here.
        Assert((flags & CollectOverride_ForceInThread) == 0);
        Assert((flags & CollectMode_Concurrent) != 0);
        return TryFinishConcurrentCollect<flags>();
    }
#endif

    // We clear the flag indicating that there is a GC pending because
    // of script context close, since we're about to do a GC anyway
    // and the current GC will suffice.
    this->collectionWrapper->ClearIsScriptContextCloseGCPending();

    SetupPostCollectionFlags<flags>();

    const BOOL partial = GetPartialFlag<flags>();
    CollectionFlags finalFlags = flags;
    if (!partial)
    {
        finalFlags = (CollectionFlags)(flags & ~CollectMode_Partial);
    }

    // ExecuteRecyclerCollectionFunction may throw an exception, in which case we may trigger the assert
    // in SetupPostCollectionFlags because we didn't reset the inExhaustiveCollection variable.
    // Use this flag to disable the assertion if an exception occurs.
    DebugOnly(this->hasIncompleteDoCollect = true);

    {
        RECORD_TIMESTAMP(initialCollectionStartTime);
#ifdef NTBUILD
        this->telemetryBlock->initialCollectionStartProcessUsedBytes = PageAllocator::GetProcessUsedBytes();
        this->telemetryBlock->exhaustiveRepeatedCount = 0;
#endif
        return DoCollectWrapped(finalFlags);
    }
}
template <CollectionFlags flags>
void Recycler::SetupPostCollectionFlags()
{
    // If we are not in a collection (collection in progress or in dispose), inExhaustiveCollection should not be set.
    // Otherwise, we have missed an exhaustive collection.
    Assert(this->hasIncompleteDoCollect ||
        this->CollectionInProgress() || this->inDispose || (!this->inExhaustiveCollection && !this->inDecommitNowCollection));

    // Record whether we want to start exhaustive detection or do decommit now after GC
    const BOOL exhaustive = flags & CollectMode_Exhaustive;
    const BOOL decommitNow = flags & CollectMode_DecommitNow;
    const BOOL cacheCleanup = flags & CollectMode_CacheCleanup;
    if (decommitNow)
    {
        this->inDecommitNowCollection = true;
    }
    if (exhaustive)
    {
        this->inExhaustiveCollection = true;
    }
    if (cacheCleanup)
    {
        this->inCacheCleanupCollection = true;
    }
}
BOOL
Recycler::DoCollectWrapped(CollectionFlags flags)
{
#if ENABLE_CONCURRENT_GC
    this->skipStack = ((flags & CollectOverride_SkipStack) != 0);
    DebugOnly(this->isConcurrentGCOnIdle = (flags == CollectOnScriptIdle));
#endif
    this->allowDispose = (flags & CollectOverride_AllowDispose) == CollectOverride_AllowDispose;

    BOOL collected = collectionWrapper->ExecuteRecyclerCollectionFunction(this, &Recycler::DoCollect, flags);
#if ENABLE_CONCURRENT_GC
    Assert(IsConcurrentExecutingState() || IsConcurrentFinishedState() || !CollectionInProgress());
#else
    Assert(!CollectionInProgress());
#endif
    return collected;
}

bool
Recycler::NeedExhaustiveRepeatCollect() const
{
    return this->inExhaustiveCollection && this->hasExhaustiveCandidate;
}
BOOL
Recycler::DoCollect(CollectionFlags flags)
{
    // ExecuteRecyclerCollectionFunction may throw an exception, in which case we may trigger the assert
    // in SetupPostCollectionFlags because we didn't reset the inExhaustiveCollection variable.
    // Now that we are in DoCollect there shouldn't be any more exceptions, so reset the flag.
    DebugOnly(this->hasIncompleteDoCollect = false);

#ifdef RECYCLER_MEMORY_VERIFY
    this->Verify(Js::RecyclerPhase);
#endif
#ifdef RECYCLER_FINALIZE_CHECK
    autoHeap.VerifyFinalize();
#endif

#if ENABLE_PARTIAL_GC
    BOOL partial = flags & CollectMode_Partial;
#if DBG && defined(RECYCLER_DUMP_OBJECT_GRAPH)
    // Can't pass in RecyclerPartialStress and DumpObjectGraphOnCollect, or call CollectGarbage with DumpObjectGraph
    if (GetRecyclerFlagsTable().RecyclerPartialStress)
    {
        Assert(!GetRecyclerFlagsTable().DumpObjectGraphOnCollect && !this->dumpObjectOnceOnCollect);
    }
    else if (GetRecyclerFlagsTable().DumpObjectGraphOnCollect || this->dumpObjectOnceOnCollect)
    {
        Assert(!GetRecyclerFlagsTable().RecyclerPartialStress);
    }
#endif
#ifdef RECYCLER_STRESS
    if (partial && GetRecyclerFlagsTable().RecyclerPartialStress)
    {
        this->inPartialCollectMode = true;
        this->forcePartialScanStack = true;
    }
#endif
#endif

#ifdef RECYCLER_DUMP_OBJECT_GRAPH
    if (dumpObjectOnceOnCollect || GetRecyclerFlagsTable().DumpObjectGraphOnCollect)
    {
        DumpObjectGraph();
        dumpObjectOnceOnCollect = false;
#if ENABLE_PARTIAL_GC
        // Can't do a partial collect if DumpObjectGraph is set, since it'll call FinishPartial
        // which will set inPartialCollectMode to false.
        partial = false;
#endif
    }
#endif

#if ENABLE_CONCURRENT_GC
    const bool concurrent = (flags & CollectMode_Concurrent) != 0;
    const BOOL forceInThread = flags & CollectOverride_ForceInThread;
#else
    const bool concurrent = false;
#endif

    // Flush the pending dispose objects first if dispose is allowed
    Assert(!this->CollectionInProgress());
#if ENABLE_CONCURRENT_GC
    Assert(this->backgroundFinishMarkCount == 0);
#endif
    bool collected = FinishDisposeObjects();
    do
    {
        INC_TIMESTAMP_FIELD(exhaustiveRepeatedCount);
        RECORD_TIMESTAMP(currentCollectionStartTime);
#ifdef NTBUILD
        this->telemetryBlock->currentCollectionStartProcessUsedBytes = PageAllocator::GetProcessUsedBytes();
#endif
#if ENABLE_CONCURRENT_GC
        // DisposeObjects may call script again and start another GC, so we may still be in a concurrent GC state
        if (this->CollectionInProgress())
        {
            Assert(this->IsConcurrentState());
            Assert(collected);
            if (forceInThread)
            {
                return this->FinishConcurrentCollect(flags);
            }
            return true;
        }
        Assert(this->backgroundFinishMarkCount == 0);
#endif
#if DBG
        collectionCount++;
#endif

        collectionState = Collection_PreCollection;
        collectionWrapper->PreCollectionCallBack(flags);
        collectionState = CollectionStateNotCollecting;

        hasExhaustiveCandidate = false; // reset the candidate detection

#ifdef RECYCLER_STATS
#if ENABLE_PARTIAL_GC
        RecyclerCollectionStats oldCollectionStats = collectionStats;
#endif
        memset(&collectionStats, 0, sizeof(RecyclerCollectionStats));
        this->collectionStats.startCollectAllocBytes = autoHeap.uncollectedAllocBytes;
#if ENABLE_PARTIAL_GC
        this->collectionStats.startCollectNewPageCount = autoHeap.uncollectedNewPageCount;
        this->collectionStats.uncollectedNewPageCountPartialCollect = this->uncollectedNewPageCountPartialCollect;
#endif
#endif

#if ENABLE_PARTIAL_GC
        if (partial)
        {
#if ENABLE_CONCURRENT_GC
            Assert(!forceInThread);
#endif
#ifdef RECYCLER_STATS
            // We are only doing a partial GC, copy some old stats
            collectionStats.finalizeCount = oldCollectionStats.finalizeCount;
            memcpy(collectionStats.heapBlockCount, oldCollectionStats.smallNonLeafHeapBlockPartialUnusedCount,
                sizeof(oldCollectionStats.smallNonLeafHeapBlockPartialUnusedCount));
            memcpy(collectionStats.heapBlockFreeByteCount, oldCollectionStats.smallNonLeafHeapBlockPartialUnusedBytes,
                sizeof(oldCollectionStats.smallNonLeafHeapBlockPartialUnusedBytes));
            memcpy(collectionStats.smallNonLeafHeapBlockPartialUnusedCount, oldCollectionStats.smallNonLeafHeapBlockPartialUnusedCount,
                sizeof(oldCollectionStats.smallNonLeafHeapBlockPartialUnusedCount));
            memcpy(collectionStats.smallNonLeafHeapBlockPartialUnusedBytes, oldCollectionStats.smallNonLeafHeapBlockPartialUnusedBytes,
                sizeof(oldCollectionStats.smallNonLeafHeapBlockPartialUnusedBytes));
#endif
            Assert(enablePartialCollect && inPartialCollectMode);
            if (!this->PartialCollect(concurrent))
            {
                return collected;
            }

            // This disables partial if we do a repeated exhaustive GC
            partial = false;
            collected = true;
            continue;
        }

        // Not doing a partial collect; we should decommit on finish collect
        decommitOnFinish = true;
        if (inPartialCollectMode)
        {
            // Finish the partial collect first
            FinishPartialCollect();

            // Old heap blocks with free objects are made available; count that as being collected
            collected = true;

            // PARTIAL-GC-CONSIDER: should we just pretend we did a GC, since we have made the free-listed objects
            // available to be used, instead of starting off another GC?
        }
#endif

#if ENABLE_CONCURRENT_GC
        bool skipConcurrent = false;
#ifdef ENABLE_DEBUG_CONFIG_OPTIONS
        // If the below flag is passed in, skip doing a non-blocking concurrent collect. Instead,
        // we will do a blocking concurrent collect, which is basically an in-thread GC.
        skipConcurrent = GetRecyclerFlagsTable().ForceBlockingConcurrentCollect;
#endif

        // We are about to start a collection. Reset our heuristic counters now, so that
        // any allocations that occur during concurrent collection count toward the next collection's threshold.
        ResetHeuristicCounters();
        if (concurrent && !skipConcurrent)
        {
            Assert(!forceInThread);
            if (enableConcurrentMark)
            {
                if (StartBackgroundMarkCollect())
                {
                    // Tell the caller whether we have finished a collection and there may be free objects to reuse
                    return collected;
                }

                // Either ResetWriteWatch failed or the thread service failed,
                // so concurrent mark is disabled, at least for now
            }

            if (enableConcurrentSweep)
            {
                if (StartConcurrentSweepCollect())
                {
                    collected = true;
                    continue;
                }

                // Out of memory during collection
                return collected;
            }

            // Concurrent collection failed; fall back to non-concurrent collection
        }

        if (!forceInThread && enableConcurrentMark)
        {
            if (!CollectOnConcurrentThread())
            {
                // Timed out or out of memory during collection
                return collected;
            }
        }
        else
#endif
        {
            if (!CollectOnAllocatorThread())
            {
                // Out of memory during collection
                return collected;
            }
        }

        collected = true;

#ifdef RECYCLER_TRACE
        collectionParam.repeat = true;
#endif
    }
    while (this->NeedExhaustiveRepeatCollect());

#if ENABLE_CONCURRENT_GC
    // DisposeObjects may call script again and start another GC, so we may still be in a concurrent GC state
    if (this->CollectionInProgress())
    {
        Assert(this->IsConcurrentState());
        Assert(collected);
        return true;
    }
#endif

    EndCollection();

    // Tell the caller whether we have finished a collection and there may be free objects to reuse
    return collected;
}
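
// Illustrative note (an assumption, not part of the build): DoCollect's outer do/while
// realizes "exhaustive" collection as repeat-until-quiescent. Each iteration clears
// hasExhaustiveCandidate before collecting; if finalizers or callbacks announce a new
// candidate during the pass, the loop runs again:
//
//     do
//     {
//         hasExhaustiveCandidate = false;     // arm the detector
//         RunOneCollection();                 // may set it again via callbacks
//     } while (inExhaustiveCollection && hasExhaustiveCandidate);
//
// RunOneCollection is a hypothetical stand-in for the concurrent/in-thread dispatch
// above; the real exit condition is NeedExhaustiveRepeatCollect().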
void
Recycler::EndCollection()
{
#if ENABLE_CONCURRENT_GC
    Assert(this->backgroundFinishMarkCount == 0);
#endif
    Assert(!this->CollectionInProgress());

    // No more collection is requested; we can turn exhaustive back off
    this->inExhaustiveCollection = false;

    if (this->inDecommitNowCollection || CUSTOM_CONFIG_FLAG(GetRecyclerFlagsTable(), ForceDecommitOnCollect))
    {
#ifdef RECYCLER_TRACE
        if (GetRecyclerFlagsTable().Trace.IsEnabled(Js::RecyclerPhase))
        {
            Output::Print(_u("%04X> RC(%p): %s\n"), this->mainThreadId, this, _u("Decommit now"));
        }
#endif
        ForEachPageAllocator([](IdleDecommitPageAllocator* pageAlloc)
        {
            pageAlloc->DecommitNow();
        });
        this->inDecommitNowCollection = false;
    }

    RECORD_TIMESTAMP(lastCollectionEndTime);
}
#if ENABLE_PARTIAL_GC
bool
Recycler::PartialCollect(bool concurrent)
{
    Assert(IsMarkStackEmpty());
    Assert(this->inPartialCollectMode);
    Assert(collectionState == CollectionStateNotCollecting);

    // Rescan again
    collectionState = CollectionStateRescanFindRoots;

#if ENABLE_CONCURRENT_GC
    if (concurrent && enableConcurrentMark && this->partialConcurrentNextCollection)
    {
        this->PrepareBackgroundFindRoots();
        if (StartConcurrent(CollectionStateConcurrentFinishMark))
        {
#ifdef RECYCLER_TRACE
            PrintCollectTrace(Js::ConcurrentPartialCollectPhase);
#endif
            return false;
        }
        this->RevertPrepareBackgroundFindRoots();
    }
#endif

#ifdef RECYCLER_STRESS
    if (forcePartialScanStack)
    {
        // Mark the roots, since they may not have been marked
        // in RecyclerPartialStress mode
        this->RootMark(collectionState);
    }
#endif

#ifdef RECYCLER_TRACE
    PrintCollectTrace(Js::PartialCollectPhase);
#endif

    bool needConcurrentSweep = false;
    this->CollectionBegin<Js::PartialCollectPhase>();

    size_t rescanRootBytes = FinishMark(INFINITE);
    Assert(rescanRootBytes != Recycler::InvalidScanRootBytes);
    needConcurrentSweep = this->Sweep(rescanRootBytes, concurrent, true);

    this->CollectionEnd<Js::PartialCollectPhase>();

    // Only reset the new page counter
    autoHeap.uncollectedNewPageCount = 0;

    // Finish collection
    FinishCollection(needConcurrentSweep);
    return true;
}
void
Recycler::ProcessClientTrackedObjects()
{
    GCETW(GC_PROCESS_CLIENT_TRACKED_OBJECT_START, (this));
    Assert(this->inPartialCollectMode);
#if ENABLE_CONCURRENT_GC
    Assert(!this->DoQueueTrackedObject());
#endif
    if (!this->clientTrackedObjectList.Empty())
    {
        SListBase<void *>::Iterator iter(&this->clientTrackedObjectList);
        while (iter.Next())
        {
            auto& reference = iter.Data();
            this->TryMarkNonInterior(reference, &reference /* parentReference */); // Reference to inside the node
            RECYCLER_STATS_INC(this, clientTrackedObjectCount);
        }
        this->clientTrackedObjectList.Clear(&this->clientTrackedObjectAllocator);
    }
    GCETW(GC_PROCESS_CLIENT_TRACKED_OBJECT_STOP, (this));
}

void
Recycler::ClearPartialCollect()
{
#if ENABLE_CONCURRENT_GC
    Assert(!this->DoQueueTrackedObject());
#endif
    this->autoHeap.unusedPartialCollectFreeBytes = 0;
    this->partialUncollectedAllocBytes = 0;
    this->clientTrackedObjectList.Clear(&this->clientTrackedObjectAllocator);
    this->uncollectedNewPageCountPartialCollect = (size_t)-1;
}

void
Recycler::FinishPartialCollect(RecyclerSweep * recyclerSweep)
{
    Assert(recyclerSweep == nullptr || !recyclerSweep->IsBackground());
    RECYCLER_PROFILE_EXEC_BEGIN(this, Js::FinishPartialPhase);
    Assert(inPartialCollectMode);
#if ENABLE_CONCURRENT_GC
    Assert(!this->DoQueueTrackedObject());
#endif
    autoHeap.FinishPartialCollect(recyclerSweep);
    this->inPartialCollectMode = false;
    ClearPartialCollect();
    RECYCLER_PROFILE_EXEC_END(this, Js::FinishPartialPhase);
}
#endif
void
Recycler::EnsureNotCollecting()
{
#if ENABLE_CONCURRENT_GC
    FinishConcurrent<ForceFinishCollection>();
#endif
    Assert(!this->CollectionInProgress());
}

void Recycler::EnumerateObjects(ObjectInfoBits infoBits, void (*CallBackFunction)(void * address, size_t size))
{
    // Make sure we are not collecting
    EnsureNotCollecting();

#if ENABLE_PARTIAL_GC
    // We are updating the free bit vector, messing up the partial collection state.
    // Just get out of partial collect mode.
    // GC-CONSIDER: consider adding an option in FinishConcurrent to not get into partial collect mode during sweep.
    if (inPartialCollectMode)
    {
        FinishPartialCollect();
    }
#endif
    autoHeap.EnumerateObjects(infoBits, CallBackFunction);
    // GC-TODO: Explicit heap?
}
BOOL
Recycler::IsMarkState() const
{
    return (collectionState & Collection_Mark);
}

BOOL
Recycler::IsFindRootsState() const
{
    return (collectionState & Collection_FindRoots);
}

#if DBG
BOOL
Recycler::IsReentrantState() const
{
#if ENABLE_CONCURRENT_GC
    return !this->CollectionInProgress() || this->IsConcurrentState();
#else
    return !this->CollectionInProgress();
#endif
}
#endif
#if defined(ENABLE_JS_ETW) && defined(NTBUILD)
template <Js::Phase phase> static ETWEventGCActivationKind GetETWEventGCActivationKind();
template <> ETWEventGCActivationKind GetETWEventGCActivationKind<Js::GarbageCollectPhase>() { return ETWEvent_GarbageCollect; }
template <> ETWEventGCActivationKind GetETWEventGCActivationKind<Js::ThreadCollectPhase>() { return ETWEvent_ThreadCollect; }
template <> ETWEventGCActivationKind GetETWEventGCActivationKind<Js::ConcurrentCollectPhase>() { return ETWEvent_ConcurrentCollect; }
template <> ETWEventGCActivationKind GetETWEventGCActivationKind<Js::PartialCollectPhase>() { return ETWEvent_PartialCollect; }
#endif

template <Js::Phase phase>
void
Recycler::CollectionBegin()
{
    RECYCLER_PROFILE_EXEC_BEGIN2(this, Js::RecyclerPhase, phase);
    GCETW_INTERNAL(GC_START, (this, GetETWEventGCActivationKind<phase>()));
}

template <Js::Phase phase>
void
Recycler::CollectionEnd()
{
    GCETW_INTERNAL(GC_STOP, (this, GetETWEventGCActivationKind<phase>()));
    RECYCLER_PROFILE_EXEC_END2(this, phase, Js::RecyclerPhase);
}
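
// CollectionBegin/CollectionEnd bracket every top-level collection phase: they
// pair the profiler begin/end markers with the ETW GC_START/GC_STOP events so
// trace consumers always see balanced markers, keyed by the activation kind
// derived from the phase template argument above.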
#if ENABLE_CONCURRENT_GC
size_t
Recycler::BackgroundRescan(RescanFlags rescanFlags)
{
    Assert(!this->isProcessingRescan);
    DebugOnly(this->isProcessingRescan = true);

    GCETW(GC_BACKGROUNDRESCAN_START, (this, backgroundRescanCount));
    RECYCLER_PROFILE_EXEC_BACKGROUND_BEGIN(this, Js::BackgroundRescanPhase);

    size_t rescannedPageCount = heapBlockMap.Rescan(this, ((rescanFlags & RescanFlags_ResetWriteWatch) != 0));
    rescannedPageCount += autoHeap.Rescan(rescanFlags);

    RECYCLER_PROFILE_EXEC_BACKGROUND_END(this, Js::BackgroundRescanPhase);
    GCETW(GC_BACKGROUNDRESCAN_STOP, (this, backgroundRescanCount));

    this->backgroundRescanCount++;

    if (!this->NeedOOMRescan())
    {
        if ((rescanFlags & RescanFlags_ResetWriteWatch) != 0)
        {
            DebugOnly(this->isProcessingRescan = false);
        }
        return rescannedPageCount;
    }

    DebugOnly(this->isProcessingRescan = false);
    return Recycler::InvalidScanRootBytes;
}

void
Recycler::BackgroundResetWriteWatchAll()
{
    GCETW(GC_BACKGROUNDRESETWRITEWATCH_START, (this, -1));
    heapBlockMap.ResetDirtyPages(this);
    GCETW(GC_BACKGROUNDRESETWRITEWATCH_STOP, (this, -1));
}
#endif
size_t
Recycler::FinishMarkRescan(bool background)
{
#if !ENABLE_CONCURRENT_GC
    Assert(!background);
#endif
    if (background)
    {
        GCETW(GC_BACKGROUNDRESCAN_START, (this, 0));
    }
    else
    {
        GCETW(GC_RESCAN_START, (this));
    }
    RECYCLER_PROFILE_EXEC_THREAD_BEGIN(background, this, Js::RescanPhase);

#if ENABLE_CONCURRENT_GC
    RescanFlags const flags = (background ? RescanFlags_ResetWriteWatch : RescanFlags_None);
#else
    Assert(!background);
    RescanFlags const flags = RescanFlags_None;
#endif

#if DBG
    Assert(!this->isProcessingRescan);
    this->isProcessingRescan = true;
#endif

#if ENABLE_CONCURRENT_GC
    size_t scannedPageCount = heapBlockMap.Rescan(this, ((flags & RescanFlags_ResetWriteWatch) != 0));
    scannedPageCount += autoHeap.Rescan(flags);
#else
    size_t scannedPageCount = 0;
#endif

    DebugOnly(this->isProcessingRescan = false);

    RECYCLER_PROFILE_EXEC_THREAD_END(background, this, Js::RescanPhase);
    if (background)
    {
        GCETW(GC_BACKGROUNDRESCAN_STOP, (this, 0));
    }
    else
    {
        GCETW(GC_RESCAN_STOP, (this));
    }
    return scannedPageCount;
}
#if ENABLE_CONCURRENT_GC
void
Recycler::ProcessTrackedObjects()
{
    GCETW(GC_PROCESS_TRACKED_OBJECT_START, (this));
#if ENABLE_PARTIAL_GC
    Assert(this->clientTrackedObjectList.Empty());
    Assert(!this->inPartialCollectMode);
#endif
    Assert(this->DoQueueTrackedObject());
    this->queueTrackedObject = false;
    DebugOnly(this->isProcessingTrackedObjects = true);

    markContext.ProcessTracked();

    // If we did a parallel mark, we need to process any queued tracked objects from the parallel mark stack as well.
    // If we didn't, this will do nothing.
    parallelMarkContext1.ProcessTracked();
    parallelMarkContext2.ProcessTracked();
    parallelMarkContext3.ProcessTracked();

    DebugOnly(this->isProcessingTrackedObjects = false);
    GCETW(GC_PROCESS_TRACKED_OBJECT_STOP, (this));
}
#endif
BOOL
Recycler::RequestConcurrentWrapperCallback()
{
#if ENABLE_CONCURRENT_GC
    Assert(!IsConcurrentExecutingState());

    // Save the original collection state
    CollectionState oldState = this->collectionState;

    // Get the background thread to start the callback
    if (StartConcurrent(CollectionStateConcurrentWrapperCallback))
    {
        // Wait for the callback to complete
        WaitForConcurrentThread(INFINITE);

        // The state must not change back until we restore the original state
        Assert(collectionState == CollectionStateConcurrentWrapperCallback);
        this->collectionState = oldState;
        return true;
    }
#endif
    return false;
}
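
// Note: RequestConcurrentWrapperCallback borrows the concurrent thread for an
// arbitrary host callback. The collection state is saved and restored around
// the callback because StartConcurrent/WaitForConcurrentThread use
// collectionState as the message channel to the background thread; the wrapper
// callback state must not leak into a real collection.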
#if ENABLE_CONCURRENT_GC
/*------------------------------------------------------------------------------------------------
 * Concurrent
 *------------------------------------------------------------------------------------------------*/
BOOL
Recycler::CollectOnConcurrentThread()
{
#if ENABLE_PARTIAL_GC
    Assert(!inPartialCollectMode);
#endif
#ifdef RECYCLER_TRACE
    PrintCollectTrace(Js::ThreadCollectPhase);
#endif
    this->CollectionBegin<Js::ThreadCollectPhase>();

    // Synchronous concurrent mark
    if (!StartSynchronousBackgroundMark())
    {
        this->CollectionEnd<Js::ThreadCollectPhase>();
        return false;
    }

    const DWORD waitTime = RecyclerHeuristic::FinishConcurrentCollectWaitTime(this->GetRecyclerFlagsTable());
    GCETW(GC_SYNCHRONOUSMARKWAIT_START, (this, waitTime));
    const BOOL waited = WaitForConcurrentThread(waitTime);
    GCETW(GC_SYNCHRONOUSMARKWAIT_STOP, (this, !waited));
    if (!waited)
    {
#ifdef RECYCLER_TRACE
        if (GetRecyclerFlagsTable().Trace.IsEnabled(Js::RecyclerPhase)
            || GetRecyclerFlagsTable().Trace.IsEnabled(Js::ThreadCollectPhase))
        {
            Output::Print(_u("%04X> RC(%p): %s: %s\n"), this->mainThreadId, this, Js::PhaseNames[Js::ThreadCollectPhase], _u("Timeout"));
        }
#endif
        this->CollectionEnd<Js::ThreadCollectPhase>();
        return false;
    }

    // If the concurrent thread was done within the time limit, there shouldn't be
    // any objects that need to be rescanned.
    // CONCURRENT-TODO: Optimize this so we don't rescan in the background if we are still waiting.
    // GC-TODO: Unfortunately we can't assert this, as the background code gen thread may still
    // touch GC memory (e.g. FunctionBody), causing write watch and rescan
    // in the background.
    // Assert(markContext.Empty());
    DebugOnly(this->isProcessingRescan = false);

    this->collectionState = CollectionStateMark;
    this->ProcessTrackedObjects();
    this->ProcessMark(false);
    this->EndMark();

    // Partial collect mode is not re-enabled after a non-partial in-thread GC because partial GC heuristics are not adjusted
    // after a full in-thread GC. Enabling partial collect mode causes partial GC heuristics to be reset before the next full
    // in-thread GC, thereby allowing partial GC to kick in more easily without being able to adjust heuristics after the full
    // GCs. Until we have a way of adjusting partial GC heuristics after a full in-thread GC, once partial collect mode is
    // turned off, it will remain off until a concurrent GC happens.
    this->Sweep();

    this->CollectionEnd<Js::ThreadCollectPhase>();
    FinishCollection();
    return true;
}
// explicit instantiation
template BOOL Recycler::FinishConcurrent<FinishConcurrentOnIdle>();
template BOOL Recycler::FinishConcurrent<FinishConcurrentOnIdleAtRoot>();
template BOOL Recycler::FinishConcurrent<FinishConcurrentOnExitScript>();
template BOOL Recycler::FinishConcurrent<FinishConcurrentOnEnterScript>();
template BOOL Recycler::FinishConcurrent<ForceFinishCollection>();

template <CollectionFlags flags>
BOOL
Recycler::FinishConcurrent()
{
    CompileAssert((flags & ~(CollectOverride_AllowDispose | CollectOverride_ForceFinish | CollectOverride_ForceInThread
        | CollectMode_Concurrent | CollectOverride_DisableIdleFinish | CollectOverride_BackgroundFinishMark
        | CollectOverride_SkipStack | CollectOverride_FinishConcurrentTimeout)) == 0);

    if (this->CollectionInProgress())
    {
        Assert(this->IsConcurrentEnabled());
        Assert(IsConcurrentState());

        const BOOL forceFinish = flags & CollectOverride_ForceFinish;
        if (forceFinish || !IsConcurrentExecutingState())
        {
#if ENABLE_BACKGROUND_PAGE_FREEING
            if (CONFIG_FLAG(EnableBGFreeZero))
            {
                if (this->collectionState == CollectionStateConcurrentSweep)
                {
                    // Help the background thread to zero and flush zeroed pages
                    // if we are going to wait anyway.
                    recyclerPageAllocator.ZeroQueuedPages();
                    recyclerLargeBlockPageAllocator.ZeroQueuedPages();
#ifdef RECYCLER_WRITE_BARRIER_ALLOC_SEPARATE_PAGE
                    recyclerWithBarrierPageAllocator.ZeroQueuedPages();
#endif
                    this->FlushBackgroundPages();
                }
            }
#endif
#ifdef RECYCLER_TRACE
            collectionParam.finishOnly = true;
            collectionParam.flags = flags;
#endif
#if ENABLE_CONCURRENT_GC
            // If SkipStack is provided, and we're not forcing the finish (i.e., we're not in a concurrent executing state),
            // then it's fine to set the skipStack flag to true, so that during the in-thread find-roots we'll skip
            // the stack scan.
            this->skipStack = ((flags & CollectOverride_SkipStack) != 0) && !forceFinish;
#if DBG
            this->isFinishGCOnIdle = (flags == FinishConcurrentOnIdleAtRoot);
#endif
#endif
            return FinishConcurrentCollectWrapped(flags);
        }
    }
    return false;
}
template <CollectionFlags flags>
BOOL
Recycler::TryFinishConcurrentCollect()
{
    Assert(this->CollectionInProgress());
    RECYCLER_STATS_INC(this, finishCollectTryCount);

    SetupPostCollectionFlags<flags>();

    const BOOL concurrent = flags & CollectMode_Concurrent;
    const BOOL forceInThread = flags & CollectOverride_ForceInThread;

    Assert(this->IsConcurrentEnabled());
    Assert(IsConcurrentState() || IsCollectionDisabled());
    Assert(!concurrent || !forceInThread);

    if (concurrent && concurrentThread != NULL)
    {
        if (IsConcurrentExecutingState())
        {
            if (!this->priorityBoost)
            {
                uint tickCount = GetTickCount();
                if ((autoHeap.uncollectedAllocBytes > RecyclerHeuristic::Instance.UncollectedAllocBytesConcurrentPriorityBoost)
                    || (tickCount - this->tickCountStartConcurrent > RecyclerHeuristic::PriorityBoostTimeout(this->GetRecyclerFlagsTable())))
                {
#ifdef RECYCLER_TRACE
                    if (GetRecyclerFlagsTable().Trace.IsEnabled(Js::RecyclerPhase))
                    {
                        Output::Print(_u("%04X> RC(%p): %s: "), this->mainThreadId, this, _u("Set priority normal"));
                        if (autoHeap.uncollectedAllocBytes > RecyclerHeuristic::Instance.UncollectedAllocBytesConcurrentPriorityBoost)
                        {
                            Output::Print(_u("AllocBytes=%d (Time=%d)\n"), autoHeap.uncollectedAllocBytes, tickCount - this->tickCountStartConcurrent);
                        }
                        else
                        {
                            Output::Print(_u("Time=%d (AllocBytes=%d)\n"), tickCount - this->tickCountStartConcurrent, autoHeap.uncollectedAllocBytes);
                        }
                    }
#endif
                    // Set the flag so we don't boost the thread priority again
                    this->priorityBoost = true;

                    // The recycler thread hasn't come back within the priority boost timeout.
                    // It either has a large object graph, or it is starving.
                    // Set the priority back to normal.
                    SetThreadPriority(this->concurrentThread, THREAD_PRIORITY_NORMAL);
                }
            }
            return FinishDisposeObjectsWrapped<flags>();
        }
        else if ((flags & CollectOverride_FinishConcurrentTimeout) != 0)
        {
            uint tickCount = GetTickCount();
            // If we haven't gone past the time to call finish collection,
            // simply call FinishDisposeObjects and return.
            // Otherwise, actually go ahead and call FinishConcurrentCollectWrapped.
            // We do this only if this is a collection that allows finish concurrent to time out;
            // if not, by default, we finish the collection.
            if (tickCount <= this->tickCountNextFinishCollection)
            {
                return FinishDisposeObjectsWrapped<flags>();
            }
        }
    }
    return FinishConcurrentCollectWrapped(flags);
}
BOOL
Recycler::IsConcurrentMarkState() const
{
    return (collectionState & Collection_ConcurrentMark) == Collection_ConcurrentMark;
}

BOOL
Recycler::IsConcurrentMarkExecutingState() const
{
    return (collectionState & (Collection_ConcurrentMark | Collection_ExecutingConcurrent)) == (Collection_ConcurrentMark | Collection_ExecutingConcurrent);
}

BOOL
Recycler::IsConcurrentResetMarksState() const
{
    return collectionState == CollectionStateConcurrentResetMarks;
}

BOOL
Recycler::IsInThreadFindRootsState() const
{
    CollectionState currentCollectionState = collectionState;
    return (currentCollectionState & Collection_FindRoots) && (currentCollectionState != CollectionStateConcurrentFindRoots);
}

BOOL
Recycler::IsConcurrentFindRootState() const
{
    return collectionState == CollectionStateConcurrentFindRoots;
}

BOOL
Recycler::IsConcurrentExecutingState() const
{
    return (collectionState & Collection_ExecutingConcurrent);
}

BOOL
Recycler::IsConcurrentSweepExecutingState() const
{
    return (collectionState & (Collection_ConcurrentSweep | Collection_ExecutingConcurrent)) == (Collection_ConcurrentSweep | Collection_ExecutingConcurrent);
}

BOOL
Recycler::IsConcurrentState() const
{
    return (collectionState & Collection_Concurrent);
}

#if DBG
BOOL
Recycler::IsConcurrentFinishedState() const
{
    return (collectionState & Collection_FinishConcurrent);
}
#endif
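
// Note on the predicates above: collectionState is a bit-flags state machine.
// A broad category is tested with a single mask (e.g. "& Collection_Concurrent"),
// while a specific composite state requires every bit of the combination to be
// present. An illustrative sketch only (not compiled); the flag values below
// are hypothetical, the real ones live in the CollectionState enum:
#if 0
enum ExampleStateBits : unsigned
{
    Example_Mark = 0x1,
    Example_Concurrent = 0x2,
    Example_Executing = 0x4
};
unsigned exampleState = Example_Mark | Example_Concurrent | Example_Executing;
// "In any concurrent state?" -- one category bit is enough:
bool anyConcurrent = (exampleState & Example_Concurrent) != 0;
// "Is concurrent mark currently executing?" -- all bits must match:
unsigned required = Example_Mark | Example_Concurrent | Example_Executing;
bool markExecuting = (exampleState & required) == required;
#endif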
bool
Recycler::InitializeConcurrent(JsUtil::ThreadService *threadService)
{
    try
    {
        AUTO_NESTED_HANDLED_EXCEPTION_TYPE(ExceptionType_OutOfMemory);
        concurrentWorkDoneEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
        if (concurrentWorkDoneEvent == nullptr)
        {
            throw Js::OutOfMemoryException();
        }
#if DBG_DUMP
        markContext.GetPageAllocator()->debugName = _u("ConcurrentCollect");
#endif
        if (!threadService->HasCallback())
        {
#ifdef IDLE_DECOMMIT_ENABLED
            concurrentIdleDecommitEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
            if (concurrentIdleDecommitEvent == nullptr)
            {
                throw Js::OutOfMemoryException();
            }
#endif
            concurrentWorkReadyEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
            if (concurrentWorkReadyEvent == nullptr)
            {
                throw Js::OutOfMemoryException();
            }
        }
    }
    catch (Js::OutOfMemoryException)
    {
        Assert(concurrentWorkReadyEvent == nullptr);
        if (concurrentWorkDoneEvent)
        {
            CloseHandle(concurrentWorkDoneEvent);
            concurrentWorkDoneEvent = nullptr;
        }
#ifdef IDLE_DECOMMIT_ENABLED
        if (concurrentIdleDecommitEvent)
        {
            CloseHandle(concurrentIdleDecommitEvent);
            concurrentIdleDecommitEvent = nullptr;
        }
#endif
        return false;
    }
    return true;
}
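
// The events created above implement a simple handshake between the main thread
// and the dedicated concurrent thread. A sketch of the protocol (illustrative
// only; the handle names are made up, the Win32 calls are real):
#if 0
HANDLE workReady = CreateEvent(NULL, FALSE, FALSE, NULL);   // auto-reset
HANDLE workDone  = CreateEvent(NULL, FALSE, FALSE, NULL);   // auto-reset
// Main thread (see StartConcurrent):
SetEvent(workReady);                        // hand one work item to the worker
WaitForSingleObject(workDone, INFINITE);    // block until the worker signals
// Background thread (see ThreadProc): waits on workReady, performs the work
// indicated by collectionState, then calls SetEvent(workDone).
#endif
// When the host supplies a thread service instead, only the work-done event is
// created: the service invokes StaticBackgroundWorkCallback directly, so no
// work-ready event or dedicated thread is needed.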
#pragma prefast(suppress:6262, "Where this function is called should have ample stack space")
bool Recycler::AbortConcurrent(bool restoreState)
{
    Assert(!this->CollectionInProgress() || this->IsConcurrentState());

    // In case the thread already died, wait for that too
    HANDLE handle[2] = { concurrentWorkDoneEvent, concurrentThread };

    // Note: concurrentThread will be null if we have a thread service.
    Assert(concurrentThread != NULL || threadService->HasCallback());
    DWORD handleCount = (concurrentThread == NULL ? 1 : 2);
    DWORD ret = WAIT_OBJECT_0;

    if (this->IsConcurrentState())
    {
        this->isAborting = true;
        if (this->concurrentThread != NULL)
        {
            SetThreadPriority(this->concurrentThread, THREAD_PRIORITY_NORMAL);
        }
        ret = WaitForMultipleObjectsEx(handleCount, handle, FALSE, INFINITE, FALSE);
        this->isAborting = false;

        Assert(this->IsConcurrentFinishedState() || ret == WAIT_OBJECT_0 + 1);

        if (ret == WAIT_OBJECT_0 && restoreState)
        {
            if (collectionState == CollectionStateRescanWait)
            {
                this->ResetMarkCollectionState();
            }
            else if (collectionState == CollectionStateTransferSweptWait)
            {
                // Make sure we don't do another GC after finishing this one.
                this->inExhaustiveCollection = false;

                // Let's just finish the sweep so that the GC is in a consistent state, but don't run dispose.
                // AbortConcurrent already consumed the event from the concurrent thread; just signal it so
                // FinishConcurrentCollect can wait for it again.
                SetEvent(this->concurrentWorkDoneEvent);
                EnsureNotCollecting();
            }
            else
            {
                Assert(UNREACHED);
            }
            Assert(collectionState == CollectionStateNotCollecting);
            Assert(this->isProcessingRescan == false);
        }
        else
        {
            // Even if we weren't asked to restore state, we need to clean up the pending guest arenas
            CleanupPendingUnroot();

            // Also need to release any pages held by the mark stack, if we abandoned it
            markContext.Abort();
        }
    }
    Assert(!this->hasPendingDeleteGuestArena);
    return ret == WAIT_OBJECT_0;
}
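
// AbortConcurrent waits on the done-event and the thread handle together, so a
// background thread that dies without signaling cannot deadlock the abort.
// Sketch of the pattern (illustrative names; the Win32 call is real):
#if 0
HANDLE handles[2] = { workDoneEvent, threadHandle };
DWORD ret = WaitForMultipleObjectsEx(2, handles, FALSE /*waitAll*/, INFINITE, FALSE /*alertable*/);
if (ret == WAIT_OBJECT_0)           { /* worker finished and signaled */ }
else if (ret == WAIT_OBJECT_0 + 1)  { /* thread exited without signaling */ }
#endif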
void
Recycler::CleanupPendingUnroot()
{
    Assert(!this->hasPendingConcurrentFindRoot);
    if (hasPendingUnpinnedObject)
    {
        pinnedObjectMap.MapAndRemoveIf([](void * obj, PinRecord const &refCount)
        {
#if defined(CHECK_MEMORY_LEAK) || defined(LEAK_REPORT)
#ifdef STACK_BACK_TRACE
            Assert(refCount != 0 || refCount.stackBackTraces == nullptr);
#endif
#endif
            return refCount == 0;
        });
        hasPendingUnpinnedObject = false;
    }

    if (hasPendingDeleteGuestArena)
    {
        DebugOnly(bool foundPendingDelete = false);
        DListBase<GuestArenaAllocator>::EditingIterator guestArenaIter(&guestArenaList);
        while (guestArenaIter.Next())
        {
            GuestArenaAllocator& allocator = guestArenaIter.Data();
            if (allocator.pendingDelete)
            {
                allocator.SetLockBlockList(false);
                guestArenaIter.RemoveCurrent(&HeapAllocator::Instance);
                DebugOnly(foundPendingDelete = true);
            }
        }
        hasPendingDeleteGuestArena = false;
        Assert(foundPendingDelete);
    }
#if DBG
    else
    {
        DListBase<GuestArenaAllocator>::Iterator guestArenaIter(&guestArenaList);
        while (guestArenaIter.Next())
        {
            GuestArenaAllocator& allocator = guestArenaIter.Data();
            Assert(!allocator.pendingDelete);
        }
    }
#endif
}
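
// CleanupPendingUnroot runs only when no concurrent find-roots is pending, so
// it is safe here to actually remove unpinned entries and delete guest arenas
// that were merely queued (pendingDelete) while the background thread could
// still have been reading them.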
void
Recycler::FinalizeConcurrent(bool restoreState)
{
    bool needCleanExitState = restoreState;
#if defined(RECYCLER_DUMP_OBJECT_GRAPH)
    needCleanExitState = needCleanExitState || GetRecyclerFlagsTable().DumpObjectGraphOnExit;
#endif
#ifdef LEAK_REPORT
    needCleanExitState = needCleanExitState || GetRecyclerFlagsTable().IsEnabled(Js::LeakReportFlag);
#endif
#ifdef CHECK_MEMORY_LEAK
    needCleanExitState = needCleanExitState || GetRecyclerFlagsTable().CheckMemoryLeak;
#endif

    bool aborted = AbortConcurrent(needCleanExitState);

    collectionState = CollectionStateExit;
    if (aborted && this->concurrentThread != NULL)
    {
        HANDLE handle[2] = { concurrentWorkDoneEvent, concurrentThread };
        SetEvent(concurrentWorkReadyEvent);
        SetThreadPriority(this->concurrentThread, THREAD_PRIORITY_NORMAL);

        // In case the thread already died, wait for that too
        DWORD fRet = WaitForMultipleObjectsEx(2, handle, FALSE, INFINITE, FALSE);
        AssertMsg(fRet != WAIT_FAILED, "Check handles passed to WaitForMultipleObjectsEx.");
    }

    // Shut down the parallel threads.
    parallelThread1.Shutdown();
    parallelThread2.Shutdown();

#ifdef IDLE_DECOMMIT_ENABLED
    if (concurrentIdleDecommitEvent != nullptr)
    {
        CloseHandle(concurrentIdleDecommitEvent);
        concurrentIdleDecommitEvent = nullptr;
    }
#endif
    CloseHandle(concurrentWorkDoneEvent);
    concurrentWorkDoneEvent = nullptr;
    if (concurrentWorkReadyEvent != NULL)
    {
        CloseHandle(concurrentWorkReadyEvent);
        concurrentWorkReadyEvent = nullptr;
    }

    if (needCleanExitState)
    {
        // We may do another marking pass to look for memory leaks;
        // since we have shut down the concurrent thread, don't do a parallel mark.
        this->enableConcurrentMark = false;
        this->enableParallelMark = false;
        this->enableConcurrentSweep = false;
    }
    this->threadService = nullptr;
    this->concurrentThread = nullptr;
}
bool
Recycler::EnableConcurrent(JsUtil::ThreadService *threadService, bool startAllThreads)
{
    if (this->disableConcurrent)
    {
        return false;
    }

    if (!this->InitializeConcurrent(threadService))
    {
        return false;
    }

#if ENABLE_DEBUG_CONFIG_OPTIONS
    this->enableConcurrentMark = !CUSTOM_PHASE_OFF1(GetRecyclerFlagsTable(), Js::ConcurrentMarkPhase);
    this->enableParallelMark = !CUSTOM_PHASE_OFF1(GetRecyclerFlagsTable(), Js::ParallelMarkPhase);
    this->enableConcurrentSweep = !CUSTOM_PHASE_OFF1(GetRecyclerFlagsTable(), Js::ConcurrentSweepPhase);
#else
    this->enableConcurrentMark = true;
    this->enableParallelMark = true;
    this->enableConcurrentSweep = true;
#endif

    if (this->enableParallelMark && this->maxParallelism == 1)
    {
        // Disable parallel mark if only 1 CPU
        this->enableParallelMark = false;
    }

    if (threadService->HasCallback())
    {
        this->threadService = threadService;
        return true;
    }
    else
    {
        bool startConcurrentThread = true;
        bool startedParallelThread1 = false;
        bool startedParallelThread2 = false;

        if (startAllThreads)
        {
            if (this->enableParallelMark && this->maxParallelism > 2)
            {
                if (!parallelThread1.EnableConcurrent(true))
                {
                    startConcurrentThread = false;
                }
                else
                {
                    startedParallelThread1 = true;
                    if (this->maxParallelism > 3)
                    {
                        if (!parallelThread2.EnableConcurrent(true))
                        {
                            startConcurrentThread = false;
                        }
                        else
                        {
                            startedParallelThread2 = true;
                        }
                    }
                }
            }
        }

        if (startConcurrentThread)
        {
            HANDLE concurrentThread = (HANDLE)PlatformAgnostic::Thread::Create(Recycler::ConcurrentThreadStackSize, &Recycler::StaticThreadProc, this, PlatformAgnostic::Thread::ThreadInitStackSizeParamIsAReservation);
            if (concurrentThread != nullptr)
            {
                // Wait for the recycler thread to initialize
                HANDLE handle[2] = { this->concurrentWorkDoneEvent, concurrentThread };
                DWORD ret = WaitForMultipleObjectsEx(2, handle, FALSE, INFINITE, FALSE);
                if (ret == WAIT_OBJECT_0)
                {
                    this->threadService = threadService;
                    this->concurrentThread = concurrentThread;
                    return true;
                }
                CloseHandle(concurrentThread);
            }
        }

        if (startedParallelThread1)
        {
            parallelThread1.Shutdown();
            if (startedParallelThread2)
            {
                parallelThread2.Shutdown();
            }
        }
    }

    // We failed to start a concurrent thread, so set these back to false and clean up
    this->enableConcurrentMark = false;
    this->enableParallelMark = false;
    this->enableConcurrentSweep = false;

    if (concurrentWorkReadyEvent)
    {
        CloseHandle(concurrentWorkReadyEvent);
        concurrentWorkReadyEvent = nullptr;
    }
    if (concurrentWorkDoneEvent)
    {
        CloseHandle(concurrentWorkDoneEvent);
        concurrentWorkDoneEvent = nullptr;
    }
#ifdef IDLE_DECOMMIT_ENABLED
    if (concurrentIdleDecommitEvent)
    {
        CloseHandle(concurrentIdleDecommitEvent);
        concurrentIdleDecommitEvent = nullptr;
    }
#endif
    return false;
}
void
Recycler::ShutdownThread()
{
    if (this->IsConcurrentEnabled())
    {
        Assert(concurrentThread != NULL || threadService->HasCallback());
        FinalizeConcurrent(false);
        if (concurrentThread)
        {
            CloseHandle(concurrentThread);
        }
    }
}

void
Recycler::DisableConcurrent()
{
    if (this->IsConcurrentEnabled())
    {
        Assert(concurrentThread != NULL || threadService->HasCallback());
        FinalizeConcurrent(true);
        if (concurrentThread)
        {
            CloseHandle(concurrentThread);
        }
        this->collectionState = CollectionStateNotCollecting;
    }
}
bool
Recycler::StartConcurrent(CollectionState const state)
{
    // Reset the tick count to detect if the concurrent thread is taking too long
    tickCountStartConcurrent = GetTickCount();

    CollectionState oldState = this->collectionState;
    this->collectionState = state;
    if (threadService->HasCallback())
    {
        Assert(concurrentThread == NULL);
        Assert(concurrentWorkReadyEvent == NULL);

        if (!threadService->Invoke(Recycler::StaticBackgroundWorkCallback, this))
        {
            this->collectionState = oldState;
            return false;
        }
        return true;
    }
    else
    {
        Assert(concurrentThread != NULL);
        Assert(concurrentWorkReadyEvent != NULL);

        SetEvent(concurrentWorkReadyEvent);
        return true;
    }
}
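
// StartConcurrent is the single dispatch point for background work: with a
// thread service the work item is handed to the host, otherwise the dedicated
// thread (parked in ThreadProc) is woken via concurrentWorkReadyEvent. In both
// cases collectionState is set first, because it doubles as the message that
// tells the background thread which kind of work to perform.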
BOOL
Recycler::StartBackgroundMarkCollect()
{
#ifdef RECYCLER_TRACE
    PrintCollectTrace(Js::ConcurrentMarkPhase);
#endif
    this->CollectionBegin<Js::ConcurrentCollectPhase>();

    // Asynchronous concurrent mark
    BOOL success = StartAsynchronousBackgroundMark();
    this->CollectionEnd<Js::ConcurrentCollectPhase>();
    return success;
}
BOOL
Recycler::StartBackgroundMark(bool foregroundResetMark, bool foregroundFindRoots)
{
    Assert(!this->CollectionInProgress());

    CollectionState backgroundState = CollectionStateConcurrentResetMarks;
    bool doBackgroundFindRoots = true;
    if (foregroundResetMark || foregroundFindRoots)
    {
        // REVIEW: SWB, if there's only write barrier page change, we don't scan and mark?
#ifdef RECYCLER_WRITE_WATCH
        if (!CONFIG_FLAG(ForceSoftwareWriteBarrier))
        {
            RECYCLER_PROFILE_EXEC_BEGIN(this, Js::ResetWriteWatchPhase);
            bool hasWriteWatch = (recyclerPageAllocator.ResetWriteWatch() && recyclerLargeBlockPageAllocator.ResetWriteWatch());
            RECYCLER_PROFILE_EXEC_END(this, Js::ResetWriteWatchPhase);
            if (!hasWriteWatch)
            {
                // Disable concurrent mark
                this->enableConcurrentMark = false;
                return false;
            }
        }
#endif
        // In-thread synchronized GC on the concurrent thread
        ResetMarks(this->enableScanImplicitRoots ? ResetMarkFlags_SynchronizedImplicitRoots : ResetMarkFlags_Synchronized);

        if (foregroundFindRoots)
        {
            this->collectionState = CollectionStateFindRoots;
            FindRoots();
            ScanStack();
            Assert(collectionState == CollectionStateFindRoots);

            backgroundState = CollectionStateConcurrentMark;
            doBackgroundFindRoots = false;
        }
        else
        {
            // Do find roots in the background
            backgroundState = CollectionStateConcurrentFindRoots;
        }
    }

    if (doBackgroundFindRoots)
    {
        this->PrepareBackgroundFindRoots();
    }

    if (!StartConcurrent(backgroundState))
    {
        if (doBackgroundFindRoots)
        {
            this->RevertPrepareBackgroundFindRoots();
        }
        this->collectionState = CollectionStateNotCollecting;
        return false;
    }
    return true;
}
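
// StartBackgroundMark supports three configurations: everything in the
// background (reset marks, find roots, then mark), foreground reset with
// background find-roots, or foreground reset and find-roots with only the
// marking itself in the background. The synchronous/asynchronous entry points
// below select among these.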
BOOL
Recycler::StartAsynchronousBackgroundMark()
{
    // Debug flags can turn off background reset marks or background find roots; the default is to do everything concurrently
    return StartBackgroundMark(CUSTOM_PHASE_OFF1(GetRecyclerFlagsTable(), Js::BackgroundResetMarksPhase), CUSTOM_PHASE_OFF1(GetRecyclerFlagsTable(), Js::BackgroundFindRootsPhase));
}

BOOL
Recycler::StartSynchronousBackgroundMark()
{
    return StartBackgroundMark(true, true);
}
BOOL
Recycler::StartConcurrentSweepCollect()
{
    Assert(collectionState == CollectionStateNotCollecting);
#ifdef RECYCLER_TRACE
    PrintCollectTrace(Js::ConcurrentSweepPhase);
#endif
    this->CollectionBegin<Js::ConcurrentCollectPhase>();
    this->Mark();

    // We don't have rescan data if we disabled concurrent mark, so assume the worst
    // (which means it is harder to get into partial collect mode)
#if ENABLE_PARTIAL_GC
    bool needConcurrentSweep = this->Sweep(RecyclerSweep::MaxPartialCollectRescanRootBytes, true, true);
#else
    bool needConcurrentSweep = this->Sweep(true);
#endif
    this->CollectionEnd<Js::ConcurrentCollectPhase>();
    FinishCollection(needConcurrentSweep);
    return true;
}
size_t
Recycler::BackgroundRepeatMark()
{
    RECYCLER_PROFILE_EXEC_BACKGROUND_BEGIN(this, Js::BackgroundRepeatMarkPhase);
    Assert(this->backgroundRescanCount <= RecyclerHeuristic::MaxBackgroundRepeatMarkCount - 1);

    size_t rescannedPageCount = this->BackgroundRescan(RescanFlags_ResetWriteWatch);
    if (this->NeedOOMRescan() || this->isAborting)
    {
        // OOM'ed. Let's not continue
        RECYCLER_PROFILE_EXEC_BACKGROUND_END(this, Js::BackgroundRepeatMarkPhase);
        return Recycler::InvalidScanRootBytes;
    }

    // Rescan the stack
    this->BackgroundScanStack();

    // Process mark stack
    this->DoBackgroundParallelMark();
    if (this->NeedOOMRescan())
    {
        RECYCLER_PROFILE_EXEC_BACKGROUND_END(this, Js::BackgroundRepeatMarkPhase);
        return Recycler::InvalidScanRootBytes;
    }

#ifdef RECYCLER_STATS
    Assert(this->backgroundRescanCount >= 1 && this->backgroundRescanCount <= RecyclerHeuristic::MaxBackgroundRepeatMarkCount);
    this->collectionStats.backgroundMarkData[this->backgroundRescanCount - 1] = this->collectionStats.markData;
#endif
    RECYCLER_PROFILE_EXEC_BACKGROUND_END(this, Js::BackgroundRepeatMarkPhase);
    return rescannedPageCount;
}
char* Recycler::GetScriptThreadStackTop()
{
    // We should have already checked if the recycler is thread bound or not
    Assert(mainThreadHandle != NULL);
    return (char*) savedThreadContext.GetStackTop();
}

size_t
Recycler::BackgroundScanStack()
{
    if (this->skipStack)
    {
#ifdef RECYCLER_TRACE
        CUSTOM_PHASE_PRINT_VERBOSE_TRACE1(GetRecyclerFlagsTable(), Js::ScanStackPhase, _u("[%04X] Skipping the stack scan\n"), ::GetCurrentThreadId());
#endif
        return 0;
    }

    if (!this->isInScript || mainThreadHandle == nullptr)
    {
        // No point in scanning the main thread's stack if we are not in script.
        // We also can't scan the main thread's stack if the recycler is not thread-bound and we didn't create the main thread's handle.
        return 0;
    }

    char* stackTop = this->GetScriptThreadStackTop();
    if (stackTop != nullptr)
    {
        size_t size = (char *)stackBase - stackTop;
        ScanMemoryInline<false>((void **)stackTop, size);
        return size;
    }
    return 0;
}
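
// The stack scan above is conservative: on architectures where the stack grows
// downward, the live region is [stackTop, stackBase), so its size is
// stackBase - stackTop, and every pointer-aligned word in that range is
// treated as a potential root.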
void
Recycler::BackgroundMark()
{
    Assert(this->DoQueueTrackedObject());

    this->backgroundRescanCount = 0;
    this->DoBackgroundParallelMark();
    if (this->NeedOOMRescan() || this->isAborting)
    {
        return;
    }

#ifdef RECYCLER_STATS
    this->collectionStats.backgroundMarkData[0] = this->collectionStats.markData;
#endif
    if (PHASE_OFF1(Js::BackgroundRepeatMarkPhase))
    {
        return;
    }

    // We always do one repeat mark pass.
    size_t rescannedPageCount = this->BackgroundRepeatMark();
    if (this->NeedOOMRescan() || this->isAborting)
    {
        // OOM'ed. Let's not continue
        return;
    }

    Assert(rescannedPageCount != Recycler::InvalidScanRootBytes);

    // If we rescanned enough pages in the previous repeat mark pass, then do one more
    // to try to reduce the amount of work we need to do in-thread
    if (rescannedPageCount >= RecyclerHeuristic::BackgroundSecondRepeatMarkThreshold)
    {
        this->BackgroundRepeatMark();
        if (this->NeedOOMRescan() || this->isAborting)
        {
            // OOM'ed. Let's not continue
            return;
        }
    }
}
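
// Repeat-mark heuristic: one repeat pass always runs to pick up pages dirtied
// while the first background mark was in flight; a second pass runs only if
// the first rescan touched at least BackgroundSecondRepeatMarkThreshold pages,
// on the theory that a large rescan implies more in-thread work can still be
// shaved off in the background.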
void
Recycler::BackgroundResetMarks()
{
    RECYCLER_PROFILE_EXEC_BACKGROUND_BEGIN(this, Js::BackgroundResetMarksPhase);
    GCETW(GC_BACKGROUNDRESETMARKS_START, (this));
    Assert(IsMarkStackEmpty());

    this->scanPinnedObjectMap = true;
    this->hasScannedInitialImplicitRoots = false;
    heapBlockMap.ResetMarks();
    autoHeap.ResetMarks(this->enableScanImplicitRoots ? ResetMarkFlags_InBackgroundThreadImplicitRoots : ResetMarkFlags_InBackgroundThread);

    GCETW(GC_BACKGROUNDRESETMARKS_STOP, (this));
    RECYCLER_PROFILE_EXEC_BACKGROUND_END(this, Js::BackgroundResetMarksPhase);
}
void
Recycler::PrepareBackgroundFindRoots()
{
    Assert(!this->hasPendingConcurrentFindRoot);
    this->hasPendingConcurrentFindRoot = true;

    // Save the thread context here. The background thread
    // will use this saved context for the marking instead of
    // trying to get the live thread context of the thread.
    SAVE_THREAD_CONTEXT();

    // Temporarily disable resize so the background thread can scan without
    // the memory being freed out from under it
    pinnedObjectMap.DisableResize();

    // Update the cached info for big blocks in the guest arenas
    DListBase<GuestArenaAllocator>::EditingIterator guestArenaIter(&guestArenaList);
    while (guestArenaIter.Next())
    {
        GuestArenaAllocator& allocator = guestArenaIter.Data();
        allocator.SetLockBlockList(true);
        if (allocator.pendingDelete)
        {
            Assert(this->hasPendingDeleteGuestArena);
            allocator.SetLockBlockList(false);
            guestArenaIter.RemoveCurrent(&HeapAllocator::Instance);
        }
        else if (this->backgroundFinishMarkCount == 0)
        {
            // Update the cached info for big blocks
            allocator.GetBigBlocks(false);
        }
    }
    this->hasPendingDeleteGuestArena = false;
}

void
Recycler::RevertPrepareBackgroundFindRoots()
{
    Assert(this->hasPendingConcurrentFindRoot);
    this->hasPendingConcurrentFindRoot = false;
    pinnedObjectMap.EnableResize();
}
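
// Prepare/Revert form a bracket around background find-roots: resizing of the
// pinned-object map is disabled so the background thread can read it without
// the table reallocating underneath, and the guest arena block lists are
// locked (with pending deletes flushed first) for the same reason.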
size_t
Recycler::BackgroundFindRoots()
{
#ifdef RECYCLER_STATS
    size_t lastMarkCount = this->collectionStats.markData.markCount;
#endif
    size_t scanRootBytes = 0;

    Assert(this->IsConcurrentFindRootState());
    Assert(this->hasPendingConcurrentFindRoot);
#if ENABLE_PARTIAL_GC
    Assert(this->inPartialCollectMode || this->DoQueueTrackedObject());
#else
    Assert(this->DoQueueTrackedObject());
#endif

    // Only mark pinned objects and guest arenas, which is where most of the roots are.
    // When we go back to the main thread to rescan, we will scan the rest of the roots.
    // NOTE: purposefully not marking the transientPinnedObject here, as it is transient :)

    // Background mark the pinned objects. Since we are in the concurrent find-root state,
    // the main thread won't delete any entries from the map, so concurrent reads
    // of the map are safe.
    GCETW(GC_BACKGROUNDSCANROOTS_START, (this));
    RECYCLER_PROFILE_EXEC_BACKGROUND_BEGIN(this, Js::BackgroundFindRootsPhase);
    scanRootBytes += this->ScanPinnedObjects</*background = */true>();

    RECYCLER_PROFILE_EXEC_BACKGROUND_BEGIN(this, Js::FindRootArenaPhase);

    // Background mark the guest arenas. Since we are in the concurrent find-root state,
    // the main thread won't delete any arena, so concurrent reads of them are ok.
    DListBase<GuestArenaAllocator>::EditingIterator guestArenaIter(&guestArenaList);
    while (guestArenaIter.Next())
    {
        GuestArenaAllocator& allocator = guestArenaIter.Data();
        if (allocator.pendingDelete)
        {
            // Skip guest arenas that are already marked for deletion
            Assert(this->hasPendingDeleteGuestArena);
            continue;
        }
        scanRootBytes += ScanArena(&allocator, true);
    }
    RECYCLER_PROFILE_EXEC_BACKGROUND_END(this, Js::FindRootArenaPhase);

    this->ScanImplicitRoots();

    RECYCLER_PROFILE_EXEC_BACKGROUND_END(this, Js::BackgroundFindRootsPhase);
    this->hasPendingConcurrentFindRoot = false;
    this->collectionState = CollectionStateConcurrentMark;
    GCETW(GC_BACKGROUNDSCANROOTS_STOP, (this));
    RECYCLER_STATS_ADD(this, rootCount, this->collectionStats.markData.markCount - lastMarkCount);
    return scanRootBytes;
}
size_t
Recycler::BackgroundFinishMark()
{
#if ENABLE_PARTIAL_GC
    Assert(this->inPartialCollectMode || this->DoQueueTrackedObject());
#else
    Assert(this->DoQueueTrackedObject());
#endif
    Assert(collectionState == CollectionStateConcurrentFinishMark);
    size_t rescannedRootBytes = FinishMarkRescan(true) * AutoSystemInfo::PageSize;

    this->collectionState = CollectionStateConcurrentFindRoots;
    rescannedRootBytes += this->BackgroundFindRoots();
    this->collectionState = CollectionStateConcurrentFinishMark;

    RECYCLER_PROFILE_EXEC_BACKGROUND_BEGIN(this, Js::MarkPhase);
    ProcessMark(true);
    RECYCLER_PROFILE_EXEC_BACKGROUND_END(this, Js::MarkPhase);
    return rescannedRootBytes;
}

void
Recycler::SweepPendingObjects(RecyclerSweep& recyclerSweep)
{
    autoHeap.SweepPendingObjects(recyclerSweep);
}

void
Recycler::ConcurrentTransferSweptObjects(RecyclerSweep& recyclerSweep)
{
    Assert(!recyclerSweep.IsBackground());
    Assert((this->collectionState & Collection_TransferSwept) == Collection_TransferSwept);

#if ENABLE_PARTIAL_GC
    if (this->hasBackgroundFinishPartial)
    {
        this->hasBackgroundFinishPartial = false;
        this->ClearPartialCollect();
    }
#endif
    autoHeap.ConcurrentTransferSweptObjects(recyclerSweep);
}

#if ENABLE_PARTIAL_GC
void
Recycler::ConcurrentPartialTransferSweptObjects(RecyclerSweep& recyclerSweep)
{
    Assert(!recyclerSweep.IsBackground());
    Assert(!this->hasBackgroundFinishPartial);
    autoHeap.ConcurrentPartialTransferSweptObjects(recyclerSweep);
}
#endif
BOOL
Recycler::FinishConcurrentCollectWrapped(CollectionFlags flags)
{
    this->allowDispose = (flags & CollectOverride_AllowDispose) == CollectOverride_AllowDispose;
#if ENABLE_CONCURRENT_GC
    this->skipStack = ((flags & CollectOverride_SkipStack) != 0);
    DebugOnly(this->isConcurrentGCOnIdle = (flags == CollectOnScriptIdle));
#endif
    BOOL collected = collectionWrapper->ExecuteRecyclerCollectionFunction(this, &Recycler::FinishConcurrentCollect, flags);
    return collected;
}

BOOL
Recycler::WaitForConcurrentThread(DWORD waitTime)
{
    Assert(this->IsConcurrentState() || this->collectionState == CollectionStateParallelMark);
    RECYCLER_PROFILE_EXEC_BEGIN(this, Js::ConcurrentWaitPhase);

    if (concurrentThread != NULL)
    {
        // Set the priority back to normal before we wait to ensure it doesn't starve
        SetThreadPriority(this->concurrentThread, THREAD_PRIORITY_NORMAL);
    }

    DWORD ret = WaitForSingleObject(concurrentWorkDoneEvent, waitTime);
    if (concurrentThread != NULL)
    {
        if (ret == WAIT_TIMEOUT)
        {
            // Keep the priority boost.
            priorityBoost = true;
        }
        else
        {
            Assert(ret == WAIT_OBJECT_0);
            // Back to below normal
            SetThreadPriority(this->concurrentThread, THREAD_PRIORITY_BELOW_NORMAL);
            priorityBoost = false;
        }
    }
    RECYCLER_PROFILE_EXEC_END(this, Js::ConcurrentWaitPhase);
    return (ret == WAIT_OBJECT_0);
}
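
// Priority protocol: the concurrent thread normally runs at below-normal
// priority so it never competes with script. While the main thread blocks on
// it, the priority is raised to normal to avoid starvation; on timeout the
// boost is kept (priorityBoost) so TryFinishConcurrentCollect knows not to
// boost the priority again.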
#if ENABLE_BACKGROUND_PAGE_FREEING
void
Recycler::FlushBackgroundPages()
{
    recyclerPageAllocator.SuspendIdleDecommit();
    recyclerPageAllocator.FlushBackgroundPages();
    recyclerPageAllocator.ResumeIdleDecommit();

    recyclerLargeBlockPageAllocator.SuspendIdleDecommit();
    recyclerLargeBlockPageAllocator.FlushBackgroundPages();
    recyclerLargeBlockPageAllocator.ResumeIdleDecommit();

    this->threadPageAllocator->SuspendIdleDecommit();
    this->threadPageAllocator->FlushBackgroundPages();
    this->threadPageAllocator->ResumeIdleDecommit();

#ifdef RECYCLER_WRITE_BARRIER_ALLOC_SEPARATE_PAGE
    recyclerWithBarrierPageAllocator.SuspendIdleDecommit();
    recyclerWithBarrierPageAllocator.FlushBackgroundPages();
    recyclerWithBarrierPageAllocator.ResumeIdleDecommit();
#endif
}
#endif
#ifdef ENABLE_DEBUG_CONFIG_OPTIONS
AutoProtectPages::AutoProtectPages(Recycler* recycler, bool protectEnabled) :
    isReadOnly(false),
    recycler(recycler)
{
    if (protectEnabled)
    {
        recycler->heapBlockMap.MakeAllPagesReadOnly(recycler);
        isReadOnly = true;
    }
}

AutoProtectPages::~AutoProtectPages()
{
    Unprotect();
}

void AutoProtectPages::Unprotect()
{
    if (isReadOnly)
    {
        recycler->heapBlockMap.MakeAllPagesReadWrite(recycler);
        isReadOnly = false;
    }
}
#endif
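
// AutoProtectPages is a debug-only RAII guard: the constructor optionally makes
// all heap-block-map pages read-only so any mutator write during rescan faults
// immediately, and the destructor unconditionally restores read-write access,
// covering early returns as well as the explicit Unprotect() call below.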
BOOL
Recycler::FinishConcurrentCollect(CollectionFlags flags)
{
    if (!this->IsConcurrentState())
    {
        Assert(false);
        return false;
    }

#ifdef PROFILE_EXEC
    Js::Phase concurrentPhase = Js::ConcurrentCollectPhase;
    // TODO: Remove this workaround for the unreferenced local after -profile is enabled for GC
    static_cast<Js::Phase>(concurrentPhase);
#endif
#if ENABLE_PARTIAL_GC
    RECYCLER_PROFILE_EXEC_BEGIN2(this, Js::RecyclerPhase,
        (concurrentPhase = ((this->inPartialCollectMode && this->IsConcurrentMarkState())?
            Js::ConcurrentPartialCollectPhase : Js::ConcurrentCollectPhase)));
#else
    RECYCLER_PROFILE_EXEC_BEGIN2(this, Js::RecyclerPhase,
        (concurrentPhase = Js::ConcurrentCollectPhase));
#endif

    // Don't do a concurrent sweep if we have priority boosted.
    const BOOL forceInThread = flags & CollectOverride_ForceInThread;
    bool concurrent = (flags & CollectMode_Concurrent) != 0;
    concurrent = concurrent && (!priorityBoost || this->backgroundRescanCount != 1);
#ifdef RECYCLER_TRACE
    collectionParam.priorityBoostConcurrentSweepOverride = priorityBoost;
#endif

    const DWORD waitTime = forceInThread? INFINITE : RecyclerHeuristic::FinishConcurrentCollectWaitTime(this->GetRecyclerFlagsTable());
    GCETW(GC_FINISHCONCURRENTWAIT_START, (this, waitTime));
    const BOOL waited = WaitForConcurrentThread(waitTime);
    GCETW(GC_FINISHCONCURRENTWAIT_STOP, (this, !waited));
    if (!waited)
    {
        RECYCLER_PROFILE_EXEC_END2(this, concurrentPhase, Js::RecyclerPhase);
        return false;
    }

    bool needConcurrentSweep = false;
    if (collectionState == CollectionStateRescanWait)
    {
        GCETW_INTERNAL(GC_START, (this, ETWEvent_ConcurrentRescan));
#ifdef RECYCLER_TRACE
#if ENABLE_PARTIAL_GC
        PrintCollectTrace(this->inPartialCollectMode ? Js::ConcurrentPartialCollectPhase : Js::ConcurrentMarkPhase, true);
#else
        PrintCollectTrace(Js::ConcurrentMarkPhase, true);
#endif
#endif
        collectionState = CollectionStateRescanFindRoots;

#ifdef ENABLE_DEBUG_CONFIG_OPTIONS
        // TODO: Change this behavior.
        // ProtectPagesOnRescan is not supported in PageHeap mode because the page protection is changed
        // outside the PageAllocator in PageHeap mode, and so pages are not in the state that the
        // PageAllocator expects when it goes to change the page protection.
        // One viable fix is to move the guard page protection logic outside of the heap blocks
        // and into the page allocator.
        AssertMsg(!(IsPageHeapEnabled() && GetRecyclerFlagsTable().RecyclerProtectPagesOnRescan), "ProtectPagesOnRescan not supported in page heap mode");
        AutoProtectPages protectPages(this, GetRecyclerFlagsTable().RecyclerProtectPagesOnRescan);
#endif

        const bool backgroundFinishMark = !forceInThread && concurrent && ((flags & CollectOverride_BackgroundFinishMark) != 0);
        const DWORD finishMarkWaitTime = RecyclerHeuristic::BackgroundFinishMarkWaitTime(backgroundFinishMark, GetRecyclerFlagsTable());
        size_t rescanRootBytes = FinishMark(finishMarkWaitTime);

        if (rescanRootBytes == Recycler::InvalidScanRootBytes)
        {
            Assert(this->IsMarkState());
            RECYCLER_PROFILE_EXEC_END2(this, concurrentPhase, Js::RecyclerPhase);
            GCETW_INTERNAL(GC_STOP, (this, ETWEvent_ConcurrentRescan));

            // we timed out trying to mark
            return false;
        }

#ifdef RECYCLER_STATS
        collectionStats.continueCollectAllocBytes = autoHeap.uncollectedAllocBytes;
#endif
#ifdef RECYCLER_VERIFY_MARK
        if (GetRecyclerFlagsTable().RecyclerVerifyMark)
        {
            this->VerifyMark();
        }
#endif

#ifdef ENABLE_DEBUG_CONFIG_OPTIONS
        protectPages.Unprotect();
#endif

#if ENABLE_PARTIAL_GC
        needConcurrentSweep = this->Sweep(rescanRootBytes, concurrent, true);
#else
        needConcurrentSweep = this->Sweep(concurrent);
#endif
        GCETW_INTERNAL(GC_STOP, (this, ETWEvent_ConcurrentRescan));
    }
    else
    {
        GCETW_INTERNAL(GC_START, (this, ETWEvent_ConcurrentTransferSwept));
        GCETW(GC_FLUSHZEROPAGE_START, (this));
        Assert(collectionState == CollectionStateTransferSweptWait);
#ifdef RECYCLER_TRACE
        PrintCollectTrace(Js::ConcurrentSweepPhase, true);
#endif
        collectionState = CollectionStateTransferSwept;

#if ENABLE_BACKGROUND_PAGE_FREEING
        if (CONFIG_FLAG(EnableBGFreeZero))
        {
            // We should have zeroed all the pages in the background thread
            Assert(!recyclerPageAllocator.HasZeroQueuedPages());
            Assert(!recyclerLargeBlockPageAllocator.HasZeroQueuedPages());
            this->FlushBackgroundPages();
        }
#endif
        GCETW(GC_FLUSHZEROPAGE_STOP, (this));
        GCETW(GC_TRANSFERSWEPTOBJECTS_START, (this));

        Assert(this->recyclerSweep != nullptr);
        Assert(!this->recyclerSweep->IsBackground());
#if ENABLE_PARTIAL_GC
        if (this->inPartialCollectMode)
        {
            ConcurrentPartialTransferSweptObjects(*this->recyclerSweep);
        }
        else
#endif
        {
            ConcurrentTransferSweptObjects(*this->recyclerSweep);
        }
        recyclerSweep->EndSweep();
        GCETW(GC_TRANSFERSWEPTOBJECTS_STOP, (this));
        GCETW_INTERNAL(GC_STOP, (this, ETWEvent_ConcurrentTransferSwept));
    }

    RECYCLER_PROFILE_EXEC_END2(this, concurrentPhase, Js::RecyclerPhase);
    FinishCollection(needConcurrentSweep);

    if (!this->CollectionInProgress())
    {
        if (NeedExhaustiveRepeatCollect())
        {
            DoCollect((CollectionFlags)(flags & ~CollectMode_Partial));
        }
        else
        {
            EndCollection();
        }
    }
    return true;
}
#if !DISABLE_SEH
int
Recycler::ExceptFilter(LPEXCEPTION_POINTERS pEP)
{
#if DBG
    // Assert exception code
    if (pEP->ExceptionRecord->ExceptionCode == STATUS_ASSERTION_FAILURE)
    {
        return EXCEPTION_CONTINUE_SEARCH;
    }
#endif

#ifdef GENERATE_DUMP
    if (Js::Configuration::Global.flags.IsEnabled(Js::DumpOnCrashFlag))
    {
        Js::Throw::GenerateDump(pEP, Js::Configuration::Global.flags.DumpOnCrash);
    }
#endif

#if DBG && _M_IX86
    int callerEBP = *((int*)pEP->ContextRecord->Ebp);

    // Note: the format string gained an ExceptionAddress specifier so it matches the six arguments passed below.
    Output::Print(_u("Recycler Concurrent Thread: Uncaught exception: ExceptionAddress: 0x%X ExceptionCode: 0x%X EIP: 0x%X EBP: 0x%X ReturnAddress: 0x%X ReturnAddress2: 0x%X\n"),
        pEP->ExceptionRecord->ExceptionAddress, pEP->ExceptionRecord->ExceptionCode, pEP->ContextRecord->Eip,
        pEP->ContextRecord->Ebp, *((int*)pEP->ContextRecord->Ebp + 1), *((int*) callerEBP + 1));
#endif

    Output::Flush();
    return EXCEPTION_CONTINUE_SEARCH;
}
#endif

unsigned int
Recycler::StaticThreadProc(LPVOID lpParameter)
{
    DWORD ret = (DWORD)-1;

#if !DISABLE_SEH
    __try
    {
#endif
        Recycler * recycler = (Recycler *)lpParameter;
#if DBG
        recycler->concurrentThreadExited = false;
#endif
        ret = recycler->ThreadProc();
#if !DISABLE_SEH
    }
    __except(Recycler::ExceptFilter(GetExceptionInformation()))
    {
        Assert(false);
    }
#endif

    return ret;
}

void
Recycler::StaticBackgroundWorkCallback(void * callbackData)
{
    Recycler * recycler = (Recycler *) callbackData;
    recycler->DoBackgroundWork(true);
}

#if defined(ENABLE_JS_ETW) && defined(NTBUILD)
static ETWEventGCActivationKind
BackgroundMarkETWEventGCActivationKind(CollectionState collectionState)
{
    return collectionState == CollectionStateConcurrentFinishMark ?
        ETWEvent_ConcurrentFinishMark : ETWEvent_ConcurrentMark;
}
#endif
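
// Entry point for the background GC work items. Depending on the current collection state,
// this runs the host's concurrent callback, a parallel mark pass, one of the concurrent
// mark phases (reset marks / find roots / mark / finish mark), or the concurrent sweep.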
void
Recycler::DoBackgroundWork(bool forceForeground)
{
    if (this->collectionState == CollectionStateConcurrentWrapperCallback)
    {
        this->collectionWrapper->ConcurrentCallback();
    }
    else if (this->collectionState == CollectionStateParallelMark)
    {
        this->ProcessParallelMark(false, &this->markContext);
    }
    else if (this->IsConcurrentMarkState())
    {
        RECYCLER_PROFILE_EXEC_BACKGROUND_BEGIN(this, this->collectionState == CollectionStateConcurrentFinishMark ?
            Js::BackgroundFinishMarkPhase : Js::ConcurrentMarkPhase);
        GCETW_INTERNAL(GC_START, (this, BackgroundMarkETWEventGCActivationKind(this->collectionState)));
        DebugOnly(this->markContext.GetPageAllocator()->SetConcurrentThreadId(::GetCurrentThreadId()));
        Assert(this->enableConcurrentMark);

        if (this->collectionState != CollectionStateConcurrentFinishMark)
        {
            this->StartQueueTrackedObject();
        }

        switch (this->collectionState)
        {
        case CollectionStateConcurrentResetMarks:
            this->BackgroundResetMarks();
            this->BackgroundResetWriteWatchAll();
            this->collectionState = CollectionStateConcurrentFindRoots;
            // fall-through
        case CollectionStateConcurrentFindRoots:
            this->BackgroundFindRoots();
            this->BackgroundScanStack();
            this->collectionState = CollectionStateConcurrentMark;
            // fall-through
        case CollectionStateConcurrentMark:
            this->BackgroundMark();
            Assert(this->collectionState == CollectionStateConcurrentMark);
            RECORD_TIMESTAMP(concurrentMarkFinishTime);
            break;
        case CollectionStateConcurrentFinishMark:
            this->backgroundRescanRootBytes = this->BackgroundFinishMark();
            Assert(!HasPendingMarkObjects());
            break;
        default:
            Assert(false);
            break;
        }

        GCETW_INTERNAL(GC_STOP, (this, BackgroundMarkETWEventGCActivationKind(this->collectionState)));
        RECYCLER_PROFILE_EXEC_BACKGROUND_END(this, this->collectionState == CollectionStateConcurrentFinishMark ?
            Js::BackgroundFinishMarkPhase : Js::ConcurrentMarkPhase);
        this->collectionState = CollectionStateRescanWait;
        DebugOnly(this->markContext.GetPageAllocator()->ClearConcurrentThreadId());
    }
    else
    {
        RECYCLER_PROFILE_EXEC_BACKGROUND_BEGIN(this, Js::ConcurrentSweepPhase);
        GCETW_INTERNAL(GC_START, (this, ETWEvent_ConcurrentSweep));
        GCETW(GC_BACKGROUNDZEROPAGE_START, (this));
        Assert(this->enableConcurrentSweep);
        Assert(this->collectionState == CollectionStateConcurrentSweep);

#if ENABLE_BACKGROUND_PAGE_ZEROING
        if (CONFIG_FLAG(EnableBGFreeZero))
        {
            // Zero the queued pages first so they are available to be allocated
            recyclerPageAllocator.BackgroundZeroQueuedPages();
            recyclerLargeBlockPageAllocator.BackgroundZeroQueuedPages();
#ifdef RECYCLER_WRITE_BARRIER_ALLOC_SEPARATE_PAGE
            recyclerWithBarrierPageAllocator.BackgroundZeroQueuedPages();
#endif
        }
#endif

        GCETW(GC_BACKGROUNDZEROPAGE_STOP, (this));
        GCETW(GC_BACKGROUNDSWEEP_START, (this));
        Assert(this->recyclerSweep != nullptr);
        this->recyclerSweep->BackgroundSweep();

        uint sweptBytes = 0;
#ifdef RECYCLER_STATS
        sweptBytes = (uint)collectionStats.objectSweptBytes;
#endif
        GCETW(GC_BACKGROUNDSWEEP_STOP, (this, sweptBytes));

#if ENABLE_BACKGROUND_PAGE_ZEROING
        if (CONFIG_FLAG(EnableBGFreeZero))
        {
            // Drain the zero queue again, as the background sweep might have freed more pages
            GCETW(GC_BACKGROUNDZEROPAGE_START, (this));
            recyclerPageAllocator.BackgroundZeroQueuedPages();
#ifdef RECYCLER_WRITE_BARRIER_ALLOC_SEPARATE_PAGE
            recyclerWithBarrierPageAllocator.BackgroundZeroQueuedPages();
#endif
            recyclerLargeBlockPageAllocator.BackgroundZeroQueuedPages();
            GCETW(GC_BACKGROUNDZEROPAGE_STOP, (this));
        }
#endif

        GCETW_INTERNAL(GC_STOP, (this, ETWEvent_ConcurrentSweep));
        Assert(this->collectionState == CollectionStateConcurrentSweep);
        this->collectionState = CollectionStateTransferSweptWait;
        RECYCLER_PROFILE_EXEC_BACKGROUND_END(this, Js::ConcurrentSweepPhase);
    }

    SetEvent(this->concurrentWorkDoneEvent);
    collectionWrapper->WaitCollectionCallBack();
}
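
// Main loop of the dedicated concurrent GC thread: performs idle decommit on the page
// allocators, waits for work (or an idle-decommit timeout), and dispatches to
// DoBackgroundWork until the collection state switches to CollectionStateExit.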
DWORD
Recycler::ThreadProc()
{
    Assert(this->IsConcurrentEnabled());

#if !defined(_UCRT)
    // We do this before we set the concurrentWorkDoneEvent because GetModuleHandleEx requires
    // taking the loader lock. Otherwise we could have the following case:
    //      Thread A initializes the concurrent thread (C)
    //      C signals that it is done
    //      C yields, since it runs at lower priority
    //      Thread A starts running - and is told to shut down
    //      Thread A grabs the loader lock as part of the shutdown sequence
    //      Thread A waits for C to be done
    //      C wakes up now - and tries to grab the loader lock
    // To prevent this deadlock, we call GetModuleHandleEx first and then set the concurrentWorkDoneEvent.
    HMODULE dllHandle = NULL;
    if (!GetModuleHandleEx(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS, (LPCTSTR)&Recycler::StaticThreadProc, &dllHandle))
    {
        dllHandle = NULL;
    }
#endif

#ifdef ENABLE_JS_ETW
    // Create an ETW ActivityId for this thread, to help tools correlate ETW events we generate
    GUID activityId = { 0 };
    auto eventActivityIdControlResult = EventActivityIdControl(EVENT_ACTIVITY_CTRL_CREATE_SET_ID, &activityId);
    Assert(eventActivityIdControlResult == ERROR_SUCCESS);
#endif

    // Signal that the thread has started
    SetEvent(this->concurrentWorkDoneEvent);

    SetThreadPriority(::GetCurrentThread(), THREAD_PRIORITY_BELOW_NORMAL);

#if defined(DBG) && defined(PROFILE_EXEC)
    this->backgroundProfilerPageAllocator.SetConcurrentThreadId(::GetCurrentThreadId());
#endif

#ifdef IDLE_DECOMMIT_ENABLED
    DWORD handleCount = this->concurrentIdleDecommitEvent ? 2 : 1;
    HANDLE handles[2] = { this->concurrentWorkReadyEvent, this->concurrentIdleDecommitEvent };
#endif

    do
    {
#ifdef IDLE_DECOMMIT_ENABLED
        needIdleDecommitSignal = IdleDecommitSignal_None;
        DWORD threadPageAllocatorWaitTime = threadPageAllocator->IdleDecommit();
        DWORD recyclerPageAllocatorWaitTime = recyclerPageAllocator.IdleDecommit();
        DWORD waitTime = min(threadPageAllocatorWaitTime, recyclerPageAllocatorWaitTime);
        DWORD recyclerLargeBlockPageAllocatorWaitTime = recyclerLargeBlockPageAllocator.IdleDecommit();
        waitTime = min(waitTime, recyclerLargeBlockPageAllocatorWaitTime);
#ifdef RECYCLER_WRITE_BARRIER_ALLOC_SEPARATE_PAGE
        DWORD recyclerWithBarrierPageAllocatorWaitTime = recyclerWithBarrierPageAllocator.IdleDecommit();
        waitTime = min(waitTime, recyclerWithBarrierPageAllocatorWaitTime);
#endif

        if (waitTime == INFINITE)
        {
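            // No allocator needs a timed decommit right now. Advertise that we need to be
            // signaled; if another thread requested a timer in the meantime
            // (IdleDecommitSignal_NeedTimer), loop around and recompute the wait time.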
            DWORD ret = ::InterlockedCompareExchange(&needIdleDecommitSignal, IdleDecommitSignal_NeedSignal, IdleDecommitSignal_None);
            if (ret == IdleDecommitSignal_NeedTimer)
            {
#if DBG
                if (GetRecyclerFlagsTable().Trace.IsEnabled(Js::IdleDecommitPhase))
                {
                    Output::Print(_u("Recycler Thread IdleDecommit Need Timer\n"));
                    Output::Flush();
                }
#endif
                continue;
            }
        }
#if DBG
        else
        {
            if (GetRecyclerFlagsTable().Trace.IsEnabled(Js::IdleDecommitPhase))
            {
                Output::Print(_u("Recycler Thread IdleDecommit Wait %d\n"), waitTime);
                Output::Flush();
            }
        }
#endif

        DWORD result = WaitForMultipleObjectsEx(handleCount, handles, FALSE, waitTime, FALSE);
        if (result != WAIT_OBJECT_0)
        {
            Assert((handleCount == 2 && result == WAIT_OBJECT_0 + 1) || (waitTime != INFINITE && result == WAIT_TIMEOUT));
#if DBG
            if (GetRecyclerFlagsTable().Trace.IsEnabled(Js::IdleDecommitPhase))
            {
                if (result == WAIT_TIMEOUT)
                {
                    Output::Print(_u("Recycler Thread IdleDecommit Timeout: %d\n"), waitTime);
                }
                else
                {
                    Output::Print(_u("Recycler Thread IdleDecommit Signaled\n"));
                }
                Output::Flush();
            }
#endif
            continue;
        }
#else
        DWORD result = WaitForSingleObject(this->concurrentWorkReadyEvent, INFINITE);
        Assert(result == WAIT_OBJECT_0);
#endif

        if (this->collectionState == CollectionStateExit)
        {
#if DBG
            this->concurrentThreadExited = true;
#endif
            break;
        }

        DoBackgroundWork();
    }
    while (true);

    SetEvent(this->concurrentWorkDoneEvent);

#if !defined(_UCRT)
    if (dllHandle)
    {
        FreeLibraryAndExitThread(dllHandle, 0);
    }
    else
#endif
    {
        return 0;
    }
}
#endif //ENABLE_CONCURRENT_GC

void
Recycler::FinishCollection(bool needConcurrentSweep)
{
#if ENABLE_CONCURRENT_GC
    Assert(!!this->InConcurrentSweep() == needConcurrentSweep);
#else
    Assert(!needConcurrentSweep);
#endif
    if (!needConcurrentSweep)
    {
        FinishCollection();
    }
    else
    {
        FinishDisposeObjects();
    }
}

void
Recycler::FinishCollection()
{
#if ENABLE_PARTIAL_GC && ENABLE_CONCURRENT_GC
    Assert(!this->hasBackgroundFinishPartial);
#endif
    Assert(!this->hasPendingDeleteGuestArena);

    // Reset the time heuristics
    ScheduleNextCollection();

    {
        AutoSwitchCollectionStates collectionState(this,
            /* entry state */ CollectionStatePostCollectionCallback,
            /* exit state */ CollectionStateNotCollecting);
        collectionWrapper->PostCollectionCallBack();
    }

#if ENABLE_CONCURRENT_GC
    this->backgroundFinishMarkCount = 0;
#endif

    // Do a partial page decommit now
    if (decommitOnFinish)
    {
        ForEachPageAllocator([](IdleDecommitPageAllocator* pageAlloc)
        {
            pageAlloc->DecommitNow(false);
        });
        this->decommitOnFinish = false;
    }

    RECYCLER_SLOW_CHECK(autoHeap.Check());
#ifdef RECYCLER_MEMORY_VERIFY
    this->Verify(Js::RecyclerPhase);
#endif
#ifdef RECYCLER_FINALIZE_CHECK
    autoHeap.VerifyFinalize();
#endif
#ifdef ENABLE_JS_ETW
    FlushFreeRecord();
#endif

    FinishDisposeObjects();

#ifdef RECYCLER_FINALIZE_CHECK
    if (!this->IsMarkState())
    {
        autoHeap.VerifyFinalize();
    }
#endif
#ifdef RECYCLER_STATS
    if (CUSTOM_PHASE_STATS1(this->GetRecyclerFlagsTable(), Js::RecyclerPhase))
    {
        PrintCollectStats();
    }
#endif
#ifdef PROFILE_RECYCLER_ALLOC
    if (MemoryProfiler::IsTraceEnabled(true))
    {
        PrintAllocStats();
    }
#endif
#ifdef DUMP_FRAGMENTATION_STATS
    if (GetRecyclerFlagsTable().DumpFragmentationStats)
    {
        autoHeap.DumpFragmentationStats();
    }
#endif
    RECORD_TIMESTAMP(currentCollectionEndTime);
}

void
Recycler::SetExternalRootMarker(ExternalRootMarker fn, void * context)
{
    externalRootMarker = fn;
    externalRootMarkerContext = context;
}

// TODO: (leish) remove the following function? It doesn't seem to make sense to re-allocate in the recycler.
ArenaData **
Recycler::RegisterExternalGuestArena(ArenaData* guestArena)
{
    return externalGuestArenaList.PrependNode(&NoThrowHeapAllocator::Instance, guestArena);
}

void
Recycler::UnregisterExternalGuestArena(ArenaData* guestArena)
{
    externalGuestArenaList.Remove(&NoThrowHeapAllocator::Instance, guestArena);
}

void
Recycler::UnregisterExternalGuestArena(ArenaData** guestArena)
{
    externalGuestArenaList.RemoveElement(&NoThrowHeapAllocator::Instance, guestArena);
}

void
Recycler::SetCollectionWrapper(RecyclerCollectionWrapper * wrapper)
{
    this->collectionWrapper = wrapper;
#if LARGEHEAPBLOCK_ENCODING
    this->Cookie = wrapper->GetRandomNumber();
#else
    this->Cookie = 0;
#endif
}

char *
Recycler::Realloc(void* buffer, DECLSPEC_GUARD_OVERFLOW size_t existingBytes, DECLSPEC_GUARD_OVERFLOW size_t requestedBytes, bool truncate)
{
    Assert(requestedBytes > 0);
    if (existingBytes == 0)
    {
        Assert(buffer == nullptr);
        return Alloc(requestedBytes);
    }

    Assert(buffer != nullptr);
    size_t nbytes = AllocSizeMath::Align(requestedBytes, HeapConstants::ObjectGranularity);
    // Since we successfully allocated, we shouldn't have integer overflow here
    size_t nbytesExisting = AllocSizeMath::Align(existingBytes, HeapConstants::ObjectGranularity);
    Assert(nbytesExisting >= existingBytes);

    if (nbytes == nbytesExisting)
    {
        return (char *)buffer;
    }
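
    // The recycler cannot grow or shrink an allocation in place: allocate a replacement
    // buffer, copy the surviving bytes, and explicitly free the old block.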
    char* replacementBuf = this->Alloc(requestedBytes);
    if (replacementBuf != nullptr)
    {
        // Truncate
        if (existingBytes > requestedBytes && truncate)
        {
            js_memcpy_s(replacementBuf, requestedBytes, buffer, requestedBytes);
        }
        else
        {
            js_memcpy_s(replacementBuf, requestedBytes, buffer, existingBytes);
        }
    }

    if (nbytesExisting > 0)
    {
        this->Free(buffer, nbytesExisting);
    }
    return replacementBuf;
}

bool
Recycler::ForceSweepObject()
{
#ifdef RECYCLER_TEST_SUPPORT
    if (BinaryFeatureControl::RecyclerTest())
    {
        if (checkFn != nullptr)
        {
            return true;
        }
    }
#endif
#ifdef PROFILE_RECYCLER_ALLOC
    if (trackerDictionary != nullptr)
    {
        // Need to sweep object if we are tracing recycler allocs
        return true;
    }
#endif
#ifdef RECYCLER_STATS
    if (CUSTOM_PHASE_STATS1(this->GetRecyclerFlagsTable(), Js::RecyclerPhase))
    {
        return true;
    }
#endif
#if DBG
    // Force sweeping the object so we can assert that we are not sweeping objects that are still implicit roots
    if (this->enableScanImplicitRoots)
    {
        return true;
    }
#endif
    return false;
}
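
// Decides whether an idle collection should be scheduled when we leave script; may instead
// kick off a concurrent collection directly if enough has already been allocated.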
bool
Recycler::ShouldIdleCollectOnExit()
{
    // Always reset the partial heuristics even if we are not doing idle collecting,
    // so we don't carry the heuristics over to the next script activation
    this->ResetPartialHeuristicCounters();

    if (this->CollectionInProgress())
    {
#ifdef RECYCLER_TRACE
        CUSTOM_PHASE_PRINT_VERBOSE_TRACE1(GetRecyclerFlagsTable(), Js::IdleCollectPhase, _u("%04X> Skipping scheduling Idle Collect. Reason: Collection in progress\n"), ::GetCurrentThreadId());
#endif
        // Don't schedule an idle collect if there is a collection going on already
        // IDLE-GC-TODO: Fix ResetHeuristics in the GC so we can detect memory allocation during
        // the concurrent collect and still schedule an idle collect
        return false;
    }

    if (CUSTOM_PHASE_FORCE1(GetRecyclerFlagsTable(), Js::IdleCollectPhase))
    {
        return true;
    }

    uint32 nextTime = tickCountNextCollection - tickDiffToNextCollect;

    // We will try to start a concurrent collect if we are within .9 ms of the next scheduled
    // collection AND the size of allocation is larger than 32M. This is similar to the
    // CollectionAllocation logic, just earlier in both the time heuristic and the size
    // heuristic, so we can do some concurrent GC while we are not in script.
    if (autoHeap.uncollectedAllocBytes >= RecyclerHeuristic::Instance.MaxUncollectedAllocBytesOnExit
        && GetTickCount() > nextTime)
    {
#ifdef RECYCLER_TRACE
        if (CUSTOM_PHASE_TRACE1(GetRecyclerFlagsTable(), Js::IdleCollectPhase))
        {
            if (autoHeap.uncollectedAllocBytes >= RecyclerHeuristic::Instance.MaxUncollectedAllocBytesOnExit)
            {
                Output::Print(_u("%04X> Idle collect on exit: alloc %d\n"), ::GetCurrentThreadId(), autoHeap.uncollectedAllocBytes);
            }
            else
            {
                Output::Print(_u("%04X> Idle collect on exit: time %d\n"), ::GetCurrentThreadId(), tickCountNextCollection - GetTickCount());
            }
            Output::Flush();
        }
#endif
        this->CollectNow<CollectNowConcurrent>();
        return false;
    }

    Assert(!this->CollectionInProgress());

    // Idle GC uses the size heuristic; we only need to schedule one if we have passed it.
    return (autoHeap.uncollectedAllocBytes >= RecyclerHeuristic::IdleUncollectedAllocBytesCollection);
}
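
// RecyclerParallelThread kicks off background work either by handing a work item to the
// host's thread service, or by signaling (lazily creating, on first use) a dedicated thread.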
#if ENABLE_CONCURRENT_GC
bool
RecyclerParallelThread::StartConcurrent()
{
    if (this->recycler->threadService->HasCallback())
    {
        // This may be the first time. If so, initialize by creating the doneEvent.
        if (this->concurrentWorkDoneEvent == NULL)
        {
            this->concurrentWorkDoneEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
            if (this->concurrentWorkDoneEvent == nullptr)
            {
                return false;
            }
        }

        Assert(concurrentThread == NULL);
        Assert(concurrentWorkReadyEvent == NULL);

        // Invoke thread service to process work
        if (!this->recycler->threadService->Invoke(RecyclerParallelThread::StaticBackgroundWorkCallback, this))
        {
            return false;
        }
    }
    else
    {
        // This may be the first time. If so, initialize and create the thread.
        if (this->concurrentWorkDoneEvent == NULL)
        {
            return this->EnableConcurrent(false);
        }
        else
        {
            Assert(this->concurrentThread != NULL);
            Assert(this->concurrentWorkReadyEvent != NULL);
            // Signal the existing thread that work is ready
            SetEvent(this->concurrentWorkReadyEvent);
        }
    }
    return true;
}

bool
RecyclerParallelThread::EnableConcurrent(bool waitForThread)
{
    this->synchronizeOnStartup = waitForThread;

    Assert(this->concurrentWorkDoneEvent == NULL);
    Assert(this->concurrentWorkReadyEvent == NULL);
    Assert(this->concurrentThread == NULL);

    this->concurrentWorkDoneEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
    if (this->concurrentWorkDoneEvent == nullptr)
    {
        return false;
    }

    this->concurrentWorkReadyEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
    if (this->concurrentWorkReadyEvent == nullptr)
    {
        CloseHandle(this->concurrentWorkDoneEvent);
        this->concurrentWorkDoneEvent = NULL;
        return false;
    }

    this->concurrentThread = (HANDLE)PlatformAgnostic::Thread::Create(Recycler::ConcurrentThreadStackSize, &RecyclerParallelThread::StaticThreadProc, this, PlatformAgnostic::Thread::ThreadInitStackSizeParamIsAReservation);
    if (this->concurrentThread != nullptr && waitForThread)
    {
        // Wait for the thread to initialize
        HANDLE handle[2] = { this->concurrentWorkDoneEvent, this->concurrentThread };
        DWORD ret = WaitForMultipleObjectsEx(2, handle, FALSE, INFINITE, FALSE);
        if (ret == WAIT_OBJECT_0)
        {
            return true;
        }
        CloseHandle(concurrentThread);
        concurrentThread = nullptr;
    }

    if (this->concurrentThread == nullptr)
    {
        CloseHandle(this->concurrentWorkDoneEvent);
        this->concurrentWorkDoneEvent = NULL;
        CloseHandle(this->concurrentWorkReadyEvent);
        this->concurrentWorkReadyEvent = NULL;
        return false;
    }
    return true;
}
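
// Work function for the extra parallel mark threads: parallelId selects which of the two
// additional mark contexts to drain, in-thread or in the background depending on state.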
template <uint parallelId>
void
Recycler::ParallelWorkFunc()
{
    Assert(parallelId == 0 || parallelId == 1);
    MarkContext * markContext = (parallelId == 0 ? &this->parallelMarkContext2 : &this->parallelMarkContext3);

    switch (this->collectionState)
    {
    case CollectionStateParallelMark:
        this->ProcessParallelMark(false, markContext);
        break;
    case CollectionStateBackgroundParallelMark:
        this->ProcessParallelMark(true, markContext);
        break;
    default:
        Assert(false);
    }
}

void
RecyclerParallelThread::WaitForConcurrent()
{
    Assert(this->concurrentThread != NULL || this->recycler->threadService->HasCallback());
    Assert(this->concurrentWorkDoneEvent != NULL);
    DWORD ret = WaitForSingleObject(concurrentWorkDoneEvent, INFINITE);
    Assert(ret == WAIT_OBJECT_0);
}

void
RecyclerParallelThread::Shutdown()
{
    Assert(this->recycler->collectionState == CollectionStateExit);
    if (this->recycler->threadService->HasCallback())
    {
        if (this->concurrentWorkDoneEvent != NULL)
        {
            CloseHandle(this->concurrentWorkDoneEvent);
            this->concurrentWorkDoneEvent = NULL;
        }
    }
    else
    {
        if (this->concurrentThread != NULL)
        {
            HANDLE handles[2] = { concurrentWorkDoneEvent, concurrentThread };
            SetEvent(concurrentWorkReadyEvent);
            // During process shutdown, the OS might kill this (recycler parallel, i.e. concurrent) thread,
            // and it will not get a chance to signal concurrentWorkDoneEvent. If we waited only on
            // concurrentWorkDoneEvent while shutting down the main (recycler) thread here, the wait
            // would never return. Hence we wait on concurrentWorkDoneEvent + concurrentThread, so that
            // if concurrentThread got killed, the wait still returns and we can proceed.
            DWORD fRet = WaitForMultipleObjectsEx(2, handles, FALSE, INFINITE, FALSE);
            AssertMsg(fRet != WAIT_FAILED, "Check handles passed to WaitForMultipleObjectsEx.");
            CloseHandle(this->concurrentWorkDoneEvent);
            this->concurrentWorkDoneEvent = NULL;
            CloseHandle(this->concurrentWorkReadyEvent);
            this->concurrentWorkReadyEvent = NULL;
            CloseHandle(this->concurrentThread);
            this->concurrentThread = NULL;
        }
    }

    Assert(this->concurrentThread == NULL);
    Assert(this->concurrentWorkReadyEvent == NULL);
    Assert(this->concurrentWorkDoneEvent == NULL);
}

// static
unsigned int
RecyclerParallelThread::StaticThreadProc(LPVOID lpParameter)
{
    DWORD ret = (DWORD)-1;

#if !DISABLE_SEH
    __try
    {
#endif
        RecyclerParallelThread * parallelThread = (RecyclerParallelThread *)lpParameter;
        Recycler * recycler = parallelThread->recycler;
        RecyclerParallelThread::WorkFunc workFunc = parallelThread->workFunc;

        Assert(recycler->IsConcurrentEnabled());

#if !defined(_UCRT)
        HMODULE dllHandle = NULL;
        if (!GetModuleHandleEx(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS, (LPCTSTR)&RecyclerParallelThread::StaticThreadProc, &dllHandle))
        {
            dllHandle = NULL;
        }
#endif

#ifdef ENABLE_JS_ETW
        // Create an ETW ActivityId for this thread, to help tools correlate ETW events we generate
        GUID activityId = { 0 };
        auto eventActivityIdControlResult = EventActivityIdControl(EVENT_ACTIVITY_CTRL_CREATE_SET_ID, &activityId);
        Assert(eventActivityIdControlResult == ERROR_SUCCESS);
#endif

        // If this thread was created on demand, we already have work to process and do not need to wait
        bool mustWait = parallelThread->synchronizeOnStartup;
        do
        {
            if (mustWait)
            {
                // Signal completion and wait for the next piece of work
                SetEvent(parallelThread->concurrentWorkDoneEvent);
                DWORD result = WaitForSingleObject(parallelThread->concurrentWorkReadyEvent, INFINITE);
                Assert(result == WAIT_OBJECT_0);
            }

            if (recycler->collectionState == CollectionStateExit)
            {
                // Exit thread
                break;
            }

            // Invoke the workFunc to do the real work
            (recycler->*workFunc)();

            // We always wait after the first time
            mustWait = true;
        }
        while (true);

        // Signal to the main thread that we have stopped processing and will shut down.
        // Note that after this point, we cannot access anything on the Recycler instance
        // because the main thread may have torn it down already.
        SetEvent(parallelThread->concurrentWorkDoneEvent);

#if !defined(_UCRT)
        if (dllHandle)
        {
            FreeLibraryAndExitThread(dllHandle, 0);
        }
#endif
        ret = 0;
#if !DISABLE_SEH
    }
    __except(Recycler::ExceptFilter(GetExceptionInformation()))
    {
        Assert(false);
    }
#endif
    return ret;
}

// static
void
RecyclerParallelThread::StaticBackgroundWorkCallback(void * callbackData)
{
    RecyclerParallelThread * parallelThread = (RecyclerParallelThread *)callbackData;
    Recycler * recycler = parallelThread->recycler;
    RecyclerParallelThread::WorkFunc workFunc = parallelThread->workFunc;
    (recycler->*workFunc)();
    SetEvent(parallelThread->concurrentWorkDoneEvent);
}
#endif

#ifdef RECYCLER_TRACE
void
Recycler::CaptureCollectionParam(CollectionFlags flags, bool repeat)
{
    collectionParam.priorityBoostConcurrentSweepOverride = false;
    collectionParam.repeat = repeat;
    collectionParam.finishOnly = false;
    collectionParam.flags = flags;
    collectionParam.uncollectedAllocBytes = autoHeap.uncollectedAllocBytes;
#if ENABLE_PARTIAL_GC
    collectionParam.uncollectedNewPageCountPartialCollect = this->uncollectedNewPageCountPartialCollect;
    collectionParam.inPartialCollectMode = inPartialCollectMode;
    collectionParam.uncollectedNewPageCount = autoHeap.uncollectedNewPageCount;
    collectionParam.unusedPartialCollectFreeBytes = autoHeap.unusedPartialCollectFreeBytes;
#endif
}

void
Recycler::PrintCollectTrace(Js::Phase phase, bool finish, bool noConcurrentWork)
{
    if (GetRecyclerFlagsTable().Trace.IsEnabled(Js::RecyclerPhase) ||
        GetRecyclerFlagsTable().Trace.IsEnabled(phase))
    {
        const BOOL allocSize = collectionParam.flags & CollectHeuristic_AllocSize;
        const BOOL timedIfScriptActive = collectionParam.flags & CollectHeuristic_TimeIfScriptActive;
        const BOOL timedIfInScript = collectionParam.flags & CollectHeuristic_TimeIfInScript;
        const BOOL timed = (timedIfScriptActive && isScriptActive) || (timedIfInScript && isInScript) || (collectionParam.flags & CollectHeuristic_Time);
        const BOOL concurrent = collectionParam.flags & CollectMode_Concurrent;
        const BOOL finishConcurrent = collectionParam.flags & CollectOverride_FinishConcurrent;
        const BOOL exhaustive = collectionParam.flags & CollectMode_Exhaustive;
        const BOOL forceInThread = collectionParam.flags & CollectOverride_ForceInThread;
        const BOOL forceFinish = collectionParam.flags & CollectOverride_ForceFinish;
#if ENABLE_PARTIAL_GC
        BOOL partial = collectionParam.flags & CollectMode_Partial;
#endif
        Output::Print(_u("%04X> RC(%p): %s%s%s%s%s%s%s:"), this->mainThreadId, this,
            collectionParam.domCollect ? _u("[DOM] ") : _u(""),
            collectionParam.repeat ? _u("[Repeat] ") : _u(""),
            this->inDispose ? _u("[Nested]") : _u(""),
            forceInThread ? _u("Force In thread ") : _u(""),
            finish ? _u("Finish ") : _u(""),
            exhaustive ? _u("Exhaustive ") : _u(""),
            Js::PhaseNames[phase]);

        if (noConcurrentWork)
        {
            Assert(finish);
            Output::Print(_u(" No concurrent work"));
        }
        else if (collectionParam.finishOnly)
        {
            Assert(!collectionParam.repeat);
            Assert(finish);
#if ENABLE_CONCURRENT_GC
            if (collectionState == CollectionStateRescanWait)
            {
                if (forceFinish)
                {
                    Output::Print(_u(" Force finish mark and sweep"));
                }
                else if (concurrent && this->enableConcurrentSweep)
                {
                    if (!collectionParam.priorityBoostConcurrentSweepOverride)
                    {
                        Output::Print(_u(" Finish mark and start concurrent sweep"));
                    }
                    else
                    {
                        Output::Print(_u(" Finish mark and sweep (priority boost overridden concurrent sweep)"));
                    }
                }
                else
                {
                    Output::Print(_u(" Finish mark and sweep"));
                }
            }
            else
            {
                Assert(collectionState == CollectionStateTransferSweptWait);
                if (forceFinish)
                {
                    Output::Print(_u(" Force finish sweep"));
                }
                else
                {
                    Output::Print(_u(" Finish sweep"));
                }
            }
#endif // ENABLE_CONCURRENT_GC
        }
        else
        {
            if (finish && !concurrent)
            {
                Output::Print(_u(" Not concurrent collect"));
            }
            if ((finish && finishConcurrent))
            {
                Output::Print(_u(" No heuristic"));
            }
#if ENABLE_CONCURRENT_GC
            else if (finish && priorityBoost)
            {
                Output::Print(_u(" Priority boost no heuristic"));
            }
#endif
            else
            {
                Output::SkipToColumn(50);
                bool byteCountUsed = false;
                bool timeUsed = false;
#if ENABLE_PARTIAL_GC
                bool newPageUsed = false;
                if (phase == Js::PartialCollectPhase || phase == Js::ConcurrentPartialCollectPhase)
                {
                    Assert(collectionParam.flags & CollectMode_Partial);
                    newPageUsed = !!allocSize;
                }
                else if (partial && collectionParam.inPartialCollectMode && collectionParam.uncollectedNewPageCount > collectionParam.uncollectedNewPageCountPartialCollect)
                {
                    newPageUsed = true;
                }
                else
#endif // ENABLE_PARTIAL_GC
                {
                    byteCountUsed = !!allocSize;
                    timeUsed = !!timed;
                }

                Output::Print(byteCountUsed ? _u("*") : (allocSize ? _u(" ") : _u("~")));
                Output::Print(_u("B:%8d "), collectionParam.uncollectedAllocBytes);
                Output::Print(timeUsed ? _u("*") : (timed ? _u(" ") : _u("~")));
                Output::Print(_u("T:%4d "), -collectionParam.timeDiff);
#if ENABLE_PARTIAL_GC
                if (collectionParam.inPartialCollectMode)
                {
                    Output::Print(_u("L:%5d "), collectionParam.uncollectedNewPageCountPartialCollect);
                }
                else
                {
                    Output::Print(_u("L:----- "));
                }
                Output::Print(newPageUsed ? _u("*") : (partial ? _u(" ") : _u("~")));
                Output::Print(_u("P:%5d(%9d) "), collectionParam.uncollectedNewPageCount, collectionParam.uncollectedNewPageCount * AutoSystemInfo::PageSize);
                Output::Print(_u("U:%8d"), collectionParam.unusedPartialCollectFreeBytes);
#endif // ENABLE_PARTIAL_GC
            }
        }
        Output::Print(_u("\n"));
        Output::Flush();
    }
}
#endif

#ifdef RECYCLER_STATS
void
Recycler::PrintHeapBlockStats(char16 const * name, HeapBlock::HeapBlockType type)
{
    size_t liveCount = collectionStats.heapBlockCount[type] - collectionStats.heapBlockFreeCount[type];
    Output::Print(_u(" %6s : %5d %5d %5d %5.1f"), name,
        liveCount, collectionStats.heapBlockFreeCount[type], collectionStats.heapBlockCount[type],
        (double)collectionStats.heapBlockFreeCount[type] / (double)collectionStats.heapBlockCount[type] * 100);

    if (type < HeapBlock::SmallBlockTypeCount)
    {
        Output::Print(_u(" : %5d %6.1f : %5d %6.1f"),
            collectionStats.heapBlockSweptCount[type],
            (double)collectionStats.heapBlockSweptCount[type] / (double)liveCount * 100,
            collectionStats.heapBlockConcurrentSweptCount[type],
            (double)collectionStats.heapBlockConcurrentSweptCount[type] / (double)collectionStats.heapBlockSweptCount[type] * 100);
    }
}

void
Recycler::PrintHeapBlockMemoryStats(char16 const * name, HeapBlock::HeapBlockType type)
{
    size_t allocableFreeByteCount = collectionStats.heapBlockFreeByteCount[type];
#if ENABLE_PARTIAL_GC
    size_t partialUnusedBytes = 0;
    if (this->enablePartialCollect)
    {
        partialUnusedBytes = allocableFreeByteCount
            - collectionStats.smallNonLeafHeapBlockPartialReuseBytes[type];
        allocableFreeByteCount -= partialUnusedBytes;
    }
#endif
    size_t totalByteCount = (collectionStats.heapBlockCount[type] - collectionStats.heapBlockFreeCount[type]) * AutoSystemInfo::PageSize;
    size_t liveByteCount = totalByteCount - collectionStats.heapBlockFreeByteCount[type];
    Output::Print(_u(" %6s: %10d %10d"), name, liveByteCount, allocableFreeByteCount);

#if ENABLE_PARTIAL_GC
    if (this->enablePartialCollect &&
        (type == HeapBlock::HeapBlockType::SmallNormalBlockType
        || type == HeapBlock::HeapBlockType::SmallFinalizableBlockType
#ifdef RECYCLER_WRITE_BARRIER
        || type == HeapBlock::HeapBlockType::SmallNormalBlockWithBarrierType
        || type == HeapBlock::HeapBlockType::SmallFinalizableBlockWithBarrierType
#endif
        || type == HeapBlock::HeapBlockType::MediumNormalBlockType
        || type == HeapBlock::HeapBlockType::MediumFinalizableBlockType
#ifdef RECYCLER_WRITE_BARRIER
        || type == HeapBlock::HeapBlockType::MediumNormalBlockWithBarrierType
        || type == HeapBlock::HeapBlockType::MediumFinalizableBlockWithBarrierType
#endif
        ))
    {
        Output::Print(_u(" %10d"), partialUnusedBytes);
    }
    else
#endif
    {
        Output::Print(_u(" "));
    }

    Output::Print(_u(" %10d %6.1f"), totalByteCount,
        (double)allocableFreeByteCount / (double)totalByteCount * 100);

#if ENABLE_PARTIAL_GC
    if (this->enablePartialCollect &&
        (type == HeapBlock::HeapBlockType::SmallNormalBlockType
        || type == HeapBlock::HeapBlockType::SmallFinalizableBlockType
#ifdef RECYCLER_WRITE_BARRIER
        || type == HeapBlock::HeapBlockType::SmallNormalBlockWithBarrierType
        || type == HeapBlock::HeapBlockType::SmallFinalizableBlockWithBarrierType
#endif
        || type == HeapBlock::HeapBlockType::MediumNormalBlockType
        || type == HeapBlock::HeapBlockType::MediumFinalizableBlockType
#ifdef RECYCLER_WRITE_BARRIER
        || type == HeapBlock::HeapBlockType::MediumNormalBlockWithBarrierType
        || type == HeapBlock::HeapBlockType::MediumFinalizableBlockWithBarrierType
#endif
        ))
    {
        Output::Print(_u(" %6.1f"), (double)partialUnusedBytes / (double)totalByteCount * 100);
    }
#endif
}

void
Recycler::PrintHeuristicCollectionStats()
{
    Output::Print(_u("---------------------------------------------------------------------------------------------------------------\n"));
    Output::Print(_u("GC Trigger : %10s %10s %10s"), _u("Start"), _u("Continue"), _u("Finish"));
#if ENABLE_PARTIAL_GC
    if (this->enablePartialCollect)
    {
        Output::Print(_u(" | Heuristics : %10s %10s %5s"), _u(""), _u(""), _u("%"));
    }
#endif
    Output::Print(_u("\n"));
    Output::Print(_u("---------------------------------------------------------------------------------------------------------------\n"));
    Output::Print(_u(" Alloc bytes : %10d %10d %10d"), collectionStats.startCollectAllocBytes, collectionStats.continueCollectAllocBytes, this->autoHeap.uncollectedAllocBytes);
#if ENABLE_PARTIAL_GC
    if (this->enablePartialCollect)
    {
        Output::Print(_u(" | Cost : %10d %10d %5.1f"), collectionStats.rescanRootBytes, collectionStats.estimatedPartialReuseBytes, collectionStats.collectCost * 100);
    }
#endif
    Output::Print(_u("\n"));
#if ENABLE_PARTIAL_GC
    if (this->enablePartialCollect)
    {
        Output::Print(_u(" | Efficacy : %10s %10s %5.1f\n"), _u(""), _u(""), collectionStats.collectEfficacy * 100);
    }
#endif
#if ENABLE_PARTIAL_GC
    if (this->enablePartialCollect)
    {
        Output::Print(_u(" New page : %10d %10s %10d"), collectionStats.startCollectNewPageCount, _u(""), autoHeap.uncollectedNewPageCount);
        Output::Print(_u(" | Partial Uncollect New Page : %10d %10d"), collectionStats.uncollectedNewPageCountPartialCollect * AutoSystemInfo::PageSize, this->uncollectedNewPageCountPartialCollect * AutoSystemInfo::PageSize);
        Output::Print(_u("\n"));
    }
#endif
    Output::Print(_u(" Finish try : %10d %10s %10s"), collectionStats.finishCollectTryCount, _u(""), _u(""));
#if ENABLE_PARTIAL_GC
    if (this->enablePartialCollect)
    {
        Output::Print(_u(" | Partial Reuse Min Free Bytes : %10d"), collectionStats.partialCollectSmallHeapBlockReuseMinFreeBytes * AutoSystemInfo::PageSize);
    }
#endif
    Output::Print(_u("\n"));
}

void
Recycler::PrintMarkCollectionStats()
{
    size_t nonMark = collectionStats.tryMarkCount + collectionStats.tryMarkInteriorCount - collectionStats.remarkCount - collectionStats.markData.markCount;
    size_t invalidCount = nonMark - collectionStats.tryMarkNullCount - collectionStats.tryMarkUnalignedCount
        - collectionStats.tryMarkNonRecyclerMemoryCount
        - collectionStats.tryMarkInteriorNonRecyclerMemoryCount
        - collectionStats.tryMarkInteriorNullCount;
    size_t leafCount = collectionStats.markData.markCount - collectionStats.scanCount;
    Output::Print(_u("---------------------------------------------------------------------------------------------------------------\n"));
    Output::Print(_u("Try Mark :%9s %5s %10s | Non-Mark : %9s %5s | Mark :%9s %5s \n"), _u("Count"), _u("%"), _u("Bytes"), _u("Count"), _u("%"), _u("Count"), _u("%"));
    Output::Print(_u("---------------------------------------------------------------------------------------------------------------\n"));
    Output::Print(_u(" TryMark :%9d %10d | Null : %9d %5.1f | Scan :%9d %5.1f\n"),
        collectionStats.tryMarkCount, collectionStats.tryMarkCount * sizeof(void *),
        collectionStats.tryMarkNullCount, (double)collectionStats.tryMarkNullCount / (double)nonMark * 100,
        collectionStats.scanCount, (double)collectionStats.scanCount / (double)collectionStats.markData.markCount * 100);
    Output::Print(_u(" Non-Mark :%9d %5.1f | Unaligned : %9d %5.1f | Leaf :%9d %5.1f\n"),
        nonMark, (double)nonMark / (double)collectionStats.tryMarkCount * 100,
        collectionStats.tryMarkUnalignedCount, (double)collectionStats.tryMarkUnalignedCount / (double)nonMark * 100,
        leafCount, (double)leafCount / (double)collectionStats.markData.markCount * 100);
    Output::Print(_u(" Mark :%9d %5.1f %10d | Non GC : %9d %5.1f | Track :%9d\n"),
        collectionStats.markData.markCount, (double)collectionStats.markData.markCount / (double)collectionStats.tryMarkCount * 100, collectionStats.markData.markBytes,
        collectionStats.tryMarkNonRecyclerMemoryCount, (double)collectionStats.tryMarkNonRecyclerMemoryCount / (double)nonMark * 100,
        collectionStats.trackCount);
    Output::Print(_u(" Remark :%9d %5.1f | Invalid : %9d %5.1f \n"),
        collectionStats.remarkCount, (double)collectionStats.remarkCount / (double)collectionStats.tryMarkCount * 100,
        invalidCount, (double)invalidCount / (double)nonMark * 100);
    Output::Print(_u(" TryMark Int:%9d %10d | Null Int : %9d %5.1f | Root :%9d | New :%9d\n"),
        collectionStats.tryMarkInteriorCount, collectionStats.tryMarkInteriorCount * sizeof(void *),
        collectionStats.tryMarkInteriorNullCount, (double)collectionStats.tryMarkInteriorNullCount / (double)nonMark * 100,
        collectionStats.rootCount, collectionStats.markThruNewObjCount);
    Output::Print(_u(" | Non GC Int: %9d %5.1f | Stack :%9d | NewFalse:%9d\n"),
        collectionStats.tryMarkInteriorNonRecyclerMemoryCount, (double)collectionStats.tryMarkInteriorNonRecyclerMemoryCount / (double)nonMark * 100,
        collectionStats.stackCount, collectionStats.markThruFalseNewObjCount);
}

void
Recycler::PrintBackgroundCollectionStat(RecyclerCollectionStats::MarkData const& markData)
{
    Output::Print(_u("BgSmall : %5d %6d %10d | BgLarge : %5d %6d %10d | BgMark :%9d "),
        markData.rescanPageCount,
        markData.rescanObjectCount,
        markData.rescanObjectByteCount,
        markData.rescanLargePageCount,
        markData.rescanLargeObjectCount,
        markData.rescanLargeByteCount,
        markData.markCount);

    double markRatio = (double)markData.markCount / (double)collectionStats.markData.markCount * 100;
    if (markRatio == 100.0)
    {
        Output::Print(_u(" 100"));
    }
    else
    {
        Output::Print(_u("%4.1f"), markRatio);
    }
    Output::Print(_u("\n"));
}
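
// Prints one row per background repeat-mark pass. The stats are recorded cumulatively, so
// each row after the first is converted to a delta against the previous pass before printing.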
void
Recycler::PrintBackgroundCollectionStats()
{
#if ENABLE_CONCURRENT_GC
    Output::Print(_u("---------------------------------------------------------------------------------------------------------------\n"));
    Output::Print(_u("BgSmall : %5s %6s %10s | BgLarge : %5s %6s %10s | BgMark :%9s %4s %s\n"),
        _u("Pages"), _u("Count"), _u("Bytes"), _u("Pages"), _u("Count"), _u("Bytes"), _u("Count"), _u("%"), _u("NonLeafBytes %"));
    Output::Print(_u("---------------------------------------------------------------------------------------------------------------\n"));
    this->PrintBackgroundCollectionStat(collectionStats.backgroundMarkData[0]);
    for (uint repeatCount = 1; repeatCount < RecyclerHeuristic::MaxBackgroundRepeatMarkCount; repeatCount++)
    {
        if (collectionStats.backgroundMarkData[repeatCount].markCount == 0)
        {
            break;
        }
        collectionStats.backgroundMarkData[repeatCount].rescanPageCount -= collectionStats.backgroundMarkData[repeatCount - 1].rescanPageCount;
        collectionStats.backgroundMarkData[repeatCount].rescanObjectCount -= collectionStats.backgroundMarkData[repeatCount - 1].rescanObjectCount;
        collectionStats.backgroundMarkData[repeatCount].rescanObjectByteCount -= collectionStats.backgroundMarkData[repeatCount - 1].rescanObjectByteCount;
        collectionStats.backgroundMarkData[repeatCount].rescanLargePageCount -= collectionStats.backgroundMarkData[repeatCount - 1].rescanLargePageCount;
        collectionStats.backgroundMarkData[repeatCount].rescanLargeObjectCount -= collectionStats.backgroundMarkData[repeatCount - 1].rescanLargeObjectCount;
        collectionStats.backgroundMarkData[repeatCount].rescanLargeByteCount -= collectionStats.backgroundMarkData[repeatCount - 1].rescanLargeByteCount;
        this->PrintBackgroundCollectionStat(collectionStats.backgroundMarkData[repeatCount]);
    }
#endif
}

void
Recycler::PrintMemoryStats()
{
    Output::Print(_u("----------------------------------------------------------------------------------------------------------------\n"));
    Output::Print(_u("Memory (Bytes) %4s %10s %10s %10s %6s %6s\n"), _u("Live"), _u("Free"), _u("Unused"), _u("Total"), _u("Free%"), _u("Unused%"));
    Output::Print(_u("----------------------------------------------------------------------------------------------------------------\n"));
    PrintHeapBlockMemoryStats(_u("Small"), HeapBlock::SmallNormalBlockType);
    Output::Print(_u("\n"));
    PrintHeapBlockMemoryStats(_u("SmFin"), HeapBlock::SmallFinalizableBlockType);
    Output::Print(_u("\n"));
#ifdef RECYCLER_WRITE_BARRIER
    PrintHeapBlockMemoryStats(_u("SmSWB"), HeapBlock::SmallNormalBlockWithBarrierType);
    Output::Print(_u("\n"));
    PrintHeapBlockMemoryStats(_u("SmFinSWB"), HeapBlock::SmallFinalizableBlockWithBarrierType);
    Output::Print(_u("\n"));
#endif
    PrintHeapBlockMemoryStats(_u("SmLeaf"), HeapBlock::SmallLeafBlockType);
    Output::Print(_u("\n"));
    PrintHeapBlockMemoryStats(_u("Medium"), HeapBlock::MediumNormalBlockType);
    Output::Print(_u("\n"));
    PrintHeapBlockMemoryStats(_u("MdFin"), HeapBlock::MediumFinalizableBlockType);
    Output::Print(_u("\n"));
#ifdef RECYCLER_WRITE_BARRIER
    PrintHeapBlockMemoryStats(_u("MdSWB"), HeapBlock::MediumNormalBlockWithBarrierType);
    Output::Print(_u("\n"));
    PrintHeapBlockMemoryStats(_u("MdFinSWB"), HeapBlock::MediumFinalizableBlockWithBarrierType);
    Output::Print(_u("\n"));
#endif
    PrintHeapBlockMemoryStats(_u("MdLeaf"), HeapBlock::MediumLeafBlockType);
    Output::Print(_u("\n"));

    size_t largeHeapBlockUnusedByteCount = collectionStats.largeHeapBlockTotalByteCount - collectionStats.largeHeapBlockUsedByteCount
        - collectionStats.heapBlockFreeByteCount[HeapBlock::LargeBlockType];
    Output::Print(_u(" Large: %10d %10d %10d %10d %6.1f %6.1f\n"),
        collectionStats.largeHeapBlockUsedByteCount,
        collectionStats.heapBlockFreeByteCount[HeapBlock::LargeBlockType],
        largeHeapBlockUnusedByteCount,
        collectionStats.largeHeapBlockTotalByteCount,
        (double)collectionStats.heapBlockFreeByteCount[HeapBlock::LargeBlockType] / (double)collectionStats.largeHeapBlockTotalByteCount * 100,
        (double)largeHeapBlockUnusedByteCount / (double)collectionStats.largeHeapBlockTotalByteCount * 100);

    Output::Print(_u("\nSmall heap block zeroing stats since last GC\n"));
    Output::Print(_u("Number of blocks with sweep state empty: normal=%d finalizable=%d leaf=%d\nNumber of blocks zeroed: %d\n"),
        collectionStats.numEmptySmallBlocks[HeapBlock::SmallNormalBlockType]
#ifdef RECYCLER_WRITE_BARRIER
        + collectionStats.numEmptySmallBlocks[HeapBlock::SmallNormalBlockWithBarrierType]
#endif
        + collectionStats.numEmptySmallBlocks[HeapBlock::MediumNormalBlockType]
#ifdef RECYCLER_WRITE_BARRIER
        + collectionStats.numEmptySmallBlocks[HeapBlock::MediumNormalBlockWithBarrierType]
#endif
        , collectionStats.numEmptySmallBlocks[HeapBlock::SmallFinalizableBlockType]
#ifdef RECYCLER_WRITE_BARRIER
        + collectionStats.numEmptySmallBlocks[HeapBlock::SmallFinalizableBlockWithBarrierType]
#endif
        + collectionStats.numEmptySmallBlocks[HeapBlock::MediumFinalizableBlockType]
#ifdef RECYCLER_WRITE_BARRIER
        + collectionStats.numEmptySmallBlocks[HeapBlock::MediumFinalizableBlockWithBarrierType]
#endif
        , collectionStats.numEmptySmallBlocks[HeapBlock::SmallLeafBlockType]
        + collectionStats.numEmptySmallBlocks[HeapBlock::MediumLeafBlockType],
        collectionStats.numZeroedOutSmallBlocks);
}
  6085. void
  6086. Recycler::PrintCollectStats()
  6087. {
  6088. Output::Print(_u("Collection Stats:\n"));
  6089. PrintHeuristicCollectionStats();
  6090. PrintMarkCollectionStats();
  6091. PrintBackgroundCollectionStats();
  6092. size_t freeCount = collectionStats.objectSweptCount - collectionStats.objectSweptFreeListCount;
  6093. size_t freeBytes = collectionStats.objectSweptBytes - collectionStats.objectSweptFreeListBytes;
  6094. Output::Print(_u("---------------------------------------------------------------------------------------------------------------\n"));
  6095. #if ENABLE_PARTIAL_GC || ENABLE_CONCURRENT_GC
  6096. Output::Print(_u("Rescan : %5s %6s %10s | Track : %5s | "), _u("Pages"), _u("Count"), _u("Bytes"), _u("Count"));
  6097. #endif
  6098. Output::Print(_u("Sweep : %7s | SweptObj : %5s %5s %10s\n"), _u("Count"), _u("Count"), _u("%%"), _u("Bytes"));
  6099. Output::Print(_u("---------------------------------------------------------------------------------------------------------------\n"));
  6100. Output::Print(_u(" Small : "));
  6101. #if ENABLE_PARTIAL_GC || ENABLE_CONCURRENT_GC
  6102. Output::Print(_u("%5d %6d %10d | "), collectionStats.markData.rescanPageCount, collectionStats.markData.rescanObjectCount, collectionStats.markData.rescanObjectByteCount);
  6103. #endif
  6104. #if ENABLE_CONCURRENT_GC
  6105. Output::Print(_u("Process : %5d | "), collectionStats.trackedObjectCount);
  6106. #else
  6107. Output::Print(_u(" | "));
  6108. #endif
  6109. Output::Print(_u(" Scan : %7d | Free : %6d %5.1f %10d\n"),
  6110. collectionStats.objectSweepScanCount,
  6111. freeCount, (double)freeCount / (double) collectionStats.objectSweptCount * 100, freeBytes);
  6112. Output::Print(_u(" Large : "));
  6113. #if ENABLE_PARTIAL_GC || ENABLE_CONCURRENT_GC
  6114. Output::Print(_u("%5d %6d %10d | "),
  6115. collectionStats.markData.rescanLargePageCount, collectionStats.markData.rescanLargeObjectCount, collectionStats.markData.rescanLargeByteCount);
  6116. #endif
  6117. #if ENABLE_PARTIAL_GC
  6118. Output::Print(_u("Client : %5d | "), collectionStats.clientTrackedObjectCount);
  6119. #else
  6120. Output::Print(_u(" | "));
  6121. #endif
  6122. Output::Print(_u(" Finalize : %7d | Free List: %6d %5.1f %10d\n"),
  6123. collectionStats.finalizeSweepCount,
  6124. collectionStats.objectSweptFreeListCount, (double)collectionStats.objectSweptFreeListCount / (double) collectionStats.objectSweptCount * 100, collectionStats.objectSweptFreeListBytes);
  6125. Output::Print(_u("---------------------------------------------------------------------------------------------------------------\n"));
  6126. Output::Print(_u("SweptBlk: Live Free Total Free%% : Swept Swept%% : CSwpt CSwpt%%"));
  6127. #if ENABLE_PARTIAL_GC
  6128. if (this->enablePartialCollect)
  6129. {
  6130. Output::Print(_u(" | Partial : Count Bytes Existing"));
  6131. }
  6132. #endif
  6133. Output::Print(_u("\n"));
  6134. Output::Print(_u("---------------------------------------------------------------------------------------------------------------\n"));
  6135. PrintHeapBlockStats(_u("Small"), HeapBlock::SmallNormalBlockType);
  6136. #if ENABLE_PARTIAL_GC
  6137. if (this->enablePartialCollect)
  6138. {
  6139. Output::Print(_u(" | Reuse : %5d %10d %10d"),
  6140. collectionStats.smallNonLeafHeapBlockPartialReuseCount[HeapBlock::SmallNormalBlockType],
  6141. collectionStats.smallNonLeafHeapBlockPartialReuseBytes[HeapBlock::MediumNormalBlockType],
  6142. collectionStats.smallNonLeafHeapBlockPartialReuseCount[HeapBlock::SmallNormalBlockType] * AutoSystemInfo::PageSize
  6143. - collectionStats.smallNonLeafHeapBlockPartialReuseBytes[HeapBlock::SmallNormalBlockType]);
  6144. }
  6145. #endif
  6146. Output::Print(_u("\n"));
  6147. PrintHeapBlockStats(_u("SmFin"), HeapBlock::SmallFinalizableBlockType);
  6148. #if ENABLE_PARTIAL_GC
  6149. if (this->enablePartialCollect)
  6150. {
  6151. Output::Print(_u(" | Unused : %5d %10d %10d"),
  6152. collectionStats.smallNonLeafHeapBlockPartialUnusedCount[HeapBlock::SmallFinalizableBlockType],
  6153. collectionStats.smallNonLeafHeapBlockPartialUnusedBytes[HeapBlock::SmallFinalizableBlockType],
  6154. collectionStats.smallNonLeafHeapBlockPartialUnusedCount[HeapBlock::SmallFinalizableBlockType] * AutoSystemInfo::PageSize
  6155. - collectionStats.smallNonLeafHeapBlockPartialUnusedBytes[HeapBlock::SmallFinalizableBlockType]);
  6156. }
  6157. #endif
  6158. Output::Print(_u("\n"));
  6159. #ifdef RECYCLER_WRITE_BARRIER
  6160. PrintHeapBlockStats(_u("SmSWB"), HeapBlock::SmallNormalBlockWithBarrierType);
  6161. #if ENABLE_PARTIAL_GC
  6162. if (this->enablePartialCollect)
  6163. {
  6164. Output::Print(_u(" | Unused : %5d %10d %10d"),
  6165. collectionStats.smallNonLeafHeapBlockPartialUnusedCount[HeapBlock::SmallNormalBlockWithBarrierType],
  6166. collectionStats.smallNonLeafHeapBlockPartialUnusedBytes[HeapBlock::SmallNormalBlockWithBarrierType],
  6167. collectionStats.smallNonLeafHeapBlockPartialUnusedCount[HeapBlock::SmallNormalBlockWithBarrierType] * AutoSystemInfo::PageSize
  6168. - collectionStats.smallNonLeafHeapBlockPartialUnusedBytes[HeapBlock::SmallNormalBlockWithBarrierType]);
  6169. }
  6170. #endif
  6171. Output::Print(_u("\n"));
  6172. PrintHeapBlockStats(_u("SmFin"), HeapBlock::SmallFinalizableBlockWithBarrierType);
  6173. #if ENABLE_PARTIAL_GC
  6174. if (this->enablePartialCollect)
  6175. {
  6176. Output::Print(_u(" | Unused : %5d %10d %10d"),
  6177. collectionStats.smallNonLeafHeapBlockPartialUnusedCount[HeapBlock::SmallFinalizableBlockWithBarrierType],
  6178. collectionStats.smallNonLeafHeapBlockPartialUnusedBytes[HeapBlock::SmallFinalizableBlockWithBarrierType],
  6179. collectionStats.smallNonLeafHeapBlockPartialUnusedCount[HeapBlock::SmallFinalizableBlockWithBarrierType] * AutoSystemInfo::PageSize
  6180. - collectionStats.smallNonLeafHeapBlockPartialUnusedBytes[HeapBlock::SmallFinalizableBlockWithBarrierType]);
  6181. }
  6182. #endif
  6183. Output::Print(_u("\n"));
  6184. #endif
    // TODO: This seems suspicious - why are we looking at smallNonLeaf stats while printing the leaf row...
    PrintHeapBlockStats(_u("SmLeaf"), HeapBlock::SmallLeafBlockType);
#if ENABLE_PARTIAL_GC
    if (this->enablePartialCollect)
    {
        Output::Print(_u(" | ReuseFin : %5d %10d %10d"),
            collectionStats.smallNonLeafHeapBlockPartialReuseCount[HeapBlock::SmallFinalizableBlockType],
            collectionStats.smallNonLeafHeapBlockPartialReuseBytes[HeapBlock::SmallFinalizableBlockType],
            collectionStats.smallNonLeafHeapBlockPartialReuseCount[HeapBlock::SmallFinalizableBlockType] * AutoSystemInfo::PageSize
                - collectionStats.smallNonLeafHeapBlockPartialReuseBytes[HeapBlock::SmallFinalizableBlockType]);
    }
#endif
    Output::Print(_u("\n"));
    PrintHeapBlockStats(_u("Medium"), HeapBlock::MediumNormalBlockType);
#if ENABLE_PARTIAL_GC
    if (this->enablePartialCollect)
    {
        Output::Print(_u(" | Reuse : %5d %10d %10d"),
            collectionStats.smallNonLeafHeapBlockPartialReuseCount[HeapBlock::MediumNormalBlockType],
            collectionStats.smallNonLeafHeapBlockPartialReuseBytes[HeapBlock::MediumNormalBlockType],
            collectionStats.smallNonLeafHeapBlockPartialReuseCount[HeapBlock::MediumNormalBlockType] * AutoSystemInfo::PageSize
                - collectionStats.smallNonLeafHeapBlockPartialReuseBytes[HeapBlock::MediumNormalBlockType]);
    }
#endif
    Output::Print(_u("\n"));
    PrintHeapBlockStats(_u("MdFin"), HeapBlock::MediumFinalizableBlockType);
#if ENABLE_PARTIAL_GC
    if (this->enablePartialCollect)
    {
        Output::Print(_u(" | Unused : %5d %10d %10d"),
            collectionStats.smallNonLeafHeapBlockPartialUnusedCount[HeapBlock::MediumFinalizableBlockType],
            collectionStats.smallNonLeafHeapBlockPartialUnusedBytes[HeapBlock::MediumFinalizableBlockType],
            collectionStats.smallNonLeafHeapBlockPartialUnusedCount[HeapBlock::MediumFinalizableBlockType] * AutoSystemInfo::PageSize
                - collectionStats.smallNonLeafHeapBlockPartialUnusedBytes[HeapBlock::MediumFinalizableBlockType]);
    }
#endif
    Output::Print(_u("\n"));
#ifdef RECYCLER_WRITE_BARRIER
    PrintHeapBlockStats(_u("MdSWB"), HeapBlock::MediumNormalBlockWithBarrierType);
#if ENABLE_PARTIAL_GC
    if (this->enablePartialCollect)
    {
        Output::Print(_u(" | Unused : %5d %10d %10d"),
            collectionStats.smallNonLeafHeapBlockPartialUnusedCount[HeapBlock::MediumNormalBlockWithBarrierType],
            collectionStats.smallNonLeafHeapBlockPartialUnusedBytes[HeapBlock::MediumNormalBlockWithBarrierType],
            collectionStats.smallNonLeafHeapBlockPartialUnusedCount[HeapBlock::MediumNormalBlockWithBarrierType] * AutoSystemInfo::PageSize
                - collectionStats.smallNonLeafHeapBlockPartialUnusedBytes[HeapBlock::MediumNormalBlockWithBarrierType]);
    }
#endif
    Output::Print(_u("\n"));
    PrintHeapBlockStats(_u("MdFinSWB"), HeapBlock::MediumFinalizableBlockWithBarrierType);
#if ENABLE_PARTIAL_GC
    if (this->enablePartialCollect)
    {
        Output::Print(_u(" | Unused : %5d %10d %10d"),
            collectionStats.smallNonLeafHeapBlockPartialUnusedCount[HeapBlock::MediumFinalizableBlockWithBarrierType],
            collectionStats.smallNonLeafHeapBlockPartialUnusedBytes[HeapBlock::MediumFinalizableBlockWithBarrierType],
            collectionStats.smallNonLeafHeapBlockPartialUnusedCount[HeapBlock::MediumFinalizableBlockWithBarrierType] * AutoSystemInfo::PageSize
                - collectionStats.smallNonLeafHeapBlockPartialUnusedBytes[HeapBlock::MediumFinalizableBlockWithBarrierType]);
    }
#endif
    Output::Print(_u("\n"));
#endif
    // TODO: This seems suspicious - why are we looking at smallNonLeaf stats while printing the leaf row...
    PrintHeapBlockStats(_u("MdLeaf"), HeapBlock::MediumLeafBlockType);
#if ENABLE_PARTIAL_GC
    if (this->enablePartialCollect)
    {
        Output::Print(_u(" | ReuseFin : %5d %10d %10d"),
            collectionStats.smallNonLeafHeapBlockPartialReuseCount[HeapBlock::MediumFinalizableBlockType],
            collectionStats.smallNonLeafHeapBlockPartialReuseBytes[HeapBlock::MediumFinalizableBlockType],
            collectionStats.smallNonLeafHeapBlockPartialReuseCount[HeapBlock::MediumFinalizableBlockType] * AutoSystemInfo::PageSize
                - collectionStats.smallNonLeafHeapBlockPartialReuseBytes[HeapBlock::MediumFinalizableBlockType]);
    }
#endif
    Output::Print(_u("\n"));
    // TODO: This can't possibly be correct...check on this later
    PrintHeapBlockStats(_u("Large"), HeapBlock::LargeBlockType);
#if ENABLE_PARTIAL_GC
    if (this->enablePartialCollect)
    {
        Output::Print(_u(" | UnusedFin : %5d %10d %10d"),
            collectionStats.smallNonLeafHeapBlockPartialUnusedCount[HeapBlock::SmallFinalizableBlockType],
            collectionStats.smallNonLeafHeapBlockPartialUnusedBytes[HeapBlock::SmallFinalizableBlockType],
            collectionStats.smallNonLeafHeapBlockPartialUnusedCount[HeapBlock::SmallFinalizableBlockType] * AutoSystemInfo::PageSize
                - collectionStats.smallNonLeafHeapBlockPartialUnusedBytes[HeapBlock::SmallFinalizableBlockType]);
    }
#endif
    Output::Print(_u("\n"));

    PrintMemoryStats();
    Output::Flush();
}
#endif
#ifdef RECYCLER_ZERO_MEM_CHECK
void
Recycler::VerifyZeroFill(void * address, size_t size)
{
    byte expectedFill = 0;
#ifdef RECYCLER_MEMORY_VERIFY
    if (this->VerifyEnabled())
    {
        expectedFill = Recycler::VerifyMemFill;
    }
#endif
    for (uint i = 0; i < size; i++)
    {
        Assert(((byte *)address)[i] == expectedFill);
    }
}
#endif
#ifdef RECYCLER_MEMORY_VERIFY
void
Recycler::FillCheckPad(void * address, size_t size, size_t alignedAllocSize, bool objectAlreadyInitialized)
{
    if (this->VerifyEnabled())
    {
        void* addressToVerify = address;
        size_t sizeToVerify = alignedAllocSize;
        if (objectAlreadyInitialized)
        {
            addressToVerify = ((char*) address + size);
            sizeToVerify = (alignedAllocSize - size);
        }
        // Verify that the non-pad portion still holds the fill pattern before laying down the pad
        VerifyCheckFill(addressToVerify, sizeToVerify - sizeof(size_t));
        FillPadNoCheck(address, size, alignedAllocSize, objectAlreadyInitialized);
    }
}

void
Recycler::FillPadNoCheck(void * address, size_t size, size_t alignedAllocSize, bool objectAlreadyInitialized)
{
    // Ignore the first word
    if (!objectAlreadyInitialized && size > sizeof(FreeObject))
    {
        memset((char *)address + sizeof(FreeObject), 0, size - sizeof(FreeObject));
    }
    // Write the pad size at the end
    *(size_t *)((char *)address + alignedAllocSize - sizeof(size_t)) = alignedAllocSize - size;
}
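
// Editor's note: an illustrative sketch, not part of the build. With verification
// enabled, an allocation of alignedAllocSize bytes is laid out by FillPadNoCheck as
//
//     [ object data: size bytes ][ pad ......... ][ pad size: sizeof(size_t) bytes ]
//     ^address                                    ^address + alignedAllocSize - sizeof(size_t)
//
// so a checker can recover the pad from the tail word (hypothetical helper name):
//
//     size_t GetPadSize(void * address, size_t alignedAllocSize)
//     {
//         return *(size_t *)((char *)address + alignedAllocSize - sizeof(size_t));
//     }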
void Recycler::Verify(Js::Phase phase)
{
    if (verifyEnabled && (!this->CollectionInProgress()))
    {
        if (GetRecyclerFlagsTable().RecyclerVerify.IsEnabled(phase))
        {
            autoHeap.Verify();
        }
    }
}

void Recycler::VerifyCheck(BOOL cond, char16 const * msg, void * address, void * corruptedAddress)
{
    if (!(cond))
    {
        fwprintf(stderr, _u("RECYCLER CORRUPTION: StartAddress=%p CorruptedAddress=%p: %s"), address, corruptedAddress, msg);
        Js::Throw::FatalInternalError();
    }
}

void Recycler::VerifyCheckFill(void * address, size_t size)
{
    for (byte * i = (byte *)address; i < (byte *)address + size; i++)
    {
        Recycler::VerifyCheck(*i == Recycler::VerifyMemFill, _u("memory written after freed"), address, i);
    }
}
void Recycler::VerifyCheckPadExplicitFreeList(void * address, size_t size)
{
    size_t * paddingAddress = (size_t *)((byte *)address + size - sizeof(size_t));
    size_t padding = *paddingAddress;
#pragma warning(suppress:4310)
    Assert(padding != (size_t)0xCACACACACACACACA); // Explicit-free objects must have been initialized at some point before they were freed
    Recycler::VerifyCheck(padding >= verifyPad + sizeof(size_t) && padding < size, _u("Invalid padding size"), address, paddingAddress);
    for (byte * i = (byte *)address + size - padding; i < (byte *)paddingAddress; i++)
    {
        Recycler::VerifyCheck(*i == Recycler::VerifyMemFill, _u("buffer overflow"), address, i);
    }
}

void Recycler::VerifyCheckPad(void * address, size_t size)
{
    size_t * paddingAddress = (size_t *)((byte *)address + size - sizeof(size_t));
    size_t padding = *paddingAddress;
#pragma warning(suppress:4310)
    if (padding == (size_t)0xCACACACACACACACA)
    {
        // Nascent blocks have objects that are not yet initialized with a pad size
        Recycler::VerifyCheckFill(address, size);
        return;
    }
    Recycler::VerifyCheck(padding >= verifyPad + sizeof(size_t) && padding < size, _u("Invalid padding size"), address, paddingAddress);
    for (byte * i = (byte *)address + size - padding; i < (byte *)paddingAddress; i++)
    {
        Recycler::VerifyCheck(*i == Recycler::VerifyMemFill, _u("buffer overflow"), address, i);
    }
}
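
// Editor's note: a worked example of the check above, under assumed numbers. For a
// 64-byte allocation whose tail word holds a pad size of 16, the loop verifies that
// bytes [48, 56) still contain VerifyMemFill, while bytes [56, 64) are the pad-size
// word itself; any write past the 48-byte payload trips the "buffer overflow" check.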
#endif

Recycler::AutoSetupRecyclerForNonCollectingMark::AutoSetupRecyclerForNonCollectingMark(Recycler& recycler, bool setupForHeapEnumeration)
    : m_recycler(recycler), m_setupDone(false)
{
    if (!setupForHeapEnumeration)
    {
        DoCommonSetup();
    }
}

void Recycler::AutoSetupRecyclerForNonCollectingMark::DoCommonSetup()
{
    Assert(m_recycler.collectionState == CollectionStateNotCollecting || m_recycler.collectionState == CollectionStateExit);
#if ENABLE_CONCURRENT_GC
    Assert(!m_recycler.DoQueueTrackedObject());
#endif
#if ENABLE_PARTIAL_GC
    // We need to get out of partial collect before we do the mark because we
    // will mess with the free bit vector state
    // GC-CONSIDER: don't mess with the free bit vector?
    if (m_recycler.inPartialCollectMode)
    {
        m_recycler.FinishPartialCollect();
    }
#endif
    m_previousCollectionState = m_recycler.collectionState;
#ifdef RECYCLER_STATS
    m_previousCollectionStats = m_recycler.collectionStats;
    memset(&m_recycler.collectionStats, 0, sizeof(RecyclerCollectionStats));
#endif
    m_setupDone = true;
}

void Recycler::AutoSetupRecyclerForNonCollectingMark::SetupForHeapEnumeration()
{
    Assert(!m_recycler.isHeapEnumInProgress);
    Assert(!m_recycler.allowAllocationDuringHeapEnum);
    m_recycler.EnsureNotCollecting();
    DoCommonSetup();
    m_recycler.ResetMarks(ResetMarkFlags_HeapEnumeration);
    m_recycler.collectionState = CollectionStateNotCollecting;
    m_recycler.isHeapEnumInProgress = true;
    m_recycler.isCollectionDisabled = true;
}

Recycler::AutoSetupRecyclerForNonCollectingMark::~AutoSetupRecyclerForNonCollectingMark()
{
    Assert(m_setupDone);
    Assert(!m_recycler.allowAllocationDuringHeapEnum);
#ifdef RECYCLER_STATS
    m_recycler.collectionStats = m_previousCollectionStats;
#endif
    m_recycler.collectionState = m_previousCollectionState;
    m_recycler.isHeapEnumInProgress = false;
    m_recycler.isCollectionDisabled = false;
}
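
// Editor's note: an illustrative sketch of how the RAII helper above might be used
// for heap enumeration (hypothetical call site, not part of the build):
//
//     {
//         Recycler::AutoSetupRecyclerForNonCollectingMark autoSetup(*recycler, /*setupForHeapEnumeration*/ true);
//         autoSetup.SetupForHeapEnumeration();
//         // ... walk the heap; collection stays disabled for the scope ...
//     }   // destructor restores the previous collection state and stats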
#ifdef RECYCLER_DUMP_OBJECT_GRAPH
bool Recycler::DumpObjectGraph(RecyclerObjectGraphDumper::Param * param)
{
    bool succeeded = false;
    bool isExited = (this->collectionState == CollectionStateExit);
    if (isExited)
    {
        this->collectionState = CollectionStateNotCollecting;
    }
    if (this->collectionState != CollectionStateNotCollecting)
    {
        Output::Print(_u("Can't dump object graph when collecting\n"));
        Output::Flush();
        return succeeded;
    }
    BEGIN_NO_EXCEPTION
    {
        RecyclerObjectGraphDumper objectGraphDumper(this, param);
        Recycler::AutoSetupRecyclerForNonCollectingMark AutoSetupRecyclerForNonCollectingMark(*this);
        AutoRestoreValue<bool> skipStackToggle(&this->skipStack, this->skipStack || (param && param->skipStack));
        this->Mark();
        this->objectGraphDumper = nullptr;
#ifdef RECYCLER_STATS
        if (param)
        {
            param->stats = this->collectionStats;
        }
#endif
        succeeded = !objectGraphDumper.isOutOfMemory;
    }
    END_NO_EXCEPTION
    if (isExited)
    {
        this->collectionState = CollectionStateExit;
    }
    if (!succeeded)
    {
        Output::Print(_u("Out of memory dumping object graph\n"));
    }
    Output::Flush();
    return succeeded;
}

void
Recycler::DumpObjectDescription(void *objectAddress)
{
#ifdef PROFILE_RECYCLER_ALLOC
    type_info const * typeinfo = nullptr;
    bool isArray = false;
    if (this->trackerDictionary)
    {
        TrackerData * trackerData = GetTrackerData(objectAddress);
        if (trackerData != nullptr)
        {
            typeinfo = trackerData->typeinfo;
            isArray = trackerData->isArray;
        }
        else
        {
            Assert(false);
        }
    }
    RecyclerObjectDumper::DumpObject(typeinfo, isArray, objectAddress);
#else
    Output::Print(_u("Address %p"), objectAddress);
#endif
}
#endif
#ifdef RECYCLER_STRESS
// All stress-mode collect variants are implicitly instantiated here
bool
Recycler::StressCollectNow()
{
    if (this->recyclerStress)
    {
        this->CollectNow<CollectStress>();
        return true;
    }
#if ENABLE_CONCURRENT_GC
    else if (this->recyclerBackgroundStress)
    {
        this->CollectNow<CollectBackgroundStress>();
        return true;
    }
    else if ((this->enableConcurrentMark || this->enableConcurrentSweep)
        && (this->recyclerConcurrentStress
            || this->recyclerConcurrentRepeatStress))
    {
#if ENABLE_PARTIAL_GC
        if (this->recyclerPartialStress)
        {
            this->CollectNow<CollectConcurrentPartialStress>();
            return true;
        }
        else
#endif // ENABLE_PARTIAL_GC
        {
            this->CollectNow<CollectConcurrentStress>();
            return true;
        }
    }
#endif // ENABLE_CONCURRENT_GC
#if ENABLE_PARTIAL_GC
    else if (this->recyclerPartialStress)
    {
        this->CollectNow<CollectPartialStress>();
        return true;
    }
#endif // ENABLE_PARTIAL_GC
    return false;
}
#endif // RECYCLER_STRESS
#ifdef TRACK_ALLOC
Recycler *
Recycler::TrackAllocInfo(TrackAllocData const& data)
{
#ifdef PROFILE_RECYCLER_ALLOC
    if (this->trackerDictionary != nullptr)
    {
        Assert(nextAllocData.IsEmpty());
        nextAllocData = data;
    }
#endif
    return this;
}

void
Recycler::ClearTrackAllocInfo(TrackAllocData* data/* = NULL*/)
{
#ifdef PROFILE_RECYCLER_ALLOC
    if (this->trackerDictionary != nullptr)
    {
        AssertMsg(!nextAllocData.IsEmpty(), "Missing tracking information for this allocation, are you not using the macros?");
        if (data)
        {
            *data = nextAllocData;
        }
        nextAllocData.Clear();
    }
#endif
}

#ifdef PROFILE_RECYCLER_ALLOC
bool
Recycler::DoProfileAllocTracker()
{
    bool doTracker = false;
#ifdef RECYCLER_DUMP_OBJECT_GRAPH
    doTracker = Js::Configuration::Global.flags.DumpObjectGraphOnExit
        || Js::Configuration::Global.flags.DumpObjectGraphOnCollect
        || Js::Configuration::Global.flags.DumpObjectGraphOnEnum;
#endif
#ifdef LEAK_REPORT
    if (Js::Configuration::Global.flags.IsEnabled(Js::LeakReportFlag))
    {
        doTracker = true;
    }
#endif
#ifdef CHECK_MEMORY_LEAK
    if (Js::Configuration::Global.flags.CheckMemoryLeak)
    {
        doTracker = true;
    }
#endif
    return doTracker || MemoryProfiler::DoTrackRecyclerAllocation();
}

void
Recycler::InitializeProfileAllocTracker()
{
    if (DoProfileAllocTracker())
    {
        trackerDictionary = NoCheckHeapNew(TypeInfotoTrackerItemMap, &NoCheckHeapAllocator::Instance, 163);
#pragma prefast(suppress:6031, "InitializeCriticalSectionAndSpinCount always succeeds since Vista. No need to check return value")
        InitializeCriticalSectionAndSpinCount(&trackerCriticalSection, 1000);
    }
    nextAllocData.Clear();
}
void
Recycler::TrackAllocCore(void * object, size_t size, const TrackAllocData& trackAllocData, bool traceLifetime)
{
    auto&& typeInfo = trackAllocData.GetTypeInfo();
    if (CONFIG_FLAG(KeepRecyclerTrackData))
    {
        TrackFree((char*)object, size);
    }
    Assert(GetTrackerData(object) == nullptr || GetTrackerData(object) == &TrackerData::ExplicitFreeListObjectData);
    Assert(typeInfo != nullptr);
    TrackerItem * item;
    size_t allocCount = trackAllocData.GetCount();
    size_t itemSize = (size - trackAllocData.GetPlusSize());
    bool isArray;
    if (allocCount != (size_t)-1)
    {
        isArray = true;
        itemSize = itemSize / allocCount;
    }
    else
    {
        isArray = false;
        allocCount = 1;
    }
    if (!trackerDictionary->TryGetValue(typeInfo, &item))
    {
        if (CONFIG_FLAG(KeepRecyclerTrackData) && isArray) // type info is not useful; record the stack instead
        {
            size_t stackTraceSize = 16 * sizeof(void*);
            item = NoCheckHeapNewPlus(stackTraceSize, TrackerItem, typeInfo);
            StackBackTrace::Capture((char*)&item[1], stackTraceSize, 0);
        }
        else
        {
            item = NoCheckHeapNew(TrackerItem, typeInfo);
        }
        item->instanceData.ItemSize = itemSize;
        item->arrayData.ItemSize = itemSize;
        trackerDictionary->Item(typeInfo, item);
    }
    else
    {
        Assert(item->instanceData.typeinfo == typeInfo);
        Assert(item->instanceData.ItemSize == itemSize);
        Assert(item->arrayData.ItemSize == itemSize);
    }
    TrackerData& data = (isArray) ? item->arrayData : item->instanceData;
    data.ItemCount += allocCount;
    data.AllocCount++;
    data.ReqSize += size;
    data.AllocSize += HeapInfo::GetAlignedSizeNoCheck(size);
#ifdef TRACE_OBJECT_LIFETIME
    data.TraceLifetime = traceLifetime;
    if (traceLifetime)
    {
        Output::Print(data.isArray ? _u("Allocated %S[] %p\n") : _u("Allocated %S %p\n"), data.typeinfo->name(), object);
    }
#endif
#ifdef PERF_COUNTERS
    ++data.counter;
    data.sizeCounter += HeapInfo::GetAlignedSizeNoCheck(size);
#endif
    SetTrackerData(object, &data);
}
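
// Editor's note: a worked example of the accounting above, under assumed inputs.
// For an array allocation tracked with count = 4 and plusSize = 8 on a raw request
// of size = 72, itemSize = (72 - 8) / 4 = 16; arrayData.ItemCount grows by 4 and
// arrayData.AllocCount by 1. A non-array request (count == (size_t)-1) instead
// lands in instanceData with an allocCount of 1.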
void* Recycler::TrackAlloc(void* object, size_t size, const TrackAllocData& trackAllocData, bool traceLifetime)
{
    if (this->trackerDictionary != nullptr)
    {
        Assert(nextAllocData.IsEmpty()); // should have been cleared
        EnterCriticalSection(&trackerCriticalSection);
        TrackAllocCore(object, size, trackAllocData, traceLifetime);
        LeaveCriticalSection(&trackerCriticalSection);
    }
    return object;
}

void
Recycler::TrackIntegrate(__in_ecount(blockSize) char * blockAddress, size_t blockSize, size_t allocSize, size_t objectSize, const TrackAllocData& trackAllocData)
{
    if (this->trackerDictionary != nullptr)
    {
        Assert(nextAllocData.IsEmpty()); // should have been cleared
        EnterCriticalSection(&trackerCriticalSection);
        char * address = blockAddress;
        char * blockEnd = blockAddress + blockSize;
        while (address + allocSize <= blockEnd)
        {
            TrackAllocCore(address, objectSize, trackAllocData);
            address += allocSize;
        }
        LeaveCriticalSection(&trackerCriticalSection);
    }
}
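
// Editor's note: a worked example of the integration walk above, under assumed
// inputs. For blockSize = 4096, allocSize = 256, and objectSize = 240, the loop
// records 16 objects at blockAddress, blockAddress + 256, ..., blockAddress + 3840,
// each tracked as a 240-byte allocation.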
BOOL Recycler::TrackFree(const char* address, size_t size)
{
    if (this->trackerDictionary != nullptr)
    {
        EnterCriticalSection(&trackerCriticalSection);
        TrackerData * data = GetTrackerData((char *)address);
        if (data != nullptr)
        {
            if (data != &TrackerData::EmptyData)
            {
#ifdef PERF_COUNTERS
                --data->counter;
                data->sizeCounter -= size;
#endif
                if (data->typeinfo == &typeid(RecyclerWeakReferenceBase))
                {
                    TrackFreeWeakRef((RecyclerWeakReferenceBase *)address);
                }
                data->FreeSize += size;
                data->FreeCount++;
#ifdef TRACE_OBJECT_LIFETIME
                if (data->TraceLifetime)
                {
                    Output::Print(data->isArray ? _u("Freed %S[] %p\n") : _u("Freed %S %p\n"), data->typeinfo->name(), address);
                }
#endif
            }
            SetTrackerData((char *)address, nullptr);
        }
        else
        {
            if (!CONFIG_FLAG(KeepRecyclerTrackData))
            {
                Assert(false);
            }
        }
        LeaveCriticalSection(&trackerCriticalSection);
    }
    return true;
}

Recycler::TrackerData *
Recycler::GetTrackerData(void * address)
{
    HeapBlock * heapBlock = this->FindHeapBlock(address);
    Assert(heapBlock != nullptr);
    return (Recycler::TrackerData *)heapBlock->GetTrackerData(address);
}

void
Recycler::SetTrackerData(void * address, TrackerData * data)
{
    HeapBlock * heapBlock = this->FindHeapBlock(address);
    Assert(heapBlock != nullptr);
    heapBlock->SetTrackerData(address, data);
}
void
Recycler::TrackUnallocated(__in char* address, __in char *endAddress, size_t sizeCat)
{
    if (!CONFIG_FLAG(KeepRecyclerTrackData))
    {
        if (this->trackerDictionary != nullptr)
        {
            EnterCriticalSection(&trackerCriticalSection);
            while (address + sizeCat <= endAddress)
            {
                Assert(GetTrackerData(address) == nullptr);
                SetTrackerData(address, &TrackerData::EmptyData);
                address += sizeCat;
            }
            LeaveCriticalSection(&trackerCriticalSection);
        }
    }
}

void
Recycler::TrackAllocWeakRef(RecyclerWeakReferenceBase * weakRef)
{
    Assert(weakRef->typeInfo != nullptr);
#if DBG && defined(PERF_COUNTERS)
    if (this->trackerDictionary != nullptr)
    {
        TrackerItem * item;
        if (trackerDictionary->TryGetValue(weakRef->typeInfo, &item))
        {
            weakRef->counter = &item->weakRefCounter;
        }
        else
        {
            weakRef->counter = &PerfCounter::RecyclerTrackerCounterSet::GetWeakRefPerfCounter(weakRef->typeInfo);
        }
        ++(*weakRef->counter);
    }
#endif
}

void
Recycler::TrackFreeWeakRef(RecyclerWeakReferenceBase * weakRef)
{
#if DBG && defined(PERF_COUNTERS)
    if (weakRef->counter != nullptr)
    {
        --(*weakRef->counter);
    }
#endif
}
void
Recycler::PrintAllocStats()
{
    if (this->trackerDictionary == nullptr)
    {
        return;
    }
    size_t itemCount = 0;
    int allocCount = 0;
    int64 reqSize = 0;
    int64 allocSize = 0;
    int freeCount = 0;
    int64 freeSize = 0;
    Output::Print(_u("=================================================================================================================\n"));
    Output::Print(_u("Recycler Allocations\n"));
    Output::Print(_u("=================================================================================================================\n"));
    Output::Print(_u("ItemSize  ItemCount AllocCount     RequestSize       AllocSize  FreeCount        FreeSize  DiffCount        DiffSize\n"));
    Output::Print(_u("-------- ---------- ---------- --------------- --------------- ---------- --------------- ---------- ---------------\n"));
    for (int i = 0; i < trackerDictionary->Count(); i++)
    {
        TrackerItem * item = trackerDictionary->GetValueAt(i);
        type_info const * typeinfo = trackerDictionary->GetKeyAt(i);
        if (item->instanceData.AllocCount != 0)
        {
            Output::Print(_u("%8d %10d %10d %15I64d %15I64d %10d %15I64d %10d %15I64d %S\n"),
                item->instanceData.ItemSize, item->instanceData.ItemCount, item->instanceData.AllocCount, item->instanceData.ReqSize,
                item->instanceData.AllocSize, item->instanceData.FreeCount, item->instanceData.FreeSize,
                item->instanceData.AllocCount - item->instanceData.FreeCount, item->instanceData.AllocSize - item->instanceData.FreeSize, typeinfo->name());
            itemCount += item->instanceData.ItemCount;
            allocCount += item->instanceData.AllocCount;
            reqSize += item->instanceData.ReqSize;
            allocSize += item->instanceData.AllocSize;
            freeCount += item->instanceData.FreeCount;
            freeSize += item->instanceData.FreeSize;
        }
        if (item->arrayData.AllocCount != 0)
        {
            Output::Print(_u("%8d %10d %10d %15I64d %15I64d %10d %15I64d %10d %15I64d %S[]\n"),
                item->arrayData.ItemSize, item->arrayData.ItemCount, item->arrayData.AllocCount, item->arrayData.ReqSize,
                item->arrayData.AllocSize, item->arrayData.FreeCount, item->arrayData.FreeSize,
                item->arrayData.AllocCount - item->arrayData.FreeCount, item->arrayData.AllocSize - item->arrayData.FreeSize, typeinfo->name());
            itemCount += item->arrayData.ItemCount;
            allocCount += item->arrayData.AllocCount;
            reqSize += item->arrayData.ReqSize;
            allocSize += item->arrayData.AllocSize;
            freeCount += item->arrayData.FreeCount;
            freeSize += item->arrayData.FreeSize;
        }
    }
    Output::Print(_u("-------- ---------- ---------- --------------- --------------- ---------- --------------- ---------- ---------------\n"));
    Output::Print(_u(" %8d %10d %15I64d %15I64d %10d %15I64d %10d %15I64d **Total**\n"),
        itemCount, allocCount, reqSize, allocSize, freeCount, freeSize, allocCount - freeCount, allocSize - freeSize);
#ifdef EXCEL_FRIENDLY_DUMP
    Output::Print(_u("\nExcel friendly version\nItemSize\tItemCount\tAllocCount\tRequestSize\tAllocSize\tFreeCount\tFreeSize\tDiffCount\tDiffSize\tType\n"));
    for (int i = 0; i < trackerDictionary->Count(); i++)
    {
        TrackerItem * item = trackerDictionary->GetValueAt(i);
        type_info const * typeinfo = trackerDictionary->GetKeyAt(i);
        if (item->instanceData.AllocCount != 0)
        {
            Output::Print(_u("%d\t%d\t%d\t%I64d\t%I64d\t%d\t%I64d\t%d\t%I64d\t%S\n"),
                item->instanceData.ItemSize, item->instanceData.ItemCount, item->instanceData.AllocCount, item->instanceData.ReqSize,
                item->instanceData.AllocSize, item->instanceData.FreeCount, item->instanceData.FreeSize,
                item->instanceData.AllocCount - item->instanceData.FreeCount, item->instanceData.AllocSize - item->instanceData.FreeSize, typeinfo->name());
        }
        if (item->arrayData.AllocCount != 0)
        {
            Output::Print(_u("%d\t%d\t%d\t%I64d\t%I64d\t%d\t%I64d\t%d\t%I64d\t%S[]\n"),
                item->arrayData.ItemSize, item->arrayData.ItemCount, item->arrayData.AllocCount, item->arrayData.ReqSize,
                item->arrayData.AllocSize, item->arrayData.FreeCount, item->arrayData.FreeSize,
                item->arrayData.AllocCount - item->arrayData.FreeCount, item->arrayData.AllocSize - item->arrayData.FreeSize, typeinfo->name());
        }
    }
#endif // EXCEL_FRIENDLY_DUMP
    Output::Flush();
}
#endif // PROFILE_RECYCLER_ALLOC
#endif // TRACK_ALLOC
#ifdef RECYCLER_VERIFY_MARK
void
Recycler::VerifyMark()
{
    VerifyMarkRoots();
    // Can't really verify the stack, since the recycler code between ScanStack and now may have introduced false references.
    // VerifyMarkStack();
    autoHeap.VerifyMark();
}

void
Recycler::VerifyMarkRoots()
{
    {
        this->VerifyMark(transientPinnedObject);
        pinnedObjectMap.Map([this](void * obj, PinRecord const &refCount)
        {
            if (refCount == 0)
            {
                Assert(this->hasPendingUnpinnedObject);
            }
            else
            {
                // Use the pin record as the source reference
                this->VerifyMark(obj);
            }
        });
    }
    DList<GuestArenaAllocator, HeapAllocator>::Iterator guestArenaIter(&guestArenaList);
    while (guestArenaIter.Next())
    {
        if (guestArenaIter.Data().pendingDelete)
        {
            Assert(this->hasPendingDeleteGuestArena);
        }
        else
        {
            VerifyMarkArena(&guestArenaIter.Data());
        }
    }
    DList<ArenaData *, HeapAllocator>::Iterator externalGuestArenaIter(&externalGuestArenaList);
    while (externalGuestArenaIter.Next())
    {
        VerifyMarkArena(externalGuestArenaIter.Data());
    }
    // We can't check external roots here
}
void
Recycler::VerifyMarkArena(ArenaData * alloc)
{
    VerifyMarkBigBlockList(alloc->GetBigBlocks(false));
    VerifyMarkBigBlockList(alloc->GetFullBlocks());
    VerifyMarkArenaMemoryBlockList(alloc->GetMemoryBlocks());
}

void
Recycler::VerifyMarkBigBlockList(BigBlock * memoryBlocks)
{
    size_t scanRootBytes = 0;
    BigBlock *blockp = memoryBlocks;
    while (blockp != NULL)
    {
        void** base = (void**)blockp->GetBytes();
        size_t slotCount = blockp->currentByte / sizeof(void*);
        scanRootBytes += blockp->currentByte;
        for (size_t i = 0; i < slotCount; i++)
        {
            VerifyMark(base[i]);
        }
        blockp = blockp->nextBigBlock;
    }
}

void
Recycler::VerifyMarkArenaMemoryBlockList(ArenaMemoryBlock * memoryBlocks)
{
    size_t scanRootBytes = 0;
    ArenaMemoryBlock *blockp = memoryBlocks;
    while (blockp != NULL)
    {
        void** base = (void**)blockp->GetBytes();
        size_t slotCount = blockp->nbytes / sizeof(void*);
        scanRootBytes += blockp->nbytes;
        for (size_t i = 0; i < slotCount; i++)
        {
            VerifyMark(base[i]);
        }
        blockp = blockp->next;
    }
}

void
Recycler::VerifyMarkStack()
{
    SAVE_THREAD_CONTEXT();
    void ** stackTop = (void**) this->savedThreadContext.GetStackTop();
    void * stackStart = GetStackBase();
    Assert(stackStart > stackTop);
    for (; stackTop < stackStart; stackTop++)
    {
        void* candidate = *stackTop;
        VerifyMark(candidate);
    }
    void** registers = this->savedThreadContext.GetRegisters();
    for (int i = 0; i < SavedRegisterState::NumRegistersToSave; i++)
    {
        VerifyMark(registers[i]);
    }
}
bool
Recycler::VerifyMark(void * candidate)
{
    void * realAddress;
    HeapBlock * heapBlock;
    if (this->enableScanInteriorPointers)
    {
        heapBlock = heapBlockMap.GetHeapBlock(candidate);
        if (heapBlock == nullptr)
        {
            return false;
        }
        realAddress = heapBlock->GetRealAddressFromInterior(candidate);
        if (realAddress == nullptr)
        {
            return false;
        }
    }
    else
    {
        heapBlock = this->FindHeapBlock(candidate);
        if (heapBlock == nullptr)
        {
            return false;
        }
        realAddress = candidate;
    }
    return heapBlock->VerifyMark(realAddress);
}
#endif
ArenaAllocator *
Recycler::CreateGuestArena(char16 const * name, void (*outOfMemoryFunc)())
{
    // Note, guest arenas use the large block allocator.
    return guestArenaList.PrependNode(&HeapAllocator::Instance, name, &recyclerLargeBlockPageAllocator, outOfMemoryFunc);
}

void
Recycler::DeleteGuestArena(ArenaAllocator * arenaAllocator)
{
    GuestArenaAllocator * guestArenaAllocator = static_cast<GuestArenaAllocator *>(arenaAllocator);
#if ENABLE_CONCURRENT_GC
    if (this->hasPendingConcurrentFindRoot)
    {
        // We are doing a concurrent find root; don't modify the list. Instead, mark the
        // arena to be deleted later, when we do find root in thread.
        Assert(guestArenaList.HasElement(guestArenaAllocator));
        this->hasPendingDeleteGuestArena = true;
        guestArenaAllocator->pendingDelete = true;
    }
    else
#endif
    {
        guestArenaList.RemoveElement(&HeapAllocator::Instance, guestArenaAllocator);
    }
}
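
// Editor's note: an illustrative sketch of pairing the two functions above
// (hypothetical call site, not part of the build):
//
//     ArenaAllocator * guest = recycler->CreateGuestArena(_u("MyGuest"), Js::Throw::OutOfMemory);
//     // ... allocations in the guest arena are scanned as recycler roots ...
//     recycler->DeleteGuestArena(guest);
//
// While a concurrent find-root pass is in flight, DeleteGuestArena only flags the
// arena (pendingDelete); the in-thread find root unlinks it later.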
#ifdef LEAK_REPORT
void
Recycler::ReportLeaks()
{
    if (GetRecyclerFlagsTable().IsEnabled(Js::LeakReportFlag))
    {
        if (GetRecyclerFlagsTable().ForceMemoryLeak)
        {
            AUTO_HANDLED_EXCEPTION_TYPE(ExceptionType_DisableCheck);
            struct FakeMemory { Field(int) f; };
            FakeMemory * f = RecyclerNewStruct(this, FakeMemory);
            this->RootAddRef(f);
        }
        LeakReport::StartSection(_u("Object Graph"));
        LeakReport::StartRedirectOutput();
        RecyclerObjectGraphDumper::Param param = { 0 };
        param.skipStack = true;
        if (!this->DumpObjectGraph(&param))
        {
            LeakReport::Print(_u("--------------------------------------------------------------------------------\n"));
            LeakReport::Print(_u("ERROR: Out of memory generating leak report\n"));
            param.stats.markData.markCount = 0;
        }
        LeakReport::EndRedirectOutput();
        if (param.stats.markData.markCount != 0)
        {
            LeakReport::Print(_u("--------------------------------------------------------------------------------\n"));
            LeakReport::Print(_u("Recycler Leaked Object: %d bytes (%d objects)\n"),
                param.stats.markData.markBytes, param.stats.markData.markCount);
#ifdef STACK_BACK_TRACE
            if (GetRecyclerFlagsTable().LeakStackTrace)
            {
                LeakReport::StartSection(_u("Pinned object stack traces"));
                LeakReport::StartRedirectOutput();
                this->PrintPinnedObjectStackTraces();
                LeakReport::EndRedirectOutput();
                LeakReport::EndSection();
            }
#endif
        }
        LeakReport::EndSection();
    }
}

void
Recycler::ReportLeaksOnProcessDetach()
{
    if (GetRecyclerFlagsTable().IsEnabled(Js::LeakReportFlag))
    {
        AUTO_LEAK_REPORT_SECTION(this->GetRecyclerFlagsTable(), _u("Recycler (%p): Process Termination"), this);
        LeakReport::StartRedirectOutput();
        ReportOnProcessDetach([=]() { this->ReportLeaks(); });
        LeakReport::EndRedirectOutput();
    }
}
#endif
#ifdef CHECK_MEMORY_LEAK
void
Recycler::CheckLeaks(char16 const * header)
{
    if (GetRecyclerFlagsTable().CheckMemoryLeak && this->isPrimaryMarkContextInitialized)
    {
        if (GetRecyclerFlagsTable().ForceMemoryLeak)
        {
            AUTO_HANDLED_EXCEPTION_TYPE(ExceptionType_DisableCheck);
            struct FakeMemory { Field(int) f; };
            FakeMemory * f = RecyclerNewStruct(this, FakeMemory);
            this->RootAddRef(f);
        }
        Output::CaptureStart();
        Output::Print(_u("-------------------------------------------------------------------------------------\n"));
        Output::Print(_u("Recycler (%p): %s Leaked Roots\n"), this, header);
        Output::Print(_u("-------------------------------------------------------------------------------------\n"));
        RecyclerObjectGraphDumper::Param param = { 0 };
        param.dumpRootOnly = true;
        param.skipStack = true;
        if (!this->DumpObjectGraph(&param))
        {
            free(Output::CaptureEnd());
            Output::Print(_u("ERROR: Out of memory generating leak report\n"));
            return;
        }
        if (param.stats.markData.markCount != 0)
        {
#ifdef STACK_BACK_TRACE
            if (GetRecyclerFlagsTable().LeakStackTrace)
            {
                Output::Print(_u("-------------------------------------------------------------------------------------\n"));
                Output::Print(_u("Pinned object stack traces\n"));
                Output::Print(_u("-------------------------------------------------------------------------------------\n"));
                this->PrintPinnedObjectStackTraces();
            }
#endif
            Output::Print(_u("-------------------------------------------------------------------------------------\n"));
            Output::Print(_u("Recycler Leaked Object: %d bytes (%d objects)\n"),
                param.stats.markData.markBytes, param.stats.markData.markCount);
            char16 * buffer = Output::CaptureEnd();
            MemoryLeakCheck::AddLeakDump(buffer, param.stats.markData.markBytes, param.stats.markData.markCount);
#ifdef GENERATE_DUMP
            if (GetRecyclerFlagsTable().IsEnabled(Js::DumpOnLeakFlag))
            {
                Js::Throw::GenerateDump(GetRecyclerFlagsTable().DumpOnLeak);
            }
#endif
        }
        else
        {
            free(Output::CaptureEnd());
        }
    }
}

void
Recycler::CheckLeaksOnProcessDetach(char16 const * header)
{
    if (GetRecyclerFlagsTable().CheckMemoryLeak)
    {
        ReportOnProcessDetach([=]() { this->CheckLeaks(header); });
    }
}
#endif
#if defined(LEAK_REPORT) || defined(CHECK_MEMORY_LEAK)
template <class Fn>
void
Recycler::ReportOnProcessDetach(Fn fn)
{
#if DBG
    // Process detach can be done on any thread, just disable the thread check
    this->markContext.GetPageAllocator()->SetDisableThreadAccessCheck();
#endif
#if ENABLE_CONCURRENT_GC
    if (this->IsConcurrentState())
    {
        this->AbortConcurrent(true);
    }
    if (this->CollectionInProgress())
    {
        Output::Print(_u("WARNING: Thread terminated during GC. Can't dump object graph\n"));
        return;
    }
#else
    Assert(!this->CollectionInProgress());
#endif
    // Don't mark external roots on another thread
    this->SetExternalRootMarker(NULL, NULL);
#if DBG
    this->ResetThreadId();
#endif
    fn();
}

#ifdef STACK_BACK_TRACE
void
Recycler::PrintPinnedObjectStackTraces()
{
    pinnedObjectMap.Map([this](void * object, PinRecord const& pinRecord)
    {
        this->DumpObjectDescription(object);
        Output::Print(_u("\n"));
        StackBackTraceNode::PrintAll(pinRecord.stackBackTraces);
    });
}
#endif
#endif
#if defined(RECYCLER_DUMP_OBJECT_GRAPH) || defined(LEAK_REPORT) || defined(CHECK_MEMORY_LEAK)
void
Recycler::SetInDllCanUnloadNow()
{
    inDllCanUnloadNow = true;
    // Just clear out the root marker for the dump graph and report leaks
    SetExternalRootMarker(NULL, NULL);
}

void
Recycler::SetInDetachProcess()
{
    inDetachProcess = true;
    // Just clear out the root marker for the dump graph and report leaks
    SetExternalRootMarker(NULL, NULL);
}
#endif
#ifdef ENABLE_JS_ETW
ULONG Recycler::EventWriteFreeMemoryBlock(HeapBlock* heapBlock)
{
    if (EventEnabledJSCRIPT_RECYCLER_FREE_MEMORY_BLOCK())
    {
        char* memoryAddress = NULL;
        ULONG objectSize = 0;
        ULONG blockSize = 0;
        switch (heapBlock->GetHeapBlockType())
        {
        case HeapBlock::HeapBlockType::SmallFinalizableBlockType:
        case HeapBlock::HeapBlockType::SmallNormalBlockType:
#ifdef RECYCLER_WRITE_BARRIER
        case HeapBlock::HeapBlockType::SmallFinalizableBlockWithBarrierType:
        case HeapBlock::HeapBlockType::SmallNormalBlockWithBarrierType:
#endif
        case HeapBlock::HeapBlockType::SmallLeafBlockType:
        {
            SmallHeapBlock* smallHeapBlock = static_cast<SmallHeapBlock*>(heapBlock);
            memoryAddress = smallHeapBlock->GetAddress();
            blockSize = (ULONG)(smallHeapBlock->GetEndAddress() - memoryAddress);
            objectSize = smallHeapBlock->GetObjectSize();
        }
        break;
        case HeapBlock::HeapBlockType::MediumFinalizableBlockType:
        case HeapBlock::HeapBlockType::MediumNormalBlockType:
#ifdef RECYCLER_WRITE_BARRIER
        case HeapBlock::HeapBlockType::MediumFinalizableBlockWithBarrierType:
        case HeapBlock::HeapBlockType::MediumNormalBlockWithBarrierType:
#endif
        case HeapBlock::HeapBlockType::MediumLeafBlockType:
        {
            MediumHeapBlock* mediumHeapBlock = static_cast<MediumHeapBlock*>(heapBlock);
            memoryAddress = mediumHeapBlock->GetAddress();
            blockSize = (ULONG)(mediumHeapBlock->GetEndAddress() - memoryAddress);
            objectSize = mediumHeapBlock->GetObjectSize();
        }
        break;
        case HeapBlock::HeapBlockType::LargeBlockType:
        {
            LargeHeapBlock* largeHeapBlock = static_cast<LargeHeapBlock*>(heapBlock);
            memoryAddress = largeHeapBlock->GetBeginAddress();
            blockSize = (ULONG)(largeHeapBlock->GetEndAddress() - memoryAddress);
            objectSize = blockSize;
        }
        break;
        default:
            AssertMsg(FALSE, "invalid heapblock type");
        }
        EventWriteJSCRIPT_RECYCLER_FREE_MEMORY_BLOCK(memoryAddress, blockSize, objectSize);
    }
    return S_OK;
}

void Recycler::FlushFreeRecord()
{
    Assert(bulkFreeMemoryWrittenCount <= Recycler::BulkFreeMemoryCount);
    JS_ETW(EventWriteJSCRIPT_RECYCLER_FREE_MEMORY(bulkFreeMemoryWrittenCount, sizeof(Recycler::ETWFreeRecord), etwFreeRecords));
    bulkFreeMemoryWrittenCount = 0;
}

void Recycler::AppendFreeMemoryETWRecord(__in char *address, size_t size)
{
    Assert(bulkFreeMemoryWrittenCount < Recycler::BulkFreeMemoryCount);
    __analysis_assume(bulkFreeMemoryWrittenCount < Recycler::BulkFreeMemoryCount);
    etwFreeRecords[bulkFreeMemoryWrittenCount].memoryAddress = address;
    // TODO: change to size_t or uint64?
    etwFreeRecords[bulkFreeMemoryWrittenCount].objectSize = (uint)size;
    bulkFreeMemoryWrittenCount++;
    if (bulkFreeMemoryWrittenCount == Recycler::BulkFreeMemoryCount)
    {
        FlushFreeRecord();
        Assert(bulkFreeMemoryWrittenCount == 0);
    }
}
#endif
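
// Editor's note: the two functions above batch free notifications. Records
// accumulate in etwFreeRecords until BulkFreeMemoryCount entries are buffered,
// then one JSCRIPT_RECYCLER_FREE_MEMORY event flushes the whole batch, so N frees
// cost on the order of N / BulkFreeMemoryCount ETW writes rather than N.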
  7287. #endif
  7288. #ifdef PROFILE_EXEC
  7289. ArenaAllocator *
  7290. Recycler::AddBackgroundProfilerArena()
  7291. {
  7292. return this->backgroundProfilerArena.PrependNode(&HeapAllocator::Instance,
  7293. _u("BgGCProfiler"), &this->backgroundProfilerPageAllocator, Js::Throw::OutOfMemory);
  7294. }
  7295. void
  7296. Recycler::ReleaseBackgroundProfilerArena(ArenaAllocator * arena)
  7297. {
  7298. this->backgroundProfilerArena.RemoveElement(&HeapAllocator::Instance, arena);
  7299. }
  7300. void
  7301. Recycler::SetProfiler(Js::Profiler * profiler, Js::Profiler * backgroundProfiler)
  7302. {
  7303. this->profiler = profiler;
  7304. this->backgroundProfiler = backgroundProfiler;
  7305. }
  7306. #endif
void Recycler::SetObjectBeforeCollectCallback(void* object,
    ObjectBeforeCollectCallback callback,
    void* callbackState,
    ObjectBeforeCollectCallbackWrapper callbackWrapper,
    void* threadContext)
{
    if (objectBeforeCollectCallbackState == ObjectBeforeCollectCallback_Shutdown)
    {
        return; // NOP at shutdown
    }
    if (objectBeforeCollectCallbackMap == nullptr)
    {
        if (callback == nullptr) return;
        objectBeforeCollectCallbackMap = HeapNew(ObjectBeforeCollectCallbackMap, &HeapAllocator::Instance);
    }
    // only allow 1 callback per object
    objectBeforeCollectCallbackMap->Item(object, ObjectBeforeCollectCallbackData(callbackWrapper, callback, callbackState, threadContext));
    if (callback != nullptr && this->IsInObjectBeforeCollectCallback()) // revive
    {
        this->ScanMemory<false>(&object, sizeof(object));
        this->ProcessMark(/*background*/false);
    }
}
bool Recycler::ProcessObjectBeforeCollectCallbacks(bool atShutdown/*= false*/)
{
    if (this->objectBeforeCollectCallbackMap == nullptr)
    {
        return false; // no callbacks
    }
    Assert(atShutdown || this->IsMarkState());
    Assert(!this->IsInObjectBeforeCollectCallback());
    AutoRestoreValue<ObjectBeforeCollectCallbackState> autoInObjectBeforeCollectCallback(&objectBeforeCollectCallbackState,
        atShutdown ? ObjectBeforeCollectCallback_Shutdown : ObjectBeforeCollectCallback_Normal);
    // The callbacks may register/unregister callbacks while we are enumerating the current map. To avoid
    // conflicting usage of the callback map, we swap it out. New registrations will go to a new map.
    AutoAllocatorObjectPtr<ObjectBeforeCollectCallbackMap, HeapAllocator> oldCallbackMap(
        this->objectBeforeCollectCallbackMap, &HeapAllocator::Instance);
    this->objectBeforeCollectCallbackMap = nullptr;
    bool hasRemainingCallbacks = false;
    oldCallbackMap->MapAndRemoveIf([&](const ObjectBeforeCollectCallbackMap::EntryType& entry)
    {
        const ObjectBeforeCollectCallbackData& data = entry.Value();
        if (data.callback != nullptr)
        {
            void* object = entry.Key();
            if (atShutdown || !this->IsObjectMarked(object))
            {
                if (data.callbackWrapper != nullptr)
                {
                    data.callbackWrapper(data.callback, object, data.callbackState, data.threadContext);
                }
                else
                {
                    data.callback(object, data.callbackState);
                }
            }
            else
            {
                hasRemainingCallbacks = true;
                return false; // Do not remove this entry; keep the callback for a future collection
            }
        }
        return true; // Remove this entry
    });
    // Merge back remaining callbacks, if any
    if (hasRemainingCallbacks)
    {
        if (this->objectBeforeCollectCallbackMap == nullptr)
        {
            this->objectBeforeCollectCallbackMap = oldCallbackMap.Detach();
        }
        else
        {
            if (oldCallbackMap->Count() > this->objectBeforeCollectCallbackMap->Count())
            {
                // Swap so that oldCallbackMap is the smaller one
                ObjectBeforeCollectCallbackMap* tmp = oldCallbackMap.Detach();
                *&oldCallbackMap = this->objectBeforeCollectCallbackMap;
                this->objectBeforeCollectCallbackMap = tmp;
            }
            oldCallbackMap->Map([&](void* object, const ObjectBeforeCollectCallbackData& data)
            {
                this->objectBeforeCollectCallbackMap->Item(object, data);
            });
        }
    }
    return true; // callbacks may have been invoked
}
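
// Editor's note: an illustrative sketch of registering against the API above
// (hypothetical call site and callback, not part of the build):
//
//     recycler->SetObjectBeforeCollectCallback(obj,
//         [](void * object, void * callbackState) { /* release external state */ },
//         /*callbackState*/ nullptr,
//         /*callbackWrapper*/ nullptr,
//         /*threadContext*/ nullptr);
//
// The callback fires when obj is found unreachable (or at shutdown); registering
// a new callback for the same object replaces the old one, since only one
// callback per object is kept.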
void Recycler::ClearObjectBeforeCollectCallbacks()
{
    // Called at shutdown. All objects will be gone; invoke each registered callback, if any.
    ProcessObjectBeforeCollectCallbacks(/*atShutdown*/true);
    Assert(objectBeforeCollectCallbackMap == nullptr);
}

#ifdef RECYCLER_TEST_SUPPORT
void Recycler::SetCheckFn(BOOL(*checkFn)(char* addr, size_t size))
{
    Assert(BinaryFeatureControl::RecyclerTest());
    this->EnsureNotCollecting();
    this->checkFn = checkFn;
}
#endif
void
Recycler::NotifyFree(__in char *address, size_t size)
{
    RecyclerVerboseTrace(GetRecyclerFlagsTable(), _u("Sweeping object %p\n"), address);
#ifdef RECYCLER_TEST_SUPPORT
    if (BinaryFeatureControl::RecyclerTest())
    {
        if (checkFn != NULL)
        {
            checkFn(address, size);
        }
    }
#endif
#ifdef ENABLE_JS_ETW
    if (EventEnabledJSCRIPT_RECYCLER_FREE_MEMORY())
    {
        AppendFreeMemoryETWRecord(address, (UINT)size);
    }
#endif
    RecyclerMemoryTracking::ReportFree(this, address, size);
    RECYCLER_PERF_COUNTER_DEC(LiveObject);
    RECYCLER_PERF_COUNTER_SUB(LiveObjectSize, size);
    RECYCLER_PERF_COUNTER_ADD(FreeObjectSize, size);
    if (HeapInfo::IsSmallBlockAllocation(HeapInfo::GetAlignedSizeNoCheck(size)))
    {
        RECYCLER_PERF_COUNTER_DEC(SmallHeapBlockLiveObject);
        RECYCLER_PERF_COUNTER_SUB(SmallHeapBlockLiveObjectSize, size);
        RECYCLER_PERF_COUNTER_ADD(SmallHeapBlockFreeObjectSize, size);
    }
    else
    {
        RECYCLER_PERF_COUNTER_DEC(LargeHeapBlockLiveObject);
        RECYCLER_PERF_COUNTER_SUB(LargeHeapBlockLiveObjectSize, size);
        RECYCLER_PERF_COUNTER_ADD(LargeHeapBlockFreeObjectSize, size);
    }
#ifdef RECYCLER_MEMORY_VERIFY
    if (this->VerifyEnabled())
    {
        VerifyCheckPad(address, size);
    }
#endif
#ifdef PROFILE_RECYCLER_ALLOC
    if (!CONFIG_FLAG(KeepRecyclerTrackData))
    {
        TrackFree(address, size);
    }
#endif
#ifdef RECYCLER_STATS
    collectionStats.objectSweptCount++;
    collectionStats.objectSweptBytes += size;
    if (!isForceSweeping)
    {
        collectionStats.objectSweptFreeListCount++;
        collectionStats.objectSweptFreeListBytes += size;
    }
#endif
}
#if DBG
void
Recycler::WBSetBit(char* addr)
{
    Recycler* recycler = Recycler::recyclerList;
    while (recycler)
    {
        auto heapBlock = recycler->FindHeapBlock((void*)((UINT_PTR)addr & ~HeapInfo::ObjectAlignmentMask));
        if (heapBlock)
        {
            heapBlock->WBSetBit(addr);
            break;
        }
        recycler = recycler->next;
    }
}

void
Recycler::WBSetBits(char* addr, uint length)
{
    Recycler* recycler = Recycler::recyclerList;
    while (recycler)
    {
        auto heapBlock = recycler->FindHeapBlock((void*)((UINT_PTR)addr & ~HeapInfo::ObjectAlignmentMask));
        if (heapBlock)
        {
            heapBlock->WBSetBits(addr, length);
        }
        recycler = recycler->next;
    }
}
#endif
size_t
RecyclerHeapObjectInfo::GetSize() const
{
    Assert(m_heapBlock);
    size_t size;
#if LARGEHEAPBLOCK_ENCODING
    if (isUsingLargeHeapBlock)
    {
        size = m_largeHeapBlockHeader->objectSize;
    }
#else
    if (m_heapBlock->IsLargeHeapBlock())
    {
        size = ((LargeHeapBlock*)m_heapBlock)->GetObjectSize(m_address);
    }
#endif
    else
    {
        // All small heap block types have the same layout for the object size field.
        size = ((SmallHeapBlock*)m_heapBlock)->GetObjectSize();
    }
#ifdef RECYCLER_MEMORY_VERIFY
    if (m_recycler->VerifyEnabled())
    {
        size -= *(size_t *)(((char *)m_address) + size - sizeof(size_t));
    }
#endif
    return size;
}
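
// Editor's note: with RECYCLER_MEMORY_VERIFY enabled, GetSize above undoes the
// encoding written by FillPadNoCheck: for a 64-byte slot whose tail word holds a
// pad size of 16, it returns the original 48-byte request.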
template char* Recycler::AllocWithAttributesInlined<(Memory::ObjectInfoBits)32, false>(size_t);