| 123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702
70370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088108910901091109210931094109510961097109810991100110111021103110411051106110711081109111011111112111311141115111611171118111911201121112211231124112511261127112811291130113111321133113411351136113711381139114011411142114311441145114611471148114911501151115211531154115511561157115811591160116111621163116411651166116711681169117011711172117311741175117611771178117911801181118211831184118511861187118811891190119111921193119411951196119711981199120012011202120312041205120612071208120912101211121212131214121512161217121812191220122112221223122412251226122712281229123012311232123312341235123612371238123912401241124212431244124512461247124812491250125112521253125412551256125712581259126012611262126312641265126612671268126912701271127212731274127512761
27712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426142714281429143014311432143314341435143614371438143914401441144214431444144514461447144814491450145114521453145414551456145714581459146014611462146314641465146614671468146914701471147214731474147514761477147814791480148114821483148414851486148714881489149014911492149314941495149614971498149915001501150215031504150515061507150815091510151115121513151415151516151715181519152015211522152315241525152615271528152915301531153215331534153515361537153815391540154115421543154415451546154715481549155015511552155315541555155615571558155915601561156215631564156515661567156815691570157115721573157415751576157715781579158015811582158315841585158615871588158915901591159215931594159515961597159815991600160116021603160416051606160716081609161016111612161316141615161616171618161916201621162216231624162516261627162816291630163116321633163416351636163716381639164016411642164316441645164616471648164916501651165216531654165516561657165816591660166116621663166416651666166716681669167016711672167316741675167616771678167916801681168216831684168516861687168816891690169116921693169416951696169716981699170017011702170317041705170617071708170917101711171217131714171517161717171817191720172117221723172417251726172717281729173017311732173317341735173617371738173917401741174217431744174517461747174817491750175117521753175417551756175717581759176017611762176317641765176617671768176917701771177217731774177517761
77717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542055205620572058205920602061206220632064206520662067206820692070207120722073207420752076207720782079208020812082208320842085208620872088208920902091209220932094209520962097209820992100210121022103210421052106210721082109211021112112211321142115211621172118211921202121212221232124212521262127212821292130213121322133213421352136213721382139214021412142214321442145214621472148214921502151215221532154215521562157215821592160216121622163216421652166216721682169217021712172217321742175217621772178217921802181218221832184218521862187218821892190219121922193219421952196219721982199220022012202220322042205220622072208220922102211221222132214221522162217221822192220222122222223222422252226222722282229223022312232223322342235223622372238223922402241224222432244224522462247224822492250225122522253225422552256225722582259226022612262226322642265226622672268226922702271227222732274227522762
27722782279228022812282228322842285228622872288228922902291229222932294229522962297229822992300230123022303230423052306230723082309231023112312231323142315231623172318231923202321232223232324232523262327232823292330233123322333233423352336233723382339234023412342234323442345234623472348234923502351235223532354235523562357235823592360236123622363236423652366236723682369237023712372237323742375237623772378237923802381238223832384238523862387238823892390239123922393239423952396239723982399240024012402240324042405240624072408240924102411241224132414241524162417241824192420242124222423242424252426242724282429243024312432243324342435243624372438243924402441244224432444244524462447244824492450245124522453245424552456245724582459246024612462246324642465246624672468246924702471247224732474247524762477247824792480248124822483248424852486248724882489249024912492249324942495249624972498249925002501250225032504250525062507250825092510251125122513251425152516251725182519252025212522252325242525252625272528252925302531253225332534253525362537253825392540254125422543254425452546254725482549255025512552255325542555255625572558255925602561256225632564256525662567256825692570257125722573257425752576257725782579258025812582258325842585258625872588258925902591259225932594259525962597259825992600260126022603260426052606260726082609261026112612261326142615261626172618261926202621262226232624262526262627262826292630263126322633263426352636263726382639264026412642264326442645264626472648264926502651265226532654265526562657265826592660266126622663266426652666266726682669267026712672267326742675267626772678267926802681268226832684268526862687268826892690269126922693269426952696269726982699270027012702270327042705270627072708270927102711271227132714271527162717271827192720272127222723272427252726272727282729273027312732273327342735273627372738273927402741274227432744274527462747274827492750275127522753275427552756275727582759276027612762276327642765276627672768276927702771277227732774277527762
77727782779278027812782278327842785278627872788278927902791279227932794279527962797279827992800280128022803280428052806280728082809281028112812281328142815281628172818281928202821282228232824282528262827282828292830283128322833283428352836283728382839284028412842284328442845284628472848284928502851285228532854285528562857285828592860286128622863286428652866286728682869287028712872287328742875287628772878287928802881288228832884288528862887288828892890289128922893289428952896289728982899290029012902290329042905290629072908290929102911291229132914291529162917291829192920292129222923292429252926292729282929293029312932293329342935293629372938293929402941294229432944294529462947294829492950295129522953295429552956295729582959296029612962296329642965296629672968296929702971297229732974297529762977297829792980298129822983298429852986298729882989299029912992299329942995299629972998299930003001300230033004300530063007300830093010301130123013301430153016301730183019302030213022302330243025302630273028302930303031303230333034303530363037303830393040304130423043304430453046304730483049305030513052305330543055305630573058305930603061306230633064306530663067306830693070307130723073307430753076307730783079308030813082308330843085308630873088308930903091309230933094309530963097309830993100310131023103310431053106310731083109311031113112311331143115311631173118311931203121312231233124312531263127312831293130313131323133313431353136313731383139314031413142314331443145314631473148314931503151315231533154315531563157315831593160316131623163316431653166316731683169317031713172317331743175317631773178317931803181318231833184318531863187318831893190319131923193319431953196319731983199320032013202320332043205320632073208320932103211321232133214321532163217321832193220322132223223322432253226322732283229323032313232323332343235323632373238323932403241324232433244324532463247324832493250325132523253325432553256325732583259326032613262326332643265326632673268326932703271327232733274327532763
27732783279328032813282328332843285328632873288328932903291329232933294329532963297329832993300330133023303330433053306330733083309331033113312331333143315331633173318331933203321332233233324332533263327332833293330333133323333333433353336333733383339334033413342334333443345334633473348334933503351335233533354335533563357335833593360336133623363336433653366336733683369337033713372337333743375337633773378337933803381338233833384338533863387338833893390339133923393339433953396339733983399340034013402340334043405340634073408340934103411341234133414341534163417341834193420342134223423342434253426342734283429343034313432343334343435343634373438343934403441344234433444344534463447344834493450345134523453345434553456345734583459346034613462346334643465346634673468346934703471347234733474347534763477347834793480348134823483348434853486348734883489349034913492349334943495349634973498349935003501350235033504350535063507350835093510351135123513351435153516351735183519352035213522352335243525352635273528352935303531353235333534353535363537353835393540354135423543354435453546354735483549355035513552355335543555355635573558355935603561356235633564356535663567356835693570357135723573357435753576357735783579358035813582358335843585358635873588358935903591359235933594359535963597359835993600360136023603360436053606360736083609361036113612361336143615361636173618361936203621362236233624362536263627362836293630363136323633363436353636363736383639364036413642364336443645364636473648364936503651365236533654365536563657365836593660366136623663366436653666366736683669367036713672367336743675367636773678367936803681368236833684368536863687368836893690369136923693369436953696369736983699370037013702370337043705370637073708370937103711371237133714371537163717371837193720372137223723372437253726372737283729373037313732373337343735373637373738373937403741374237433744374537463747374837493750375137523753375437553756375737583759376037613762376337643765376637673768376937703771377237733774377537763
77737783779378037813782378337843785378637873788378937903791379237933794379537963797379837993800380138023803380438053806380738083809381038113812381338143815381638173818381938203821382238233824382538263827382838293830383138323833383438353836383738383839384038413842384338443845384638473848384938503851385238533854385538563857385838593860386138623863386438653866386738683869387038713872387338743875387638773878387938803881388238833884388538863887388838893890389138923893389438953896389738983899390039013902390339043905390639073908390939103911391239133914391539163917391839193920392139223923392439253926392739283929393039313932393339343935393639373938393939403941394239433944394539463947394839493950395139523953395439553956395739583959396039613962396339643965396639673968396939703971397239733974397539763977397839793980398139823983398439853986398739883989399039913992399339943995399639973998399940004001400240034004400540064007400840094010401140124013401440154016401740184019402040214022402340244025402640274028402940304031403240334034403540364037403840394040404140424043404440454046404740484049405040514052405340544055405640574058405940604061406240634064406540664067406840694070407140724073407440754076407740784079408040814082408340844085408640874088408940904091409240934094409540964097409840994100410141024103410441054106410741084109411041114112411341144115411641174118411941204121412241234124412541264127412841294130413141324133413441354136413741384139414041414142414341444145414641474148414941504151415241534154415541564157415841594160416141624163416441654166416741684169417041714172417341744175417641774178417941804181418241834184418541864187418841894190419141924193419441954196419741984199420042014202420342044205420642074208420942104211421242134214421542164217421842194220422142224223422442254226422742284229423042314232423342344235423642374238423942404241424242434244424542464247424842494250425142524253425442554256425742584259426042614262426342644265426642674268426942704271427242734274427542764
27742784279428042814282428342844285428642874288428942904291429242934294429542964297429842994300430143024303430443054306430743084309431043114312431343144315431643174318431943204321432243234324432543264327432843294330433143324333433443354336433743384339434043414342434343444345434643474348434943504351435243534354435543564357435843594360436143624363436443654366436743684369437043714372437343744375437643774378437943804381438243834384438543864387438843894390439143924393439443954396439743984399440044014402440344044405440644074408440944104411441244134414441544164417441844194420442144224423442444254426442744284429443044314432443344344435443644374438443944404441444244434444444544464447444844494450445144524453445444554456445744584459446044614462446344644465446644674468446944704471447244734474447544764477447844794480448144824483448444854486448744884489449044914492449344944495449644974498449945004501450245034504450545064507450845094510451145124513451445154516451745184519452045214522452345244525452645274528452945304531453245334534453545364537453845394540454145424543454445454546454745484549455045514552455345544555455645574558455945604561456245634564456545664567456845694570457145724573457445754576457745784579458045814582458345844585458645874588458945904591459245934594459545964597459845994600460146024603460446054606460746084609461046114612461346144615461646174618461946204621462246234624462546264627462846294630463146324633463446354636463746384639464046414642464346444645464646474648464946504651465246534654465546564657465846594660466146624663466446654666466746684669467046714672467346744675467646774678467946804681468246834684468546864687468846894690469146924693469446954696469746984699470047014702470347044705470647074708470947104711471247134714471547164717471847194720472147224723472447254726472747284729473047314732473347344735473647374738473947404741474247434744474547464747474847494750475147524753475447554756475747584759476047614762476347644765476647674768476947704771477247734774477547764
77747784779478047814782478347844785478647874788478947904791479247934794479547964797479847994800480148024803480448054806480748084809481048114812481348144815481648174818481948204821482248234824482548264827482848294830483148324833483448354836483748384839484048414842484348444845484648474848484948504851485248534854485548564857485848594860486148624863486448654866486748684869487048714872487348744875487648774878487948804881488248834884488548864887488848894890489148924893489448954896489748984899490049014902490349044905490649074908490949104911491249134914491549164917491849194920492149224923492449254926492749284929493049314932493349344935493649374938493949404941494249434944494549464947494849494950495149524953495449554956495749584959496049614962496349644965496649674968496949704971497249734974497549764977497849794980498149824983498449854986498749884989499049914992499349944995499649974998499950005001500250035004500550065007500850095010501150125013501450155016501750185019502050215022502350245025502650275028502950305031503250335034503550365037503850395040504150425043504450455046504750485049505050515052505350545055505650575058505950605061506250635064506550665067506850695070507150725073507450755076507750785079508050815082508350845085508650875088508950905091509250935094509550965097509850995100510151025103510451055106510751085109511051115112511351145115511651175118511951205121512251235124512551265127512851295130513151325133513451355136513751385139514051415142514351445145514651475148514951505151515251535154515551565157515851595160516151625163516451655166516751685169517051715172517351745175517651775178517951805181518251835184518551865187518851895190519151925193519451955196519751985199520052015202520352045205520652075208520952105211521252135214521552165217521852195220522152225223522452255226522752285229523052315232523352345235523652375238523952405241524252435244524552465247524852495250525152525253525452555256525752585259526052615262526352645265526652675268526952705271527252735274527552765
27752785279528052815282528352845285528652875288528952905291529252935294529552965297529852995300530153025303530453055306530753085309531053115312531353145315531653175318531953205321532253235324532553265327532853295330533153325333533453355336533753385339534053415342534353445345534653475348534953505351535253535354535553565357535853595360536153625363536453655366536753685369537053715372537353745375537653775378537953805381538253835384538553865387538853895390539153925393539453955396539753985399540054015402540354045405540654075408540954105411541254135414541554165417541854195420542154225423542454255426542754285429543054315432543354345435543654375438543954405441544254435444544554465447544854495450545154525453545454555456545754585459546054615462546354645465546654675468546954705471547254735474547554765477547854795480548154825483548454855486548754885489549054915492549354945495549654975498549955005501550255035504550555065507550855095510551155125513551455155516551755185519552055215522552355245525552655275528552955305531553255335534553555365537553855395540554155425543554455455546554755485549555055515552555355545555555655575558555955605561556255635564556555665567556855695570557155725573557455755576557755785579558055815582558355845585558655875588558955905591559255935594559555965597559855995600560156025603560456055606560756085609561056115612561356145615561656175618561956205621562256235624562556265627562856295630563156325633563456355636563756385639564056415642564356445645564656475648564956505651565256535654565556565657565856595660566156625663566456655666566756685669567056715672567356745675567656775678567956805681568256835684568556865687568856895690569156925693569456955696569756985699570057015702570357045705570657075708570957105711571257135714571557165717571857195720572157225723572457255726572757285729573057315732573357345735573657375738573957405741574257435744574557465747574857495750575157525753575457555756575757585759576057615762576357645765576657675768576957705771577257735774577557765
77757785779578057815782578357845785578657875788578957905791579257935794579557965797579857995800580158025803580458055806580758085809581058115812581358145815581658175818581958205821582258235824582558265827582858295830583158325833583458355836583758385839584058415842584358445845584658475848584958505851585258535854585558565857585858595860586158625863586458655866586758685869587058715872587358745875587658775878587958805881588258835884588558865887588858895890589158925893589458955896589758985899590059015902590359045905590659075908590959105911591259135914591559165917591859195920592159225923592459255926592759285929593059315932593359345935593659375938593959405941594259435944594559465947594859495950595159525953595459555956595759585959596059615962596359645965596659675968596959705971597259735974597559765977597859795980598159825983598459855986598759885989599059915992599359945995599659975998599960006001600260036004600560066007600860096010601160126013601460156016601760186019602060216022602360246025602660276028602960306031603260336034603560366037603860396040604160426043604460456046604760486049605060516052605360546055605660576058605960606061606260636064606560666067606860696070607160726073607460756076607760786079608060816082608360846085608660876088608960906091609260936094609560966097609860996100610161026103610461056106610761086109611061116112611361146115611661176118611961206121612261236124612561266127612861296130613161326133613461356136613761386139614061416142614361446145614661476148614961506151615261536154615561566157615861596160616161626163616461656166616761686169617061716172617361746175617661776178617961806181618261836184618561866187618861896190619161926193619461956196619761986199620062016202620362046205620662076208620962106211621262136214621562166217621862196220622162226223622462256226622762286229623062316232623362346235623662376238623962406241624262436244624562466247624862496250625162526253625462556256625762586259626062616262626362646265626662676268626962706271627262736274627562766
27762786279628062816282628362846285628662876288628962906291629262936294629562966297629862996300630163026303630463056306630763086309631063116312631363146315631663176318631963206321632263236324632563266327632863296330633163326333633463356336633763386339634063416342634363446345634663476348634963506351635263536354635563566357635863596360636163626363636463656366636763686369637063716372637363746375637663776378637963806381638263836384638563866387638863896390639163926393639463956396639763986399640064016402640364046405640664076408640964106411641264136414641564166417641864196420642164226423642464256426642764286429643064316432643364346435643664376438643964406441644264436444644564466447644864496450645164526453645464556456645764586459646064616462646364646465646664676468646964706471647264736474647564766477647864796480648164826483648464856486648764886489649064916492649364946495649664976498649965006501650265036504650565066507650865096510651165126513651465156516651765186519652065216522652365246525652665276528652965306531653265336534653565366537653865396540654165426543654465456546654765486549655065516552655365546555655665576558655965606561656265636564656565666567656865696570657165726573657465756576657765786579658065816582658365846585658665876588658965906591659265936594659565966597659865996600660166026603660466056606660766086609661066116612661366146615661666176618661966206621662266236624662566266627662866296630663166326633663466356636663766386639664066416642664366446645664666476648664966506651665266536654665566566657665866596660666166626663666466656666666766686669667066716672667366746675667666776678667966806681668266836684668566866687668866896690669166926693669466956696669766986699670067016702670367046705670667076708670967106711671267136714671567166717671867196720672167226723672467256726672767286729673067316732673367346735673667376738673967406741674267436744674567466747674867496750675167526753675467556756675767586759676067616762676367646765676667676768676967706771677267736774677567766
77767786779678067816782678367846785678667876788678967906791679267936794679567966797679867996800680168026803680468056806680768086809681068116812681368146815681668176818681968206821682268236824682568266827682868296830683168326833683468356836683768386839684068416842684368446845684668476848684968506851685268536854685568566857685868596860686168626863686468656866686768686869687068716872687368746875687668776878687968806881688268836884688568866887688868896890689168926893689468956896689768986899690069016902690369046905690669076908690969106911691269136914691569166917691869196920692169226923692469256926692769286929693069316932693369346935693669376938693969406941694269436944694569466947694869496950695169526953695469556956695769586959696069616962696369646965696669676968696969706971697269736974697569766977697869796980698169826983698469856986698769886989699069916992699369946995699669976998699970007001700270037004700570067007700870097010701170127013701470157016701770187019702070217022702370247025702670277028702970307031703270337034703570367037703870397040704170427043704470457046704770487049705070517052705370547055705670577058705970607061706270637064706570667067706870697070707170727073707470757076707770787079708070817082708370847085708670877088708970907091709270937094709570967097709870997100710171027103710471057106710771087109711071117112711371147115711671177118711971207121712271237124712571267127712871297130713171327133713471357136713771387139714071417142714371447145714671477148714971507151715271537154715571567157715871597160716171627163716471657166716771687169717071717172717371747175717671777178717971807181718271837184718571867187718871897190719171927193719471957196719771987199720072017202720372047205720672077208720972107211721272137214721572167217721872197220722172227223722472257226722772287229723072317232723372347235723672377238723972407241724272437244724572467247724872497250725172527253725472557256725772587259726072617262726372647265726672677268726972707271727272737274727572767
27772787279728072817282728372847285728672877288728972907291729272937294729572967297729872997300730173027303730473057306730773087309731073117312731373147315731673177318731973207321732273237324732573267327732873297330733173327333733473357336733773387339734073417342734373447345734673477348734973507351735273537354735573567357735873597360736173627363736473657366736773687369737073717372737373747375737673777378737973807381738273837384738573867387738873897390739173927393739473957396739773987399740074017402740374047405740674077408740974107411741274137414741574167417741874197420742174227423742474257426742774287429743074317432743374347435743674377438743974407441744274437444744574467447744874497450745174527453745474557456745774587459746074617462746374647465746674677468746974707471747274737474747574767477747874797480748174827483748474857486748774887489749074917492749374947495749674977498749975007501750275037504750575067507750875097510751175127513751475157516751775187519752075217522752375247525752675277528752975307531753275337534753575367537753875397540754175427543754475457546754775487549755075517552755375547555755675577558755975607561756275637564756575667567756875697570757175727573757475757576757775787579758075817582758375847585758675877588758975907591759275937594759575967597759875997600760176027603760476057606760776087609761076117612761376147615761676177618761976207621762276237624762576267627762876297630763176327633763476357636763776387639764076417642764376447645764676477648764976507651765276537654765576567657765876597660766176627663766476657666766776687669767076717672767376747675767676777678767976807681768276837684768576867687768876897690769176927693769476957696769776987699770077017702770377047705770677077708770977107711771277137714771577167717771877197720772177227723772477257726772777287729773077317732773377347735773677377738773977407741774277437744774577467747774877497750775177527753775477557756775777587759776077617762776377647765776677677768776977707771777277737774777577767
77777787779778077817782778377847785778677877788778977907791779277937794779577967797779877997800780178027803780478057806780778087809781078117812781378147815781678177818781978207821782278237824782578267827782878297830783178327833783478357836783778387839784078417842784378447845784678477848784978507851785278537854785578567857785878597860786178627863786478657866786778687869787078717872787378747875787678777878787978807881788278837884788578867887788878897890789178927893789478957896789778987899790079017902790379047905790679077908790979107911791279137914791579167917791879197920792179227923792479257926792779287929793079317932793379347935793679377938793979407941794279437944794579467947794879497950795179527953795479557956795779587959796079617962796379647965796679677968796979707971797279737974797579767977797879797980798179827983798479857986798779887989799079917992799379947995799679977998799980008001800280038004800580068007800880098010801180128013801480158016801780188019802080218022802380248025802680278028802980308031803280338034803580368037803880398040804180428043804480458046804780488049805080518052805380548055805680578058805980608061806280638064806580668067806880698070807180728073807480758076807780788079808080818082808380848085808680878088808980908091809280938094809580968097809880998100810181028103810481058106810781088109811081118112811381148115811681178118811981208121812281238124812581268127812881298130813181328133813481358136813781388139814081418142814381448145814681478148814981508151815281538154815581568157815881598160816181628163816481658166816781688169817081718172817381748175817681778178817981808181818281838184818581868187818881898190819181928193819481958196819781988199820082018202820382048205820682078208820982108211821282138214821582168217821882198220822182228223822482258226822782288229823082318232823382348235823682378238823982408241824282438244824582468247824882498250825182528253825482558256825782588259826082618262826382648265826682678268826982708271827282738274827582768
2778278827982808281828282838284828582868287828882898290829182928293829482958296829782988299830083018302830383048305830683078308830983108311831283138314831583168317831883198320832183228323832483258326832783288329833083318332833383348335833683378338833983408341834283438344834583468347834883498350835183528353835483558356835783588359836083618362836383648365836683678368836983708371837283738374837583768377837883798380838183828383838483858386838783888389839083918392839383948395839683978398839984008401840284038404840584068407840884098410841184128413841484158416841784188419842084218422842384248425842684278428842984308431843284338434843584368437843884398440844184428443844484458446844784488449845084518452845384548455845684578458845984608461846284638464846584668467846884698470847184728473847484758476847784788479848084818482848384848485848684878488848984908491849284938494849584968497849884998500850185028503850485058506850785088509851085118512851385148515851685178518851985208521852285238524852585268527852885298530853185328533853485358536853785388539854085418542854385448545854685478548854985508551855285538554855585568557855885598560856185628563856485658566856785688569857085718572857385748575857685778578857985808581858285838584 |
- //-------------------------------------------------------------------------------------------------------
- // Copyright (C) Microsoft. All rights reserved.
- // Licensed under the MIT license. See LICENSE.txt file in the project root for full license information.
- //-------------------------------------------------------------------------------------------------------
- #include "CommonMemoryPch.h"
- #ifdef _M_AMD64
- #include "amd64.h"
- #endif
- #ifdef _M_ARM
- #include "arm.h"
- #endif
- #ifdef _M_ARM64
- #include "arm64.h"
- #endif
- #include "Core/BinaryFeatureControl.h"
- #include "Common/ThreadService.h"
- #include "Memory/AutoAllocatorObjectPtr.h"
// Per-type perf counter used to track live RecyclerWeakReferenceBase instances.
DEFINE_RECYCLER_TRACKER_PERF_COUNTER(RecyclerWeakReferenceBase);
#ifdef PROFILE_RECYCLER_ALLOC
// Marker types used only for their typeid: they label special regions in the
// allocation-profiling tracker rather than ever being instantiated.
struct UnallocatedPortionOfBumpAllocatedBlock
{
};
struct ExplicitFreeListedObject
{
};
// Sentinel tracker entries for (a) the unused tail of a bump-allocated block
// and (b) objects sitting on an explicit free list.
Recycler::TrackerData Recycler::TrackerData::EmptyData(&typeid(UnallocatedPortionOfBumpAllocatedBlock), false);
Recycler::TrackerData Recycler::TrackerData::ExplicitFreeListObjectData(&typeid(ExplicitFreeListedObject), false);
#endif
// Numeric kinds written into ETW GC-activation events so traces can tell
// which flavor of collection was started (values 0-3 are whole collections,
// 11-15 are individual concurrent phases).
enum ETWEventGCActivationKind : unsigned
{
    ETWEvent_GarbageCollect = 0, // force in-thread GC
    ETWEvent_ThreadCollect = 1, // thread GC with wait
    ETWEvent_ConcurrentCollect = 2,
    ETWEvent_PartialCollect = 3,
    ETWEvent_ConcurrentMark = 11,
    ETWEvent_ConcurrentRescan = 12,
    ETWEvent_ConcurrentSweep = 13,
    ETWEvent_ConcurrentTransferSwept = 14,
    ETWEvent_ConcurrentFinishMark = 15,
};
// Process-wide default collection wrapper; used when a host does not install
// its own wrapper (see the Recycler constructor's collectionWrapper init).
DefaultRecyclerCollectionWrapper DefaultRecyclerCollectionWrapper::Instance;
// Returns true when collection must not run on this recycler right now
// (heap enumeration in progress, or projection ref-count tracking re-entrancy).
// In DBG builds, validates that a disabled state only arises from one of the
// two expected causes.
inline bool
DefaultRecyclerCollectionWrapper::IsCollectionDisabled(Recycler * recycler)
{
    // GC shouldn't be triggered during heap enum, unless we missed a case where it allocate memory (which
    // shouldn't happen during heap enum) or for the case we explicitly allow allocation
    // REVIEW: isHeapEnumInProgress should have been a collection state and checked before to avoid a check here.
    // Collection will be disabled in VarDispEx because it could be called from projection re-entrance as ASTA allows
    // QI/AddRef/Release to come back.
    bool collectionDisabled = recycler->IsCollectionDisabled();
#if DBG
    if (collectionDisabled)
    {
        // disabled collection should only happen if we allowed allocation during heap enum
        if (recycler->IsHeapEnumInProgress())
        {
            Assert(recycler->AllowAllocationDuringHeapEnum());
        }
        else
        {
#ifdef ENABLE_PROJECTION
            Assert(recycler->IsInRefCountTrackingForProjection());
#else
            // Neither cause applies in this build configuration: unexpected.
            Assert(false);
#endif
        }
    }
#endif
    return collectionDisabled;
}
- BOOL DefaultRecyclerCollectionWrapper::ExecuteRecyclerCollectionFunction(Recycler * recycler, CollectionFunction function, CollectionFlags flags)
- {
- if (IsCollectionDisabled(recycler))
- {
- return FALSE;
- }
- BOOL ret = FALSE;
- BEGIN_NO_EXCEPTION
- {
- ret = (recycler->*(function))(flags);
- }
- END_NO_EXCEPTION;
- return ret;
- }
- void
- DefaultRecyclerCollectionWrapper::DisposeObjects(Recycler * recycler)
- {
- if (IsCollectionDisabled(recycler))
- {
- return;
- }
- BEGIN_NO_EXCEPTION
- {
- recycler->DisposeObjects();
- }
- END_NO_EXCEPTION;
- }
// Forward declaration; defined later in this file (platform-specific).
static void* GetStackBase();
// Explicit instantiations of the hot allocation paths so they are emitted
// (and force-inlined) in this translation unit for external callers.
template _ALWAYSINLINE char * Recycler::AllocWithAttributesInlined<NoBit, false>(size_t size);
template _ALWAYSINLINE char* Recycler::RealAlloc<NoBit, false>(HeapInfo* heap, size_t size);
template _ALWAYSINLINE _Ret_notnull_ void * __cdecl operator new<Recycler>(size_t byteSize, Recycler * alloc, char * (Recycler::*AllocFunc)(size_t));
// Recycler constructor: wires up the page allocators, mark contexts and page
// pools, and initializes every collection-state flag to its quiescent default.
// Heavyweight setup (heap buckets, concurrent thread, write watch) is deferred
// to Recycler::Initialize.
//   policyManager    - allocation policy shared by the recycler page allocators
//   pageAllocator    - caller-owned thread (leaf) page allocator
//   outOfMemoryFunc  - callback invoked on allocation failure
//   configFlagsTable - configuration flags controlling GC behavior
Recycler::Recycler(AllocationPolicyManager * policyManager, IdleDecommitPageAllocator * pageAllocator, void (*outOfMemoryFunc)(), Js::ConfigFlagsTable& configFlagsTable) :
    collectionState(CollectionStateNotCollecting),
    recyclerFlagsTable(configFlagsTable),
    recyclerPageAllocator(this, policyManager, configFlagsTable, RecyclerHeuristic::Instance.DefaultMaxFreePageCount, RecyclerHeuristic::Instance.DefaultMaxAllocPageCount),
    recyclerLargeBlockPageAllocator(this, policyManager, configFlagsTable, RecyclerHeuristic::Instance.DefaultMaxFreePageCount),
    threadService(nullptr),
#ifdef RECYCLER_WRITE_BARRIER_ALLOC_SEPARATE_PAGE
    recyclerWithBarrierPageAllocator(this, policyManager, configFlagsTable, RecyclerHeuristic::Instance.DefaultMaxFreePageCount, PageAllocator::DefaultMaxAllocPageCount, true),
#endif
    threadPageAllocator(pageAllocator),
    // One page pool / mark context for the in-thread mark plus three for
    // parallel marking (maxParallelism is capped at 4 in Initialize).
    markPagePool(configFlagsTable),
    parallelMarkPagePool1(configFlagsTable),
    parallelMarkPagePool2(configFlagsTable),
    parallelMarkPagePool3(configFlagsTable),
    markContext(this, &this->markPagePool),
    parallelMarkContext1(this, &this->parallelMarkPagePool1),
    parallelMarkContext2(this, &this->parallelMarkPagePool2),
    parallelMarkContext3(this, &this->parallelMarkPagePool3),
#if ENABLE_PARTIAL_GC
    clientTrackedObjectAllocator(_u("CTO-List"), GetPageAllocator(), Js::Throw::OutOfMemory),
#endif
    outOfMemoryFunc(outOfMemoryFunc),
#ifdef RECYCLER_TEST_SUPPORT
    checkFn(NULL),
#endif
    externalRootMarker(NULL),
    externalRootMarkerContext(NULL),
    recyclerSweep(nullptr),
    inEndMarkOnLowMemory(false),
    enableScanInteriorPointers(CUSTOM_CONFIG_FLAG(configFlagsTable, RecyclerForceMarkInterior)),
    enableScanImplicitRoots(false),
    disableCollectOnAllocationHeuristics(false),
    skipStack(false),
    mainThreadHandle(NULL),
#if ENABLE_CONCURRENT_GC
    backgroundFinishMarkCount(0),
    hasPendingUnpinnedObject(false),
    hasPendingConcurrentFindRoot(false),
    queueTrackedObject(false),
    enableConcurrentMark(false), // Default to non-concurrent
    enableParallelMark(false),
    enableConcurrentSweep(false),
    concurrentThread(NULL),
    concurrentWorkReadyEvent(NULL),
    concurrentWorkDoneEvent(NULL),
    parallelThread1(this, &Recycler::ParallelWorkFunc<0>),
    parallelThread2(this, &Recycler::ParallelWorkFunc<1>),
    priorityBoost(false),
    isAborting(false),
#if DBG
    concurrentThreadExited(true),
    isProcessingTrackedObjects(false),
    hasIncompleteDoCollect(false),
    isConcurrentGCOnIdle(false),
    isFinishGCOnIdle(false),
#endif
#ifdef IDLE_DECOMMIT_ENABLED
    concurrentIdleDecommitEvent(nullptr),
#endif
#endif
#if DBG
    isExternalStackSkippingGC(false),
    isProcessingRescan(false),
#endif
#if ENABLE_PARTIAL_GC
    inPartialCollectMode(false),
    scanPinnedObjectMap(false),
    partialUncollectedAllocBytes(0),
    // (size_t)-1 acts as the "not yet computed" sentinel for this heuristic.
    uncollectedNewPageCountPartialCollect((size_t)-1),
#if ENABLE_CONCURRENT_GC
    partialConcurrentNextCollection(false),
#endif
#ifdef RECYCLER_STRESS
    forcePartialScanStack(false),
#endif
#endif
#if defined(RECYCLER_DUMP_OBJECT_GRAPH) || defined(LEAK_REPORT) || defined(CHECK_MEMORY_LEAK)
    isPrimaryMarkContextInitialized(false),
#endif
    allowDispose(false),
    inDisposeWrapper(false),
    hasDisposableObject(false),
    tickCountNextDispose(0),
    hasPendingTransferDisposedObjects(false),
    transientPinnedObject(nullptr),
    // Pin / weak-reference maps use the no-mem-protect heap allocator with an
    // initial capacity of 1024 buckets.
    pinnedObjectMap(1024, HeapAllocator::GetNoMemProtectInstance()),
    weakReferenceMap(1024, HeapAllocator::GetNoMemProtectInstance()),
    weakReferenceCleanupId(0),
    collectionWrapper(&DefaultRecyclerCollectionWrapper::Instance),
    isScriptActive(false),
    isInScript(false),
    isShuttingDown(false),
    inExhaustiveCollection(false),
    hasExhaustiveCandidate(false),
    inDecommitNowCollection(false),
    inCacheCleanupCollection(false),
    hasPendingDeleteGuestArena(false),
    needOOMRescan(false),
#if ENABLE_CONCURRENT_GC && ENABLE_PARTIAL_GC
    hasBackgroundFinishPartial(false),
#endif
    decommitOnFinish(false)
#ifdef PROFILE_EXEC
    , profiler(nullptr)
    , backgroundProfiler(nullptr)
    , backgroundProfilerPageAllocator(nullptr, configFlagsTable, PageAllocatorType_GCThread)
    , backgroundProfilerArena()
#endif
#ifdef PROFILE_MEM
    , memoryData(nullptr)
#endif
#ifdef RECYCLER_DUMP_OBJECT_GRAPH
    , objectGraphDumper(nullptr)
    , dumpObjectOnceOnCollect(false)
#endif
#ifdef PROFILE_RECYCLER_ALLOC
    , trackerDictionary(nullptr)
#endif
#ifdef HEAP_ENUMERATION_VALIDATION
    ,pfPostHeapEnumScanCallback(nullptr)
#endif
#ifdef NTBUILD
    , telemetryBlock(&localTelemetryBlock)
#endif
#ifdef ENABLE_JS_ETW
    ,bulkFreeMemoryWrittenCount(0)
#endif
#ifdef RECYCLER_PAGE_HEAP
    , isPageHeapEnabled(false)
    , capturePageHeapAllocStack(false)
    , capturePageHeapFreeStack(false)
#endif
    , objectBeforeCollectCallbackMap(nullptr)
    , objectBeforeCollectCallbackState(ObjectBeforeCollectCallback_None)
{
#ifdef RECYCLER_MARK_TRACK
    // Shared mark map used by all mark contexts for mark tracking diagnostics.
    this->markMap = NoCheckHeapNew(MarkMap, &NoCheckHeapAllocator::Instance, 163, &markMapCriticalSection);
    markContext.SetMarkMap(markMap);
    parallelMarkContext1.SetMarkMap(markMap);
    parallelMarkContext2.SetMarkMap(markMap);
    parallelMarkContext3.SetMarkMap(markMap);
#endif
#ifdef RECYCLER_MEMORY_VERIFY
    verifyPad = GetRecyclerFlagsTable().RecyclerVerifyPadSize;
    verifyEnabled = GetRecyclerFlagsTable().IsEnabled(Js::RecyclerVerifyFlag);
    if (verifyEnabled)
    {
        ForEachPageAllocator([](IdleDecommitPageAllocator* pageAlloc)
        {
            pageAlloc->EnableVerify();
        });
    }
#endif
#ifdef RECYCLER_NO_PAGE_REUSE
    if (GetRecyclerFlagsTable().IsEnabled(Js::RecyclerNoPageReuseFlag))
    {
        ForEachPageAllocator([](IdleDecommitPageAllocator* pageAlloc)
        {
            pageAlloc->DisablePageReuse();
        });
    }
#endif
    // Remaining members are plain fields assigned here rather than in the
    // initializer list (many are only present in some build configurations).
    this->inDispose = false;
#if DBG
    this->heapBlockCount = 0;
    this->collectionCount = 0;
    this->disableThreadAccessCheck = false;
#if ENABLE_CONCURRENT_GC
    this->disableConcurrentThreadExitedCheck = false;
#endif
#endif
#if DBG || defined RECYCLER_TRACE
    this->inResolveExternalWeakReferences = false;
#endif
#if DBG || defined(RECYCLER_STATS)
    isForceSweeping = false;
#endif
#ifdef RECYCLER_FINALIZE_CHECK
    collectionStats.finalizeCount = 0;
#endif
    RecyclerMemoryTracking::ReportRecyclerCreate(this);
#if DBG_DUMP
    forceTraceMark = false;
    recyclerPageAllocator.debugName = _u("Recycler");
    recyclerLargeBlockPageAllocator.debugName = _u("RecyclerLargeBlock");
#ifdef RECYCLER_WRITE_BARRIER_ALLOC_SEPARATE_PAGE
    recyclerWithBarrierPageAllocator.debugName = _u("RecyclerWithBarrier");
#endif
#endif
    isHeapEnumInProgress = false;
    isCollectionDisabled = false;
#if DBG
    allowAllocationDuringRenentrance = false;
    allowAllocationDuringHeapEnum = false;
#ifdef ENABLE_PROJECTION
    isInRefCountTrackingForProjection = false;
#endif
#endif
    ScheduleNextCollection();
#if defined(RECYCLER_DUMP_OBJECT_GRAPH) || defined(LEAK_REPORT) || defined(CHECK_MEMORY_LEAK)
    this->inDllCanUnloadNow = false;
    this->inDetachProcess = false;
#endif
#ifdef NTBUILD
    // telemetryBlock points at localTelemetryBlock (see initializer list), so
    // zero the backing storage before anything reads through it.
    memset(&localTelemetryBlock, 0, sizeof(localTelemetryBlock));
#endif
#ifdef ENABLE_DEBUG_CONFIG_OPTIONS
    // recycler requires at least Recycler::PrimaryMarkStackReservedPageCount to function properly for the main mark context
    this->markContext.SetMaxPageCount(max(static_cast<size_t>(GetRecyclerFlagsTable().MaxMarkStackPageCount), static_cast<size_t>(Recycler::PrimaryMarkStackReservedPageCount)));
    this->parallelMarkContext1.SetMaxPageCount(GetRecyclerFlagsTable().MaxMarkStackPageCount);
    this->parallelMarkContext2.SetMaxPageCount(GetRecyclerFlagsTable().MaxMarkStackPageCount);
    this->parallelMarkContext3.SetMaxPageCount(GetRecyclerFlagsTable().MaxMarkStackPageCount);
    if (GetRecyclerFlagsTable().IsEnabled(Js::GCMemoryThresholdFlag))
    {
        // Note, we can't do this in the constructor for RecyclerHeuristic::Instance because it runs before config is processed
        RecyclerHeuristic::Instance.ConfigureBaseFactor(GetRecyclerFlagsTable().GCMemoryThreshold);
    }
#endif
}
#if DBG
// Debug-only: turns off the per-thread ownership checks on all recycler page
// allocators (used when a recycler is intentionally accessed cross-thread).
void
Recycler::SetDisableThreadAccessCheck()
{
    recyclerPageAllocator.SetDisableThreadAccessCheck();
    recyclerLargeBlockPageAllocator.SetDisableThreadAccessCheck();
#ifdef RECYCLER_WRITE_BARRIER_ALLOC_SEPARATE_PAGE
    recyclerWithBarrierPageAllocator.SetDisableThreadAccessCheck();
#endif
    disableThreadAccessCheck = true;
}
#endif
// Switches this recycler into MemProtect heap mode: interior pointers and
// implicit roots are scanned, allocation-triggered collection heuristics are
// disabled, and (in stress builds) the MemProtect stress flags take over.
// Note: enableScanImplicitRoots doubles as the IsMemProtectMode() flag.
void
Recycler::SetMemProtectMode()
{
    this->enableScanInteriorPointers = true;
    this->enableScanImplicitRoots = true;
    this->disableCollectOnAllocationHeuristics = true;
#ifdef RECYCLER_STRESS
    this->recyclerStress = GetRecyclerFlagsTable().MemProtectHeapStress;
#if ENABLE_CONCURRENT_GC
    this->recyclerBackgroundStress = GetRecyclerFlagsTable().MemProtectHeapBackgroundStress;
    this->recyclerConcurrentStress = GetRecyclerFlagsTable().MemProtectHeapConcurrentStress;
    this->recyclerConcurrentRepeatStress = GetRecyclerFlagsTable().MemProtectHeapConcurrentRepeatStress;
#endif
#if ENABLE_PARTIAL_GC
    this->recyclerPartialStress = GetRecyclerFlagsTable().MemProtectHeapPartialStress;
#endif
#endif
}
// Emits a MEMPROTECT_GC_HEAP_SIZE ETW event summing used/reserved/committed
// bytes and segment counts across all recycler page allocators.
//   fromGC - true when logged as part of a GC (passed through to the event).
// Only valid in MemProtect mode; a no-op unless the ETW event is enabled.
void
Recycler::LogMemProtectHeapSize(bool fromGC)
{
    Assert(IsMemProtectMode());
#ifdef ENABLE_JS_ETW
    if (IS_JS_ETW(EventEnabledMEMPROTECT_GC_HEAP_SIZE()))
    {
        // Locals deliberately shadow the member allocator fields with the
        // accessor results so the sums below read uniformly.
        IdleDecommitPageAllocator* recyclerPageAllocator = GetRecyclerPageAllocator();
        IdleDecommitPageAllocator* recyclerLeafPageAllocator = GetRecyclerLeafPageAllocator();
        IdleDecommitPageAllocator* recyclerLargeBlockPageAllocator = GetRecyclerLargeBlockPageAllocator();
#ifdef RECYCLER_WRITE_BARRIER_ALLOC_SEPARATE_PAGE
        IdleDecommitPageAllocator* recyclerWithBarrierPageAllocator = GetRecyclerWithBarrierPageAllocator();
#endif
        size_t usedBytes = (recyclerPageAllocator->usedBytes + recyclerLeafPageAllocator->usedBytes +
            recyclerLargeBlockPageAllocator->usedBytes);
        size_t reservedBytes = (recyclerPageAllocator->reservedBytes + recyclerLeafPageAllocator->reservedBytes +
            recyclerLargeBlockPageAllocator->reservedBytes);
        size_t committedBytes = (recyclerPageAllocator->committedBytes + recyclerLeafPageAllocator->committedBytes +
            recyclerLargeBlockPageAllocator->committedBytes);
        size_t numberOfSegments = (recyclerPageAllocator->numberOfSegments +
            recyclerLeafPageAllocator->numberOfSegments +
            recyclerLargeBlockPageAllocator->numberOfSegments);
#ifdef RECYCLER_WRITE_BARRIER_ALLOC_SEPARATE_PAGE
        // The write-barrier allocator is counted separately when present.
        usedBytes += recyclerWithBarrierPageAllocator->usedBytes;
        reservedBytes += recyclerWithBarrierPageAllocator->reservedBytes;
        committedBytes += recyclerWithBarrierPageAllocator->committedBytes;
        numberOfSegments += recyclerWithBarrierPageAllocator->numberOfSegments;
#endif
        JS_ETW(EventWriteMEMPROTECT_GC_HEAP_SIZE(this, usedBytes, reservedBytes, committedBytes, numberOfSegments, fromGC));
    }
#endif
}
#if DBG
// Debug-only: suppresses the destructor's assert that the concurrent GC
// thread has exited, and turns off all recycler stress modes (stress could
// otherwise kick off concurrent work this caller cannot wait for).
void
Recycler::SetDisableConcurrentThreadExitedCheck()
{
#if ENABLE_CONCURRENT_GC
    disableConcurrentThreadExitedCheck = true;
#endif
#ifdef RECYCLER_STRESS
    this->recyclerStress = false;
#if ENABLE_CONCURRENT_GC
    this->recyclerBackgroundStress = false;
    this->recyclerConcurrentStress = false;
    this->recyclerConcurrentRepeatStress = false;
#endif
#if ENABLE_PARTIAL_GC
    this->recyclerPartialStress = false;
#endif
#endif
}
#endif
#if DBG
// Debug-only: clears the recorded owner-thread id on every page allocator so
// ownership re-binds to whichever thread touches it next (used when the
// recycler legitimately moves to another thread, e.g. during shutdown).
void
Recycler::ResetThreadId()
{
    // Transfer all the page allocator to the current thread id
    ForEachPageAllocator([](IdleDecommitPageAllocator* pageAlloc)
    {
        pageAlloc->ClearConcurrentThreadId();
    });
#if ENABLE_CONCURRENT_GC
    if (this->IsConcurrentEnabled())
    {
        markContext.GetPageAllocator()->ClearConcurrentThreadId();
    }
#endif
#if defined(DBG) && defined(PROFILE_EXEC)
    this->backgroundProfilerPageAllocator.ClearConcurrentThreadId();
#endif
}
#endif
// Recycler destructor. Teardown happens in a fixed order: stop further
// collection, flush diagnostics/leak reports, release concurrent-GC leftovers,
// close the page allocators, release the mark contexts, neuter every
// outstanding weak reference, then free tracking structures. The concurrent
// GC thread must already be gone (concurrentThread == nullptr) on entry.
Recycler::~Recycler()
{
#if ENABLE_CONCURRENT_GC
    Assert(!this->isAborting);
#endif
#if DBG
    // Under forced software write barrier, unlink this recycler from the
    // global debug list of live recyclers (see Initialize for the insert).
    if (CONFIG_FLAG(ForceSoftwareWriteBarrier))
    {
        if (recyclerList == this)
        {
            recyclerList = this->next;
        }
        else
        {
            Recycler* list = recyclerList;
            while (list->next != this)
            {
                list = list->next;
            }
            list->next = this->next;
        }
    }
#endif
    // Stop any further collection
    this->isShuttingDown = true;
#if DBG
    this->ResetThreadId();
#endif
#ifdef ENABLE_JS_ETW
    FlushFreeRecord();
#endif
    ClearObjectBeforeCollectCallbacks();
#ifdef RECYCLER_DUMP_OBJECT_GRAPH
    if (GetRecyclerFlagsTable().DumpObjectGraphOnExit)
    {
        // Always skip stack here, as we may be running the dtor on another thread.
        RecyclerObjectGraphDumper::Param param = { 0 };
        param.skipStack = true;
        this->DumpObjectGraph(&param);
    }
#endif
    AUTO_LEAK_REPORT_SECTION(this->GetRecyclerFlagsTable(), _u("Recycler (%p): %s"), this, this->IsInDllCanUnloadNow()? _u("DllCanUnloadNow") :
        this->IsInDetachProcess()? _u("DetachProcess") : _u("Destructor"));
#ifdef LEAK_REPORT
    ReportLeaks();
#endif
#ifdef CHECK_MEMORY_LEAK
    CheckLeaks(this->IsInDllCanUnloadNow()? _u("DllCanUnloadNow") : this->IsInDetachProcess()? _u("DetachProcess") : _u("Destructor"));
#endif
    AUTO_LEAK_REPORT_SECTION_0(this->GetRecyclerFlagsTable(), _u("Skipped finalizers"));
#if ENABLE_CONCURRENT_GC
    Assert(concurrentThread == nullptr);
    // We only sometime clean up the state after abort concurrent to not collection
    // Still need to delete heap block that is held by the recyclerSweep
    if (recyclerSweep != nullptr)
    {
        recyclerSweep->ShutdownCleanup();
        recyclerSweep = nullptr;
    }
    if (mainThreadHandle != nullptr)
    {
        // Duplicated in SetIsThreadBound; release our reference here.
        CloseHandle(mainThreadHandle);
    }
#endif
    recyclerPageAllocator.Close();
    recyclerLargeBlockPageAllocator.Close();
#ifdef RECYCLER_WRITE_BARRIER_ALLOC_SEPARATE_PAGE
    recyclerWithBarrierPageAllocator.Close();
#endif
    markContext.Release();
    parallelMarkContext1.Release();
    parallelMarkContext2.Release();
    parallelMarkContext3.Release();
    // Clean up the weak reference map so that
    // objects being finalized can safely refer to weak references
    // (this could otherwise become a problem for weak references held
    // to large objects since their block would be destroyed before
    // the finalizer was run)
    // When the recycler is shutting down, all objects are going to be reclaimed
    // so null out the weak references so that anyone relying on weak
    // references simply thinks the object has been reclaimed
    weakReferenceMap.Map([](RecyclerWeakReferenceBase * weakRef) -> bool
    {
        weakRef->strongRef = nullptr;
        // Put in a dummy heap block so that we can still do the isPendingConcurrentSweep check first.
        weakRef->strongRefHeapBlock = &CollectedRecyclerWeakRefHeapBlock::Instance;
        // Remove
        return false;
    });
#if ENABLE_PARTIAL_GC
    clientTrackedObjectList.Clear(&this->clientTrackedObjectAllocator);
#endif
#ifdef PROFILE_RECYCLER_ALLOC
    if (trackerDictionary != nullptr)
    {
        // Free each tracker item before the dictionary itself, then tear down
        // the critical section created in InitializeProfileAllocTracker.
        this->trackerDictionary->Map([](type_info const *, TrackerItem * item)
        {
            NoCheckHeapDelete(item);
        });
        NoCheckHeapDelete(this->trackerDictionary);
        this->trackerDictionary = nullptr;
        ::DeleteCriticalSection(&trackerCriticalSection);
    }
#endif
#ifdef RECYCLER_MARK_TRACK
    NoCheckHeapDelete(this->markMap);
    this->markMap = nullptr;
#endif
#if DBG
    // Disable idle decommit asserts
    ForEachPageAllocator([](IdleDecommitPageAllocator* pageAlloc)
    {
        pageAlloc->ShutdownIdleDecommit();
    });
#endif
    Assert(this->collectionState == CollectionStateExit || this->collectionState == CollectionStateNotCollecting);
#if ENABLE_CONCURRENT_GC
    Assert(this->disableConcurrentThreadExitedCheck || this->concurrentThreadExited == true);
#endif
}
// Binds this recycler to the current thread: duplicates a real handle to the
// current thread (GetCurrentThread returns a pseudo-handle that cannot be
// used cross-thread) into mainThreadHandle, and records the stack base for
// in-thread stack scanning. The handle is closed in ~Recycler.
// NOTE(review): the DuplicateHandle return value is not checked; on failure
// mainThreadHandle stays NULL -- confirm downstream users tolerate that.
void
Recycler::SetIsThreadBound()
{
    Assert(mainThreadHandle == nullptr);
    ::DuplicateHandle(::GetCurrentProcess(), ::GetCurrentThread(), ::GetCurrentProcess(), &mainThreadHandle,
        0, FALSE, DUPLICATE_SAME_ACCESS);
    stackBase = GetStackBase();
}
// Pins 'obj' as a GC root. The pin is applied lazily: 'obj' is first parked
// in transientPinnedObject, and only committed into pinnedObjectMap when the
// NEXT RootAddRef displaces it (the code below commits the PREVIOUS transient
// object before parking the new one). A transient pin still protects the
// object because the GC treats transientPinnedObject as a root.
//   obj   - recycler-allocated object to pin (must be a valid object)
//   count - optional out: the pin count 'obj' will have once committed
//           (current map count + 1, or 1 if not yet in the map)
void
Recycler::RootAddRef(void* obj, uint *count)
{
    Assert(this->IsValidObject(obj));
    if (transientPinnedObject)
    {
        // Commit the previously parked object into the pinned map now that it
        // is being displaced.
        PinRecord& refCount = pinnedObjectMap.GetReference(transientPinnedObject);
        ++refCount;
        if (refCount == 1)
        {
            // First pin on this object: the pinned map gained an entry, so
            // root scanning must walk it again.
            this->scanPinnedObjectMap = true;
            RECYCLER_PERF_COUNTER_INC(PinnedObject);
        }
#ifdef STACK_BACK_TRACE
#if defined(CHECK_MEMORY_LEAK) || defined(LEAK_REPORT)
        if (GetRecyclerFlagsTable().LeakStackTrace)
        {
            // Attach the stack captured when the displaced object was parked.
            StackBackTraceNode::Prepend(&NoCheckHeapAllocator::Instance, refCount.stackBackTraces,
                transientPinnedObjectStackBackTrace);
        }
#endif
#endif
    }
    if (count != nullptr)
    {
        // Report the count 'obj' will have after its own deferred commit.
        PinRecord* refCount = pinnedObjectMap.TryGetReference(obj);
        *count = (refCount != nullptr) ? (*refCount + 1) : 1;
    }
    transientPinnedObject = obj;
#ifdef STACK_BACK_TRACE
#if defined(CHECK_MEMORY_LEAK) || defined(LEAK_REPORT)
    if (GetRecyclerFlagsTable().LeakStackTrace)
    {
        // Capture now; consumed when this object is committed (above) or
        // released while still transient (RootRelease).
        transientPinnedObjectStackBackTrace = StackBackTrace::Capture(&NoCheckHeapAllocator::Instance);
    }
#endif
#endif
}
// Releases one pin on 'obj' (counterpart of RootAddRef). If 'obj' is still
// the parked transient pin it is simply un-parked; otherwise its map count is
// decremented and the entry removed at zero (removal is deferred while a
// concurrent find-root pass is walking the map). Finally requests an
// exhaustive-candidate "collection" -- not a real GC, just a hint that an
// in-flight exhaustive GC should run another pass.
//   obj   - previously pinned object
//   count - optional out: remaining pin count, or (uint)-1 if 'obj' was not
//           pinned at all (asserts in DBG)
void
Recycler::RootRelease(void* obj, uint *count)
{
    Assert(this->IsValidObject(obj));
    if (transientPinnedObject == obj)
    {
        // The pin was never committed to the map; just drop the parked ref.
        transientPinnedObject = nullptr;
        if (count != nullptr)
        {
            PinRecord *refCount = pinnedObjectMap.TryGetReference(obj);
            *count = (refCount != nullptr) ? *refCount : 0;
        }
#ifdef STACK_BACK_TRACE
#if defined(CHECK_MEMORY_LEAK) || defined(LEAK_REPORT)
        if (GetRecyclerFlagsTable().LeakStackTrace)
        {
            // Free the stack captured when the object was parked.
            transientPinnedObjectStackBackTrace->Delete(&NoCheckHeapAllocator::Instance);
        }
#endif
#endif
    }
    else
    {
        PinRecord *refCount = pinnedObjectMap.TryGetReference(obj);
        if (refCount == nullptr)
        {
            // Releasing something that was never pinned.
            if (count != nullptr)
            {
                *count = (uint)-1;
            }
            // REVIEW: throw if not found
            Assert(false);
            return;
        }
        uint newRefCount = (--(*refCount));
        if (count != nullptr)
        {
            *count = newRefCount;
        }
        if (newRefCount != 0)
        {
#ifdef STACK_BACK_TRACE
#if defined(CHECK_MEMORY_LEAK) || defined(LEAK_REPORT)
            if (GetRecyclerFlagsTable().LeakStackTrace)
            {
                // Record where this (non-final) release came from.
                StackBackTraceNode::Prepend(&NoCheckHeapAllocator::Instance, refCount->stackBackTraces,
                    StackBackTrace::Capture(&NoCheckHeapAllocator::Instance));
            }
#endif
#endif
            return;
        }
#ifdef STACK_BACK_TRACE
#if defined(CHECK_MEMORY_LEAK) || defined(LEAK_REPORT)
        // Final release: free the accumulated traces before the entry goes.
        StackBackTraceNode::DeleteAll(&NoCheckHeapAllocator::Instance, refCount->stackBackTraces);
        refCount->stackBackTraces = nullptr;
#endif
#endif
#if ENABLE_CONCURRENT_GC
        // Don't delete the entry if we are in concurrent find root state
        // We will delete it later on in-thread find root
        if (this->hasPendingConcurrentFindRoot)
        {
            this->hasPendingUnpinnedObject = true;
        }
        else
#endif
        {
            pinnedObjectMap.Remove(obj);
        }
        RECYCLER_PERF_COUNTER_DEC(PinnedObject);
    }
    // Not a real collection. This doesn't activate GC.
    // This tell the GC that we have an exhaustive candidate, and should trigger
    // another GC if there is an exhaustive GC going on.
    this->CollectNow<CollectExhaustiveCandidate>();
}
#if DBG
// Head of the debug-only global list of live recyclers, maintained under
// ForceSoftwareWriteBarrier (linked in Initialize, unlinked in ~Recycler).
Recycler* Recycler::recyclerList = nullptr;
#endif
// Second-phase initialization: sets up allocation profiling, the heap
// (autoHeap), the primary mark context, stress flags, parallelism limits and
// the concurrent/partial GC configuration (including write watch).
//   forceInThread     - true disables concurrent collection entirely
//   threadService     - host thread service used for background GC work
//   deferThreadStartup- postpone starting the concurrent thread until needed
//   pageheapmode / captureAllocCallStack / captureFreeCallStack
//                     - page-heap diagnostics options (RECYCLER_PAGE_HEAP)
void
Recycler::Initialize(const bool forceInThread, JsUtil::ThreadService *threadService, const bool deferThreadStartup
#ifdef RECYCLER_PAGE_HEAP
    , PageHeapMode pageheapmode
    , bool captureAllocCallStack
    , bool captureFreeCallStack
#endif
    )
{
#ifdef PROFILE_RECYCLER_ALLOC
    this->InitializeProfileAllocTracker();
#endif
#ifdef ENABLE_DEBUG_CONFIG_OPTIONS
    this->disableCollection = CUSTOM_PHASE_OFF1(GetRecyclerFlagsTable(), Js::RecyclerPhase);
#endif
#if ENABLE_CONCURRENT_GC
    this->skipStack = false;
#endif
#if ENABLE_PARTIAL_GC
#if ENABLE_DEBUG_CONFIG_OPTIONS
    this->enablePartialCollect = !CUSTOM_PHASE_OFF1(GetRecyclerFlagsTable(), Js::PartialCollectPhase);
#else
    this->enablePartialCollect = true;
#endif
#endif
#ifdef PROFILE_MEM
    this->memoryData = MemoryProfiler::GetRecyclerMemoryData();
#endif
#if DBG || DBG_DUMP || defined(RECYCLER_TRACE)
    mainThreadId = GetCurrentThreadContextId();
#endif
#ifdef RECYCLER_TRACE
    collectionParam.domCollect = false;
#endif
#if defined(PROFILE_RECYCLER_ALLOC) || defined(RECYCLER_MEMORY_VERIFY) || defined(MEMSPECT_TRACKING) || defined(ETW_MEMORY_TRACKING)
    // Only register the native-allocation callback when no detailed-tracking
    // feature is active; detailed tracking forces the JIT fast path to bail
    // to the helper anyway, making the callback redundant.
    bool dontNeedDetailedTracking = false;
#if defined(PROFILE_RECYCLER_ALLOC)
    dontNeedDetailedTracking = dontNeedDetailedTracking || this->trackerDictionary == nullptr;
#endif
#if defined(RECYCLER_MEMORY_VERIFY)
    dontNeedDetailedTracking = dontNeedDetailedTracking || !this->verifyEnabled;
#endif
    // If we need detailed tracking we force allocation fast path in the JIT to fail and go to the helper, so there is no
    // need for the TrackNativeAllocatedMemoryBlock callback.
    if (dontNeedDetailedTracking)
    {
        autoHeap.Initialize(this, TrackNativeAllocatedMemoryBlock
#ifdef RECYCLER_PAGE_HEAP
            , pageheapmode
            , captureAllocCallStack
            , captureFreeCallStack
#endif
            );
    }
    else
    {
        autoHeap.Initialize(this
#ifdef RECYCLER_PAGE_HEAP
            , pageheapmode
            , captureAllocCallStack
            , captureFreeCallStack
#endif
            );
    }
#else
    autoHeap.Initialize(this
#ifdef RECYCLER_PAGE_HEAP
        , pageheapmode
        , captureAllocCallStack
        , captureFreeCallStack
#endif
        );
#endif
    // Reserve pages for the primary mark context up front; marking must be
    // able to make progress even under memory pressure.
    markContext.Init(Recycler::PrimaryMarkStackReservedPageCount);
#if defined(RECYCLER_DUMP_OBJECT_GRAPH) || defined(LEAK_REPORT) || defined(CHECK_MEMORY_LEAK)
    isPrimaryMarkContextInitialized = true;
#endif
#ifdef RECYCLER_PAGE_HEAP
    isPageHeapEnabled = autoHeap.IsPageHeapEnabled();
    if (IsPageHeapEnabled())
    {
        capturePageHeapAllocStack = autoHeap.captureAllocCallStack;
        capturePageHeapFreeStack = autoHeap.captureFreeCallStack;
    }
#endif
#ifdef RECYCLER_STRESS
#if ENABLE_PARTIAL_GC
    if (GetRecyclerFlagsTable().RecyclerTrackStress)
    {
        // Disable partial if we are doing track stress, since partial relies on ClientTracked processing
        // and track stress doesn't support this.
        this->enablePartialCollect = false;
    }
#endif
    this->recyclerStress = GetRecyclerFlagsTable().RecyclerStress;
#if ENABLE_CONCURRENT_GC
    this->recyclerBackgroundStress = GetRecyclerFlagsTable().RecyclerBackgroundStress;
    this->recyclerConcurrentStress = GetRecyclerFlagsTable().RecyclerConcurrentStress;
    this->recyclerConcurrentRepeatStress = GetRecyclerFlagsTable().RecyclerConcurrentRepeatStress;
#endif
#if ENABLE_PARTIAL_GC
    this->recyclerPartialStress = GetRecyclerFlagsTable().RecyclerPartialStress;
#endif
#endif
#ifdef RECYCLER_WRITE_WATCH
    bool needWriteWatch = false;
#endif
#if ENABLE_CONCURRENT_GC
    // Default to non-concurrent
    // Cap mark parallelism at 4 threads; forcing the ParallelMark phase also
    // uses the full 4 regardless of processor count.
    uint numProcs = (uint)AutoSystemInfo::Data.GetNumberOfPhysicalProcessors();
    this->maxParallelism = (numProcs > 4) || CUSTOM_PHASE_FORCE1(GetRecyclerFlagsTable(), Js::ParallelMarkPhase) ? 4 : numProcs;
    if (forceInThread)
    {
        // Requested a non-concurrent recycler
        this->disableConcurrent = true;
    }
#if ENABLE_DEBUG_CONFIG_OPTIONS
    else if (CUSTOM_PHASE_OFF1(GetRecyclerFlagsTable(), Js::ConcurrentCollectPhase))
    {
        // Concurrent collection disabled
        this->disableConcurrent = true;
    }
    else if (CUSTOM_PHASE_OFF1(GetRecyclerFlagsTable(), Js::ConcurrentMarkPhase) &&
        CUSTOM_PHASE_OFF1(GetRecyclerFlagsTable(), Js::ParallelMarkPhase) &&
        CUSTOM_PHASE_OFF1(GetRecyclerFlagsTable(), Js::ConcurrentSweepPhase))
    {
        // All concurrent collection phases disabled
        this->disableConcurrent = true;
    }
#endif
    else
    {
        this->disableConcurrent = false;
        if (deferThreadStartup || EnableConcurrent(threadService, false))
        {
#ifdef RECYCLER_WRITE_WATCH
            needWriteWatch = true;
#endif
        }
    }
#endif // ENABLE_CONCURRENT_GC
#if ENABLE_PARTIAL_GC
    if (this->enablePartialCollect)
    {
#ifdef RECYCLER_WRITE_WATCH
        needWriteWatch = true;
#endif
    }
#endif
#if ENABLE_CONCURRENT_GC
#ifdef RECYCLER_WRITE_WATCH
    if (!CONFIG_FLAG(ForceSoftwareWriteBarrier))
    {
        if (needWriteWatch)
        {
            // need write watch to support concurrent and/or partial collection
            recyclerPageAllocator.EnableWriteWatch();
            recyclerLargeBlockPageAllocator.EnableWriteWatch();
        }
    }
#endif
#else
    Assert(!needWriteWatch);
#endif
#if DBG
    // Under forced software write barrier, register on the global debug list
    // of live recyclers (unlinked again in ~Recycler).
    if (CONFIG_FLAG(ForceSoftwareWriteBarrier))
    {
        this->next = recyclerList;
        recyclerList = this;
    }
#endif
}
// True while any collection phase is active (state != NotCollecting).
BOOL
Recycler::CollectionInProgress() const
{
    return collectionState != CollectionStateNotCollecting;
}
// True once the recycler has entered its final exit state.
BOOL
Recycler::IsExiting() const
{
    return (collectionState == Collection_Exit);
}
// True while the sweep phase bits are set in the (bit-flag) collection state.
BOOL
Recycler::IsSweeping() const
{
    return ((collectionState & Collection_Sweep) == Collection_Sweep);
}
// Toggles the script-active flag (must actually change, and only while in
// script). Entering script also pushes out the next dispose deadline so
// disposal doesn't run immediately under active script.
void
Recycler::SetIsScriptActive(bool isScriptActive)
{
    Assert(this->isInScript);
    Assert(this->isScriptActive != isScriptActive);
    this->isScriptActive = isScriptActive;
    if (isScriptActive)
    {
        this->tickCountNextDispose = ::GetTickCount() + RecyclerHeuristic::TickCountFinishCollection;
    }
}
// Toggles the in-script flag; asserts the value actually changes.
void
Recycler::SetIsInScript(bool isInScript)
{
    Assert(this->isInScript != isInScript);
    this->isInScript = isInScript;
}
// True when an OOM during marking requires a rescan pass.
bool
Recycler::NeedOOMRescan() const
{
    return this->needOOMRescan;
}
// Flags that marking hit OOM and a rescan pass is required.
void
Recycler::SetNeedOOMRescan()
{
    this->needOOMRescan = true;
}
// Clears the OOM-rescan flag and re-arms allocation on every mark context's
// page allocator (they disable allocation when an OOM is hit).
void
Recycler::ClearNeedOOMRescan()
{
    this->needOOMRescan = false;
    markContext.GetPageAllocator()->ResetDisableAllocationOutOfMemory();
    parallelMarkContext1.GetPageAllocator()->ResetDisableAllocationOutOfMemory();
    parallelMarkContext2.GetPageAllocator()->ResetDisableAllocationOutOfMemory();
    parallelMarkContext3.GetPageAllocator()->ResetDisableAllocationOutOfMemory();
}
// MemProtect mode is keyed off enableScanImplicitRoots, which only
// SetMemProtectMode sets (see SetMemProtectMode above).
bool
Recycler::IsMemProtectMode()
{
    return this->enableScanImplicitRoots;
}
// Sums usedBytes across all of the recycler's page allocators: the thread
// (leaf) allocator, the optional write-barrier allocator, the main recycler
// allocator and the large-block allocator.
size_t
Recycler::GetUsedBytes()
{
    size_t usedBytes = threadPageAllocator->usedBytes;
#ifdef RECYCLER_WRITE_BARRIER_ALLOC_SEPARATE_PAGE
    usedBytes += recyclerWithBarrierPageAllocator.usedBytes;
#endif
    usedBytes += recyclerPageAllocator.usedBytes;
    usedBytes += recyclerLargeBlockPageAllocator.usedBytes;
#if GLOBAL_ENABLE_WRITE_BARRIER
    if (CONFIG_FLAG(ForceSoftwareWriteBarrier))
    {
        // With a forced software barrier everything should go through the
        // barrier allocator, so the plain allocator must be empty.
        Assert(recyclerPageAllocator.usedBytes == 0);
    }
#endif
    return usedBytes;
}
// Returns the page allocator for ordinary recycler allocations. Under a
// forced software write barrier (or when write watch is unavailable) the
// write-barrier allocator is returned instead.
// NOTE(review): the non-RECYCLER_WRITE_WATCH fallback references
// recyclerWithBarrierPageAllocator, which is only declared under
// RECYCLER_WRITE_BARRIER_ALLOC_SEPARATE_PAGE -- presumably those configs
// always coincide; confirm against the build configuration headers.
IdleDecommitPageAllocator*
Recycler::GetRecyclerPageAllocator()
{
    // TODO: SWB this is for Finalizable leaf allocation, which we didn't implement leaf bucket for it
    // remove this after the finalizable leaf bucket is implemented
#if GLOBAL_ENABLE_WRITE_BARRIER
    if (CONFIG_FLAG(ForceSoftwareWriteBarrier))
    {
        return &this->recyclerWithBarrierPageAllocator;
    }
    else
#endif
    {
#ifdef RECYCLER_WRITE_WATCH
        return &this->recyclerPageAllocator;
#else
        return &this->recyclerWithBarrierPageAllocator;
#endif
    }
}
// Accessor for the large-block page allocator.
IdleDecommitPageAllocator*
Recycler::GetRecyclerLargeBlockPageAllocator()
{
    return &this->recyclerLargeBlockPageAllocator;
}
// Leaf (pointer-free) allocations use the caller-supplied thread allocator.
IdleDecommitPageAllocator*
Recycler::GetRecyclerLeafPageAllocator()
{
    return this->threadPageAllocator;
}
#ifdef RECYCLER_WRITE_BARRIER_ALLOC_SEPARATE_PAGE
// Accessor for the software-write-barrier page allocator.
IdleDecommitPageAllocator*
Recycler::GetRecyclerWithBarrierPageAllocator()
{
    return &this->recyclerWithBarrierPageAllocator;
}
#endif
#if DBG
// Debug helper: true when 'candidate' lies inside a heap block and that
// block currently tracks it as a free object. Pointers outside any heap
// block report false.
BOOL
Recycler::IsFreeObject(void * candidate)
{
    HeapBlock * block = this->FindHeapBlock(candidate);
    return (block == nullptr) ? FALSE : block->IsFreeObject(candidate);
}
#endif
- BOOL
- Recycler::IsValidObject(void* candidate, size_t minimumSize)
- {
- HeapBlock * heapBlock = this->FindHeapBlock(candidate);
- if (heapBlock != NULL)
- {
- return heapBlock->IsValidObject(candidate) && (minimumSize == 0 || heapBlock->GetObjectSize(candidate) >= minimumSize);
- }
- return false;
- }
// Pre-reserves (primes) pages in every page allocator so early allocations
// avoid first-touch costs.
void
Recycler::Prime()
{
#ifdef ENABLE_DEBUG_CONFIG_OPTIONS
    if (GetRecyclerFlagsTable().IsEnabled(Js::ForceFragmentAddressSpaceFlag))
    {
        // Never prime the recycler if we are forced to fragment address space
        return;
    }
#endif
    ForEachPageAllocator([](IdleDecommitPageAllocator* pageAlloc)
    {
        pageAlloc->Prime(RecyclerPageAllocator::DefaultPrimePageCount);
    });
}
// Accounts `size` bytes of externally allocated memory against this
// recycler's GC heuristics, and gives the GC a chance to run.
void
Recycler::AddExternalMemoryUsage(size_t size)
{
    this->autoHeap.uncollectedAllocBytes += size;
    this->autoHeap.uncollectedExternalBytes += size;

    // Generally normal GC can cleanup the uncollectedAllocBytes. But if external components
    // do fast large allocations in a row, normal GC might not kick in. Let's force the GC
    // here if we need to collect anyhow.
    CollectNow<CollectOnAllocation>();
}
// Asks the page allocator's memory-limit policy whether an external
// allocation of `size` bytes may proceed; returns FALSE if it would exceed
// the allowed budget.
BOOL Recycler::ReportExternalMemoryAllocation(size_t size)
{
    return recyclerPageAllocator.RequestAlloc(size);
}
// Informs the memory-limit policy that a previously requested external
// allocation of `size` bytes failed, so its budget can be credited back.
void Recycler::ReportExternalMemoryFailure(size_t size)
{
    recyclerPageAllocator.ReportFailure(size);
}
// Informs the memory-limit policy that `size` bytes of external memory were
// released.
void Recycler::ReportExternalMemoryFree(size_t size)
{
    recyclerPageAllocator.ReportFree(size);
}
- /*------------------------------------------------------------------------------------------------
- * Idle Decommit
- *------------------------------------------------------------------------------------------------*/
// Puts every page allocator into idle-decommit mode (pages may be decommitted
// when idle).
void
Recycler::EnterIdleDecommit()
{
    ForEachPageAllocator([](IdleDecommitPageAllocator* pageAlloc)
    {
        pageAlloc->EnterIdleDecommit();
    });

#ifdef IDLE_DECOMMIT_ENABLED
    // If a previous LeaveIdleDecommit left a timer request pending, cancel it
    // now that we are back in idle decommit; CAS only replaces NeedTimer so a
    // concurrently posted NeedSignal is preserved.
    ::InterlockedCompareExchange(&needIdleDecommitSignal, IdleDecommitSignal_None, IdleDecommitSignal_NeedTimer);
#endif
}
// Takes every page allocator out of idle-decommit mode, then — when idle
// decommit is enabled — computes the strongest signal requested by any
// allocator and wakes the concurrent idle-decommit thread if needed.
void
Recycler::LeaveIdleDecommit()
{
#ifdef IDLE_DECOMMIT_ENABLED
    // A timer is only meaningful if the concurrent idle-decommit thread exists.
    bool allowTimer = (this->concurrentIdleDecommitEvent != nullptr);

    // Collect the max (strongest) signal across all allocators:
    // None < NeedTimer < NeedSignal (per the max() usage below).
    IdleDecommitSignal idleDecommitSignalRecycler = recyclerPageAllocator.LeaveIdleDecommit(allowTimer);
    IdleDecommitSignal idleDecommitSignalRecyclerLargeBlock = recyclerLargeBlockPageAllocator.LeaveIdleDecommit(allowTimer);
    IdleDecommitSignal idleDecommitSignal = max(idleDecommitSignalRecycler, idleDecommitSignalRecyclerLargeBlock);
    IdleDecommitSignal idleDecommitSignalThread = threadPageAllocator->LeaveIdleDecommit(allowTimer);
    idleDecommitSignal = max(idleDecommitSignal, idleDecommitSignalThread);
#ifdef RECYCLER_WRITE_BARRIER_ALLOC_SEPARATE_PAGE
    IdleDecommitSignal idleDecommitSignalRecyclerWithBarrier = recyclerWithBarrierPageAllocator.LeaveIdleDecommit(allowTimer);
    idleDecommitSignal = max(idleDecommitSignal, idleDecommitSignalRecyclerWithBarrier);
#endif

    if (idleDecommitSignal != IdleDecommitSignal_None)
    {
        Assert(allowTimer);

        // Reduce the number of times we need to signal the background thread
        // by detecting whether the thread is waiting on a time out or not
        if (idleDecommitSignal == IdleDecommitSignal_NeedSignal ||
            ::InterlockedCompareExchange(&needIdleDecommitSignal, IdleDecommitSignal_NeedTimer, IdleDecommitSignal_None) == IdleDecommitSignal_NeedSignal)
        {
#if DBG
            if (GetRecyclerFlagsTable().Trace.IsEnabled(Js::IdleDecommitPhase))
            {
                Output::Print(_u("Recycler Thread IdleDecommit Need Signal\n"));
                Output::Flush();
            }
#endif
            SetEvent(this->concurrentIdleDecommitEvent);
        }
    }
#else
    // No idle-decommit support: just leave the mode on every allocator.
    ForEachPageAllocator([](IdleDecommitPageAllocator* pageAlloc)
    {
        pageAlloc->LeaveIdleDecommit(false);
    });
#endif
}
- /*------------------------------------------------------------------------------------------------
- * Freeing
- *------------------------------------------------------------------------------------------------*/
// Explicitly frees a leaf (pointer-free) allocation of `size` bytes.
// Returns false when the free could not be performed (caller must then let
// the GC reclaim the object normally).
bool Recycler::ExplicitFreeLeaf(void* buffer, size_t size)
{
    return ExplicitFreeInternalWrapper<ObjectInfoBits::LeafBit>(buffer, size);
}
// Explicitly frees a non-leaf (pointer-containing) allocation of `size`
// bytes. Returns false when the free could not be performed.
bool Recycler::ExplicitFreeNonLeaf(void* buffer, size_t size)
{
    return ExplicitFreeInternalWrapper<ObjectInfoBits::NoBit>(buffer, size);
}
// Returns the actual allocation size for a requested object size, adding the
// verification padding (pad bytes + stored size) when memory-verify mode is
// on. NOTE(review): overflow of the padded size is only caught by the debug
// Assert — confirm callers can tolerate wraparound in release builds.
size_t Recycler::GetAllocSize(size_t size)
{
    size_t allocSize = size;
#ifdef RECYCLER_MEMORY_VERIFY
    if (this->VerifyEnabled())
    {
        allocSize += verifyPad + sizeof(size_t);
        Assert(allocSize > size);
    }
#endif
    return allocSize;
}
// Debug/verify helper for explicit free: sanity-checks that the block type
// matches the allocation's leaf/normal attribute, and (in memory-verify mode)
// marks the object's explicit-free bit on its small heap block.
template <typename TBlockAttributes>
void Recycler::SetExplicitFreeBitOnSmallBlock(HeapBlock* heapBlock, size_t sizeCat, void* buffer, ObjectInfoBits attributes)
{
    Assert(!heapBlock->IsLargeHeapBlock());
    Assert(heapBlock->GetObjectSize(buffer) == sizeCat);
    SmallHeapBlockT<TBlockAttributes>* smallBlock = (SmallHeapBlockT<TBlockAttributes>*)heapBlock;

    // A leaf allocation must live in a leaf block; anything else must be in
    // one of the normal (scanned) block kinds.
    if ((attributes & ObjectInfoBits::LeafBit) == LeafBit)
    {
        Assert(smallBlock->IsLeafBlock());
    }
    else
    {
        Assert(smallBlock->IsAnyNormalBlock());
    }

#ifdef RECYCLER_MEMORY_VERIFY
    smallBlock->SetExplicitFreeBitForObject(buffer);
#endif
}
// Routes an explicit free to the small- or medium-object path based on the
// (possibly padded) allocation size. Large objects are never explicitly
// freed — returns false so the GC reclaims them normally.
template <ObjectInfoBits attributes>
bool Recycler::ExplicitFreeInternalWrapper(void* buffer, size_t size)
{
    Assert(buffer != nullptr);
    Assert(size > 0);
#ifdef ENABLE_DEBUG_CONFIG_OPTIONS
    // Explicit free can be disabled wholesale for debugging.
    if (CUSTOM_PHASE_OFF1(GetRecyclerFlagsTable(), Js::ExplicitFreePhase))
    {
        return false;
    }
#endif
    size_t allocSize = GetAllocSize(size);
    if (HeapInfo::IsSmallObject(allocSize))
    {
        return ExplicitFreeInternal<attributes, SmallAllocationBlockAttributes>(buffer, size, HeapInfo::GetAlignedSizeNoCheck(allocSize));
    }
    if (HeapInfo::IsMediumObject(allocSize))
    {
        return ExplicitFreeInternal<attributes, MediumAllocationBlockAttributes>(buffer, size, HeapInfo::GetMediumObjectAlignedSizeNoCheck(allocSize));
    }
    return false;
}
// Performs the actual explicit free of a small/medium object: free-lists it
// in its bucket and scrubs the payload. Returns false when freeing is not
// allowed in the current GC state (caller then relies on normal collection).
template <ObjectInfoBits attributes, typename TBlockAttributes>
bool Recycler::ExplicitFreeInternal(void* buffer, size_t size, size_t sizeCat)
{
    // If the GC is in sweep state while FreeInternal is called, we might be executing a finalizer
    // which called Free, which would cause a "sweepable" buffer to be free-listed. Don't allow this.
    // Also don't allow freeing while we're shutting down the recycler since finalizers get executed
    // at this stage too
    if (this->IsSweeping() || this->IsExiting())
    {
        return false;
    }

#if ENABLE_CONCURRENT_GC
    // We shouldn't be freeing object when we are running GC in thread
    Assert(this->IsConcurrentState() || !this->CollectionInProgress() || this->collectionState == CollectionStatePostCollectionCallback);
#else
    Assert(!this->CollectionInProgress() || this->collectionState == CollectionStatePostCollectionCallback);
#endif

    // The buffer must be a live heap object carrying only NoBit or LeafBit.
    DebugOnly(RecyclerHeapObjectInfo info);
    Assert(this->FindHeapObject(buffer, FindHeapObjectFlags_NoFreeBitVerify, info));
    Assert((info.GetAttributes() & ~ObjectInfoBits::LeafBit) == 0); // Only NoBit or LeafBit

#if DBG || defined(RECYCLER_MEMORY_VERIFY) || defined(RECYCLER_PAGE_HEAP)
    // Either the mainThreadHandle is null (we're not thread bound)
    // or we should be calling this function on the main script thread
    Assert(this->mainThreadHandle == NULL ||
        ::GetCurrentThreadId() == ::GetThreadId(this->mainThreadHandle));

    HeapBlock* heapBlock = this->FindHeapBlock(buffer);
    Assert(heapBlock != nullptr);

#ifdef RECYCLER_PAGE_HEAP
    if (this->IsPageHeapEnabled())
    {
#ifdef STACK_BACK_TRACE
        // Record the free call stack so page-heap diagnostics can report the
        // free site on a later fault.
        if (this->ShouldCapturePageHeapFreeStack())
        {
            if (heapBlock->IsLargeHeapBlock())
            {
                LargeHeapBlock* largeHeapBlock = (LargeHeapBlock*)heapBlock;
                if (largeHeapBlock->InPageHeapMode())
                {
                    largeHeapBlock->CapturePageHeapFreeStack();
                }
            }
        }
#endif
        // Don't do actual explicit free in page heap mode
        return false;
    }
#endif

    SetExplicitFreeBitOnSmallBlock<TBlockAttributes>(heapBlock, sizeCat, buffer, attributes);
#endif

    // Return the object to its size bucket's free list.
    if (TBlockAttributes::IsMediumBlock)
    {
        autoHeap.FreeMediumObject<attributes>(buffer, sizeCat);
    }
    else
    {
        autoHeap.FreeSmallObject<attributes>(buffer, sizeCat);
    }

    // Scrub the payload past the embedded FreeObject header — presumably so
    // stale pointers in the freed memory can't be misread later (confirm
    // intent); verify mode uses its recognizable fill pattern instead of 0.
    if (size > sizeof(FreeObject) || TBlockAttributes::IsMediumBlock)
    {
        // Do this on the background somehow?
        byte expectedFill = 0;
        size_t fillSize = size - sizeof(FreeObject);
#ifdef RECYCLER_MEMORY_VERIFY
        if (this->VerifyEnabled())
        {
            expectedFill = Recycler::VerifyMemFill;
        }
#endif
        memset(((char*)buffer) + sizeof(FreeObject), expectedFill, fillSize);
    }

#ifdef PROFILE_RECYCLER_ALLOC
    // Re-tag the slot as an explicit-free-list entry for allocation tracking.
    if (this->trackerDictionary != nullptr)
    {
        this->SetTrackerData(buffer, &TrackerData::ExplicitFreeListObjectData);
    }
#endif
    return true;
}
- /*------------------------------------------------------------------------------------------------
- * Allocation
- *------------------------------------------------------------------------------------------------*/
// Attempts one large-object allocation of `size` bytes (aligned up to its
// size category): first from the existing large block list, then — after an
// allocation-triggered GC opportunity and an optional page-heap path — from a
// freshly added large heap block. Returns nullptr on failure (or throws OOM
// when nothrow is false and the size computation overflows).
char *
Recycler::TryLargeAlloc(HeapInfo * heap, size_t size, ObjectInfoBits attributes, bool nothrow)
{
    Assert((attributes & InternalObjectInfoBitMask) == attributes);
    Assert(size != 0);
    size_t sizeCat = HeapInfo::GetAlignedSizeNoCheck(size);
    if (sizeCat == 0)
    {
        // overflow scenario
        // if nothrow is false, throw out of memory
        // otherwise, return null
        if (nothrow == false)
        {
            this->OutOfMemory();
        }
        return nullptr;
    }
    char * memBlock;

    // Fast path: try the most recently used large block first.
    if (heap->largeObjectBucket.largeBlockList != nullptr)
    {
        memBlock = heap->largeObjectBucket.largeBlockList->Alloc(sizeCat, attributes);
        if (memBlock != nullptr)
        {
#ifdef RECYCLER_ZERO_MEM_CHECK
            VerifyZeroFill(memBlock, sizeCat);
#endif
            return memBlock;
        }
    }

    // We don't care whether a GC happened here or not, because we are not reusing freed
    // large objects. We might try to allocate from existing block if we implement
    // large object reuse.
    if (!this->disableCollectOnAllocationHeuristics)
    {
        CollectNow<CollectOnAllocation>();
    }

#ifdef RECYCLER_PAGE_HEAP
    // Page heap isolates the object on its own page(s) for use-after-free and
    // overrun detection.
    if (IsPageHeapEnabled())
    {
        if (heap->largeObjectBucket.IsPageHeapEnabled(attributes))
        {
            memBlock = heap->largeObjectBucket.PageHeapAlloc(this, sizeCat, size, (ObjectInfoBits)attributes, autoHeap.pageHeapMode, nothrow);
            if (memBlock != nullptr)
            {
#ifdef RECYCLER_ZERO_MEM_CHECK
                VerifyZeroFill(memBlock, size);
#endif
                return memBlock;
            }
        }
    }
#endif

    // Slow path: grow the heap with a new large block sized for this request.
    LargeHeapBlock * heapBlock = heap->AddLargeHeapBlock(sizeCat);
    if (heapBlock == nullptr)
    {
        return nullptr;
    }
    memBlock = heapBlock->Alloc(sizeCat, attributes);
    Assert(memBlock != nullptr);
#ifdef RECYCLER_ZERO_MEM_CHECK
    VerifyZeroFill(memBlock, sizeCat);
#endif
    return memBlock;
}
// Allocates a large object, retrying once after a forced in-thread collection
// if the first attempt fails. When nothrow is false, exhausting both attempts
// raises OutOfMemory instead of returning nullptr. On success, the bytes are
// charged to the uncollected-allocation heuristic.
template <bool nothrow>
char*
Recycler::LargeAlloc(HeapInfo* heap, size_t size, ObjectInfoBits attributes)
{
    Assert((attributes & InternalObjectInfoBitMask) == attributes);

    char * addr = TryLargeAlloc(heap, size, attributes, nothrow);
    if (addr == nullptr)
    {
        // Force a collection and try to allocate again.
        this->CollectNow<CollectNowForceInThread>();
        addr = TryLargeAlloc(heap, size, attributes, nothrow);
        if (addr == nullptr)
        {
            if (nothrow == false)
            {
                // Still fails, we are out of memory
                // Since nothrow is false, it's okay to throw here
                this->OutOfMemory();
            }
            else
            {
                return nullptr;
            }
        }
    }

    autoHeap.uncollectedAllocBytes += size;
    return addr;
}

// Explicitly instantiate both versions of LargeAlloc
template char* Recycler::LargeAlloc<true>(HeapInfo* heap, size_t size, ObjectInfoBits attributes);
template char* Recycler::LargeAlloc<false>(HeapInfo* heap, size_t size, ObjectInfoBits attributes);
// Invokes the host-installed out-of-memory handler. Presumably does not
// return normally (handlers typically throw/terminate) — confirm against the
// handler registration.
void
Recycler::OutOfMemory()
{
    outOfMemoryFunc();
}
// Static shim for the JIT: forwards to the instance method on the recycler
// identified by the raw recyclerAddr pointer.
void Recycler::GetNormalHeapBlockAllocatorInfoForNativeAllocation(void* recyclerAddr, size_t allocSize, void*& allocatorAddress, uint32& endAddressOffset, uint32& freeListOffset, bool allowBumpAllocation, bool isOOPJIT)
{
    Assert(recyclerAddr);
    return ((Recycler*)recyclerAddr)->GetNormalHeapBlockAllocatorInfoForNativeAllocation(allocSize, allocatorAddress, endAddressOffset, freeListOffset, allowBumpAllocation, isOOPJIT);
}
- void Recycler::GetNormalHeapBlockAllocatorInfoForNativeAllocation(size_t allocSize, void*& allocatorAddress, uint32& endAddressOffset, uint32& freeListOffset, bool allowBumpAllocation, bool isOOPJIT)
- {
- Assert(HeapInfo::IsAlignedSize(allocSize));
- Assert(HeapInfo::IsSmallObject(allocSize));
- allocatorAddress = (char*)this + offsetof(Recycler, autoHeap) + offsetof(HeapInfo, heapBuckets) +
- sizeof(HeapBucketGroup<SmallAllocationBlockAttributes>)*((uint)(allocSize >> HeapConstants::ObjectAllocationShift) - 1)
- + HeapBucketGroup<SmallAllocationBlockAttributes>::GetHeapBucketOffset()
- + HeapBucketT<SmallNormalHeapBlockT<SmallAllocationBlockAttributes>>::GetAllocatorHeadOffset();
- endAddressOffset = SmallHeapBlockAllocator<SmallNormalHeapBlockT<SmallAllocationBlockAttributes>>::GetEndAddressOffset();
- freeListOffset = SmallHeapBlockAllocator<SmallNormalHeapBlockT<SmallAllocationBlockAttributes>>::GetFreeObjectListOffset();;
- if (!isOOPJIT)
- {
- Assert(allocatorAddress == GetAddressOfAllocator<NoBit>(allocSize));
- Assert(endAddressOffset == GetEndAddressOffset<NoBit>(allocSize));
- Assert(freeListOffset == GetFreeObjectListOffset<NoBit>(allocSize));
- Assert(allowBumpAllocation == AllowNativeCodeBumpAllocation());
- }
- if (!allowBumpAllocation)
- {
- freeListOffset = endAddressOffset;
- }
- }
// Returns whether JIT'ed code may use the inline bump-allocation fast path.
// Any mode that must observe every allocation (tracking, verification, page
// heap) forces the slow helper path instead.
bool Recycler::AllowNativeCodeBumpAllocation()
{
    // In debug builds, if we need to track allocation info, we pretend there is no pointer-bump-allocation space
    // on this page, so that we always fail the check in native code and go to helper, which does the tracking.
#ifdef PROFILE_RECYCLER_ALLOC
    if (this->trackerDictionary != nullptr)
    {
        return false;
    }
#endif

#ifdef RECYCLER_MEMORY_VERIFY
    if (this->verifyEnabled)
    {
        return false;
    }
#endif

#ifdef RECYCLER_PAGE_HEAP
    // Don't allow bump allocation in the JIT when page heap is turned on
    if (this->IsPageHeapEnabled())
    {
        return false;
    }
#endif

    return true;
}
// Bookkeeping callback for allocations performed by JIT'ed fast-path code:
// reports the block to memory tracking and updates the live/free perf
// counters. Only legal when bump allocation is allowed (no tracker/verify).
void Recycler::TrackNativeAllocatedMemoryBlock(Recycler * recycler, void * memBlock, size_t sizeCat)
{
    Assert(HeapInfo::IsAlignedSize(sizeCat));
    Assert(HeapInfo::IsSmallObject(sizeCat));

#ifdef PROFILE_RECYCLER_ALLOC
    AssertMsg(!Recycler::DoProfileAllocTracker(), "Why did we register allocation tracking callback if all allocations are forced to slow path?");
#endif

    RecyclerMemoryTracking::ReportAllocation(recycler, memBlock, sizeCat);
    RECYCLER_PERF_COUNTER_INC(LiveObject);
    RECYCLER_PERF_COUNTER_ADD(LiveObjectSize, sizeCat);
    RECYCLER_PERF_COUNTER_SUB(FreeObjectSize, sizeCat);

#ifdef RECYCLER_MEMORY_VERIFY
    AssertMsg(!recycler->VerifyEnabled(), "Why did we register allocation tracking callback if all allocations are forced to slow path?");
#endif
}
- /*------------------------------------------------------------------------------------------------
- * FindRoots
- *------------------------------------------------------------------------------------------------*/
// xplat-todo: Unify these two variants of GetStackBase
#ifdef _WIN32
// Returns the highest address of the current thread's stack (stacks grow
// down), read from the Thread Environment Block.
static void* GetStackBase()
{
    return ((NT_TIB *)NtCurrentTeb())->StackBase;
}
#else
// Non-Win32 shim: same contract, via the thread stack-limits API.
static void* GetStackBase()
{
    ULONG_PTR highLimit = 0;
    ULONG_PTR lowLimit = 0;
    ::GetCurrentThreadStackLimits(&lowLimit, &highLimit);
    return (void*) highLimit;
}
#endif
// SAVE_THREAD_CONTEXT() spills the current register state into
// savedThreadContext.GetRegisters() so the stack scan can treat live
// registers as GC roots. Per-architecture implementations follow.
#if _M_IX86
// REVIEW: For x86, do we care about scanning esp/ebp?
// At GC time, they shouldn't be pointing to GC memory.
// NOTE(review): eax is reloaded with the buffer address before the stores, so
// slot [eax+0x4] records the buffer pointer rather than the original eax
// value; the original eax is still reachable by the stack scan via the
// initial push (saved esp includes it). Confirm this is intentional.
#define SAVE_THREAD_CONTEXT() \
    void** targetBuffer = this->savedThreadContext.GetRegisters(); \
    __asm { push eax } \
    __asm { mov eax, targetBuffer } \
    __asm { mov [eax], esp} \
    __asm { mov [eax+0x4], eax} \
    __asm { mov [eax+0x8], ebx} \
    __asm { mov [eax+0xc], ecx} \
    __asm { mov [eax+0x10], edx} \
    __asm { mov [eax+0x14], ebp} \
    __asm { mov [eax+0x18], esi} \
    __asm { mov [eax+0x1c], edi} \
    __asm { pop eax }
#elif _M_ARM
#define SAVE_THREAD_CONTEXT() arm_SAVE_REGISTERS(this->savedThreadContext.GetRegisters());
#elif _M_ARM64
#define SAVE_THREAD_CONTEXT() arm64_SAVE_REGISTERS(this->savedThreadContext.GetRegisters());
#elif _M_AMD64
#define SAVE_THREAD_CONTEXT() amd64_SAVE_REGISTERS(this->savedThreadContext.GetRegisters());
#else
#error Unexpected architecture
#endif
// Scans a guest arena as a GC root set: marks everything reachable from its
// big blocks and memory blocks. Returns the number of root bytes scanned.
// `background` selects the background-thread view of the big-block list.
size_t
Recycler::ScanArena(ArenaData * alloc, bool background)
{
#if DBG_DUMP
    if (GetRecyclerFlagsTable().Trace.IsEnabled(Js::MarkPhase)
        || GetRecyclerFlagsTable().Trace.IsEnabled(Js::FindRootPhase))
    {
        this->forceTraceMark = true;
        Output::Print(_u("Scanning Guest Arena %p: "), alloc);
    }
#endif
    size_t scanRootBytes = 0;
    BEGIN_DUMP_OBJECT_ADDRESS(_u("Guest Arena"), alloc);
#if ENABLE_PARTIAL_GC || ENABLE_CONCURRENT_GC
    // The new write watch batching logic broke the write watch handling here.
    // For now, just disable write watch for guest arenas.
    // TODO: Re-enable this in the future.
#if FALSE
    // Note, guest arenas are allocated out of the large block page allocator.
    bool writeWatch = alloc->GetPageAllocator() == &this->recyclerLargeBlockPageAllocator;

    // Only use write watch when we are doing rescan (Partial collect or finish concurrent)
    if (writeWatch && this->collectionState == CollectionStateRescanFindRoots)
    {
        scanRootBytes += TryMarkBigBlockListWithWriteWatch(alloc->GetBigBlocks(background));
        scanRootBytes += TryMarkBigBlockListWithWriteWatch(alloc->GetFullBlocks());
    }
    else
#endif
#endif
    {
        scanRootBytes += TryMarkBigBlockList(alloc->GetBigBlocks(background));
        scanRootBytes += TryMarkBigBlockList(alloc->GetFullBlocks());
    }
    scanRootBytes += TryMarkArenaMemoryBlockList(alloc->GetMemoryBlocks());
    END_DUMP_OBJECT(this);

#if DBG_DUMP
    if (GetRecyclerFlagsTable().Trace.IsEnabled(Js::MarkPhase)
        || GetRecyclerFlagsTable().Trace.IsEnabled(Js::FindRootPhase))
    {
        this->forceTraceMark = false;
        Output::Print(_u("\n"));
        Output::Flush();
    }
#endif

    // The arena has been scanned so the full blocks can be rearranged at this point
#if ENABLE_DEBUG_CONFIG_OPTIONS
    if (background || !GetRecyclerFlagsTable().RecyclerProtectPagesOnRescan)
#endif
    {
        alloc->SetLockBlockList(false);
    }

    return scanRootBytes;
}
#if DBG
// Debug helper: returns true when skipping the stack scan is expected and
// therefore legal (leak-check modes, object-graph dump, MemProtect heap, or
// an external stack-skipping GC request).
bool
Recycler::ExpectStackSkip() const
{
    // Okay to skip the stack scan if we're in leak check mode
    bool expectStackSkip = false;
#ifdef LEAK_REPORT
    expectStackSkip = expectStackSkip || GetRecyclerFlagsTable().IsEnabled(Js::LeakReportFlag);
#endif
#ifdef CHECK_MEMORY_LEAK
    expectStackSkip = expectStackSkip || GetRecyclerFlagsTable().CheckMemoryLeak;
#endif
#ifdef RECYCLER_DUMP_OBJECT_GRAPH
    expectStackSkip = expectStackSkip || (this->objectGraphDumper != nullptr);
#endif
#if defined(INTERNAL_MEM_PROTECT_HEAP_ALLOC)
    expectStackSkip = expectStackSkip || GetRecyclerFlagsTable().MemProtectHeap;
#endif
    return expectStackSkip || isExternalStackSkippingGC;
}
#endif
#pragma warning(push)
#pragma warning(disable:4731) // 'pointer' : frame pointer register 'register' modified by inline assembly code
// Scans the current thread's stack and saved registers as conservative GC
// roots. Captures the register state via SAVE_THREAD_CONTEXT, then scans
// from the captured stack top to the stack base. Returns bytes scanned
// (0 when the scan is legitimately skipped).
size_t
Recycler::ScanStack()
{
    if (this->skipStack)
    {
#ifdef RECYCLER_TRACE
        CUSTOM_PHASE_PRINT_VERBOSE_TRACE1(GetRecyclerFlagsTable(), Js::ScanStackPhase, _u("[%04X] Skipping the stack scan\n"), ::GetCurrentThreadId());
#endif
        // Skipping is only legal in known configurations — assert them.
#if ENABLE_CONCURRENT_GC
        Assert(this->isFinishGCOnIdle || this->isConcurrentGCOnIdle || this->ExpectStackSkip());
#else
        Assert(this->ExpectStackSkip());
#endif
        return 0;
    }

#ifdef RECYCLER_STATS
    size_t lastMarkCount = this->collectionStats.markData.markCount;
#endif

    GCETW(GC_SCANSTACK_START, (this));
    RECYCLER_PROFILE_EXEC_BEGIN(this, Js::ScanStackPhase);

    // Capture registers first; the saved stack pointer becomes the scan's
    // lower bound (stacks grow down, so base > top).
    SAVE_THREAD_CONTEXT();
    void * stackTop = this->savedThreadContext.GetStackTop();

    void * stackStart = GetStackBase();
    Assert(stackStart > stackTop);
    size_t stackScanned = (size_t)((char *)stackStart - (char *)stackTop);

#if DBG_DUMP
    if (GetRecyclerFlagsTable().Trace.IsEnabled(Js::MarkPhase)
        || GetRecyclerFlagsTable().Trace.IsEnabled(Js::ScanStackPhase))
    {
        this->forceTraceMark = true;
        Output::Print(_u("Scanning Stack %p(%8d): "), stackTop, (char *)stackStart - (char *)stackTop);
    }
#endif

    // The special-mark variant is chosen once so both the register and stack
    // scans use the same template instantiation.
    bool doSpecialMark = collectionWrapper->DoSpecialMarkOnScanStack();

    BEGIN_DUMP_OBJECT(this, _u("Registers"));
    if (doSpecialMark)
    {
        ScanMemoryInline<true>(this->savedThreadContext.GetRegisters(), sizeof(void*) * SavedRegisterState::NumRegistersToSave);
    }
    else
    {
        ScanMemoryInline<false>(this->savedThreadContext.GetRegisters(), sizeof(void*) * SavedRegisterState::NumRegistersToSave);
    }
    END_DUMP_OBJECT(this);

    BEGIN_DUMP_OBJECT(this, _u("Stack"));
    if (doSpecialMark)
    {
        ScanMemoryInline<true>((void**) stackTop, stackScanned);
    }
    else
    {
        ScanMemoryInline<false>((void**) stackTop, stackScanned);
    }
    END_DUMP_OBJECT(this);

#if DBG_DUMP
    if (GetRecyclerFlagsTable().Trace.IsEnabled(Js::MarkPhase)
        || GetRecyclerFlagsTable().Trace.IsEnabled(Js::ScanStackPhase))
    {
        this->forceTraceMark = false;
        Output::Print(_u("\n"));
        Output::Flush();
    }
#endif
    RECYCLER_PROFILE_EXEC_END(this, Js::ScanStackPhase);
    RECYCLER_STATS_ADD(this, stackCount, this->collectionStats.markData.markCount - lastMarkCount);
    GCETW(GC_SCANSTACK_STOP, (this));

    return stackScanned;
}
#pragma warning(pop)
// Marks all pinned objects (the transient pinned object plus the pinned
// object map) as roots. Entries whose pin count dropped to zero are removed
// — but only on the in-thread pass (`background == false`), since the
// background pass must not mutate the map concurrently.
// Returns the number of root bytes scanned.
template <bool background>
size_t Recycler::ScanPinnedObjects()
{
    size_t scanRootBytes = 0;

    BEGIN_DUMP_OBJECT(this, _u("Pinned"));
    {
        this->TryMarkNonInterior(transientPinnedObject, &transientPinnedObject /* parentReference */);
        if (this->scanPinnedObjectMap)
        {
            // We are scanning the pinned object map now, we don't need to rescan unless
            // we reset mark or we add stuff to the map in Recycler::AddRef
            this->scanPinnedObjectMap = false;
            pinnedObjectMap.MapAndRemoveIf([this, &scanRootBytes](void * obj, PinRecord const& refCount)
            {
                if (refCount == 0)
                {
#ifdef STACK_BACK_TRACE
#if defined(CHECK_MEMORY_LEAK) || defined(LEAK_REPORT)
                    // A zero refcount entry must have released its pin stacks.
                    Assert(refCount.stackBackTraces == nullptr);
#endif
#endif
                    // Only remove if we are not doing this in the background.
                    return !background;
                }
                this->TryMarkNonInterior(obj, static_cast<void*>(const_cast<PinRecord*>(&refCount)) /* parentReference */);
                scanRootBytes += sizeof(void *);
                return false;
            });

            if (!background)
            {
                // In-thread pass removed all zero-count entries above.
                this->hasPendingUnpinnedObject = false;
            }
        }
    }
    END_DUMP_OBJECT(this);

    if (background)
    {
        // Re-enable resize now that we are done
        pinnedObjectMap.EnableResize();
    }

    return scanRootBytes;
}
// Adapter so external code can hand memory ranges to the recycler's
// conservative scanner (non-special-mark variant).
void
RecyclerScanMemoryCallback::operator()(void** obj, size_t byteCount)
{
    this->recycler->ScanMemoryInline<false>(obj, byteCount);
}
// Marks every non-stack root set: external weak references (projection),
// host-registered external roots, pinned objects, guest arenas (in-thread and
// external), and implicit roots. Also removes guest arenas flagged for
// deletion. Returns the number of root bytes scanned.
size_t
Recycler::FindRoots()
{
    size_t scanRootBytes = 0;
#ifdef RECYCLER_STATS
    size_t lastMarkCount = this->collectionStats.markData.markCount;
#endif

    GCETW(GC_SCANROOTS_START, (this));
    RECYCLER_PROFILE_EXEC_BEGIN(this, Js::FindRootPhase);

#ifdef ENABLE_PROJECTION
    {
        AUTO_TIMESTAMP(externalWeakReferenceObjectResolve);
        BEGIN_DUMP_OBJECT(this, _u("External Weak Referenced Roots"));
        Assert(!this->IsInRefCountTrackingForProjection());
#if DBG
        AutoIsInRefCountTrackingForProjection autoIsInRefCountTrackingForProjection(this);
#endif
        collectionWrapper->MarkExternalWeakReferencedObjects(this->inPartialCollectMode);
        END_DUMP_OBJECT(this);
    }
#endif

    // go through ITracker* stuff. Don't need to do it if we are doing a partial collection
    // as we keep track and mark all trackable objects.
    // Do this first because the host might unpin stuff in the process
    if (externalRootMarker != NULL)
    {
#if ENABLE_PARTIAL_GC
        if (!this->inPartialCollectMode)
#endif
        {
            RECYCLER_PROFILE_EXEC_BEGIN(this, Js::FindRootExtPhase);
#if DBG_DUMP
            if (GetRecyclerFlagsTable().Trace.IsEnabled(Js::MarkPhase)
                || GetRecyclerFlagsTable().Trace.IsEnabled(Js::FindRootPhase))
            {
                this->forceTraceMark = true;
                Output::Print(_u("Scanning External Roots: "));
            }
#endif
            BEGIN_DUMP_OBJECT(this, _u("External Roots"));
            // PARTIALGC-TODO: How do we count external roots?
            externalRootMarker(externalRootMarkerContext);
            END_DUMP_OBJECT(this);
#if DBG_DUMP
            if (GetRecyclerFlagsTable().Trace.IsEnabled(Js::MarkPhase)
                || GetRecyclerFlagsTable().Trace.IsEnabled(Js::FindRootPhase))
            {
                this->forceTraceMark = false;
                Output::Print(_u("\n"));
                Output::Flush();
            }
#endif
            RECYCLER_PROFILE_EXEC_END(this, Js::FindRootExtPhase);
        }
    }

#if DBG_DUMP
    if (GetRecyclerFlagsTable().Trace.IsEnabled(Js::MarkPhase)
        || GetRecyclerFlagsTable().Trace.IsEnabled(Js::FindRootPhase))
    {
        this->forceTraceMark = true;
        Output::Print(_u("Scanning Pinned Objects: "));
    }
#endif
    scanRootBytes += this->ScanPinnedObjects</*background = */false>();
#if DBG_DUMP
    if (GetRecyclerFlagsTable().Trace.IsEnabled(Js::MarkPhase)
        || GetRecyclerFlagsTable().Trace.IsEnabled(Js::FindRootPhase))
    {
        this->forceTraceMark = false;
        Output::Print(_u("\n"));
        Output::Flush();
    }
#endif

#if ENABLE_CONCURRENT_GC
    Assert(!this->hasPendingConcurrentFindRoot);
#endif
    RECYCLER_PROFILE_EXEC_BEGIN(this, Js::FindRootArenaPhase);
    // Guest arenas: delete the ones flagged for deletion, scan the rest
    // (unless a background finish-mark already covered them).
    DListBase<GuestArenaAllocator>::EditingIterator guestArenaIter(&guestArenaList);
    while (guestArenaIter.Next())
    {
        GuestArenaAllocator& allocator = guestArenaIter.Data();
#if ENABLE_CONCURRENT_GC
        if (allocator.pendingDelete)
        {
            Assert(this->hasPendingDeleteGuestArena);
            allocator.SetLockBlockList(false);
            guestArenaIter.RemoveCurrent(&HeapAllocator::Instance);
        }
        else if (this->backgroundFinishMarkCount == 0)
#endif
        {
            // Only scan arena if we haven't finished mark in the background
            // (which is true if concurrent GC is disabled)
            scanRootBytes += ScanArena(&allocator, false);
        }
    }
    this->hasPendingDeleteGuestArena = false;

    DList<ArenaData *, HeapAllocator>::Iterator externalGuestArenaIter(&externalGuestArenaList);
    while (externalGuestArenaIter.Next())
    {
        scanRootBytes += ScanArena(externalGuestArenaIter.Data(), false);
    }
    RECYCLER_PROFILE_EXEC_END(this, Js::FindRootArenaPhase);

    this->ScanImplicitRoots();

    RECYCLER_PROFILE_EXEC_END(this, Js::FindRootPhase);
    GCETW(GC_SCANROOTS_STOP, (this));
    RECYCLER_STATS_ADD(this, rootCount, this->collectionStats.markData.markCount - lastMarkCount);
    return scanRootBytes;
}
- void
- Recycler::ScanImplicitRoots()
- {
- if (this->enableScanImplicitRoots)
- {
- RECYCLER_PROFILE_EXEC_BEGIN(this, Js::FindImplicitRootPhase);
- if (!this->hasScannedInitialImplicitRoots)
- {
- this->ScanInitialImplicitRoots();
- this->hasScannedInitialImplicitRoots = true;
- }
- else
- {
- this->ScanNewImplicitRoots();
- }
- RECYCLER_PROFILE_EXEC_END(this, Js::FindImplicitRootPhase);
- }
- }
- size_t
- Recycler::TryMarkArenaMemoryBlockList(ArenaMemoryBlock * memoryBlocks)
- {
- size_t scanRootBytes = 0;
- ArenaMemoryBlock *blockp = memoryBlocks;
- while (blockp != NULL)
- {
- void** base=(void**)blockp->GetBytes();
- size_t byteCount = blockp->nbytes;
- scanRootBytes += byteCount;
- this->ScanMemory<false>(base, byteCount);
- blockp = blockp->next;
- }
- return scanRootBytes;
- }
#if ENABLE_CONCURRENT_GC
// Dead code (#if FALSE): write-watch-filtered big-block scanning, disabled
// because the batched write-watch logic broke this path (see ScanArena).
#if FALSE
// Scans only the pages of each big block that the OS reports as written
// since the last watch reset; resets the watch bit when running on the
// background (concurrent find-root) thread. Returns bytes scanned.
size_t
Recycler::TryMarkBigBlockListWithWriteWatch(BigBlock * memoryBlocks)
{
    DWORD pageSize = AutoSystemInfo::PageSize;
    size_t scanRootBytes = 0;
    BigBlock *blockp = memoryBlocks;

    // Reset the write watch bit if we are scanning this in the background thread
    DWORD const writeWatchFlags = this->IsConcurrentFindRootState()? WRITE_WATCH_FLAG_RESET : 0;

    while (blockp != NULL)
    {
        char * currentAddress = (char *)blockp->GetBytes();
        char * endAddress = currentAddress + blockp->currentByte;
        char * currentPageStart = (char *)blockp->allocation;
        while (currentAddress < endAddress)
        {
            void * written;
            ULONG_PTR count = 1;
            // Scan the page if the query fails (be conservative) or it was
            // written (count stays 1).
            if (::GetWriteWatch(writeWatchFlags, currentPageStart, AutoSystemInfo::PageSize, &written, &count, &pageSize) != 0 || count == 1)
            {
                char * currentEnd = min(currentPageStart + pageSize, endAddress);
                size_t byteCount = (size_t)(currentEnd - currentAddress);
                scanRootBytes += byteCount;
                this->ScanMemory<false>((void **)currentAddress, byteCount);
            }
            currentPageStart += pageSize;
            currentAddress = currentPageStart;
        }
        blockp = blockp->nextBigBlock;
    }
    return scanRootBytes;
}
#endif
#endif
- size_t
- Recycler::TryMarkBigBlockList(BigBlock * memoryBlocks)
- {
- size_t scanRootBytes = 0;
- BigBlock *blockp = memoryBlocks;
- while (blockp != NULL)
- {
- void** base = (void**)blockp->GetBytes();
- size_t byteCount = blockp->currentByte;
- scanRootBytes += byteCount;
- this->ScanMemory<false>(base, byteCount);
- blockp = blockp->nextBigBlock;
- }
- return scanRootBytes;
- }
// Marks the complete set of implicit roots across the heap (first pass of a
// collection; see ScanImplicitRoots).
void
Recycler::ScanInitialImplicitRoots()
{
    autoHeap.ScanInitialImplicitRoots();
}
// Marks only implicit roots created since the initial scan of this
// collection (see ScanImplicitRoots).
void
Recycler::ScanNewImplicitRoots()
{
    autoHeap.ScanNewImplicitRoots();
}
- /*------------------------------------------------------------------------------------------------
- * Mark
- *------------------------------------------------------------------------------------------------*/
// Begins the mark phase by clearing all mark state: per-block mark bits, heap
// mark data, and the flags that force pinned-object/implicit-root rescans.
void
Recycler::ResetMarks(ResetMarkFlags flags)
{
    Assert(!this->CollectionInProgress());
    collectionState = CollectionStateResetMarks;
    RecyclerVerboseTrace(GetRecyclerFlagsTable(), _u("Reset marks\n"));
    GCETW(GC_RESETMARKS_START, (this));
    RECYCLER_PROFILE_EXEC_BEGIN(this, Js::ResetMarksPhase);
    Assert(IsMarkStackEmpty());

    // Marks were cleared, so the pinned map and initial implicit roots must
    // be (re)scanned this collection.
    this->scanPinnedObjectMap = true;
    this->hasScannedInitialImplicitRoots = false;
    heapBlockMap.ResetMarks();

    autoHeap.ResetMarks(flags);
    RECYCLER_PROFILE_EXEC_END(this, Js::ResetMarksPhase);
    GCETW(GC_RESETMARKS_STOP, (this));

#ifdef RECYCLER_MARK_TRACK
    this->ClearMarkMap();
#endif
}
#ifdef RECYCLER_MARK_TRACK
// Clears the mark-tracking map (diagnostic record of marked references).
void Recycler::ClearMarkMap()
{
    this->markMap->Clear();
}

// Dumps every tracked (source => target) mark edge for diagnostics.
void Recycler::PrintMarkMap()
{
    this->markMap->Map([](void* key, void* value)
    {
        Output::Print(_u("0x%P => 0x%P\n"), key, value);
    });
}
#endif
#if DBG
// Debug check for allocation during external mark callbacks: must be on the
// main thread, in a mark state, and not while a concurrent mark is running
// off-thread.
void
Recycler::CheckAllocExternalMark() const
{
    Assert(!disableThreadAccessCheck);
    Assert(GetCurrentThreadContextId() == mainThreadId);
#if ENABLE_CONCURRENT_GC
#ifdef HEAP_ENUMERATION_VALIDATION
    Assert((this->IsMarkState() || this->IsPostEnumHeapValidationInProgress()) && collectionState != CollectionStateConcurrentMark);
#else
    Assert(this->IsMarkState() && collectionState != CollectionStateConcurrentMark);
#endif
#else
    Assert(this->IsMarkState());
#endif
}
#endif
// Marks `candidate` as reachable, treating it as a pointer to an object's
// start (no interior-pointer resolution). Uses the main (non-parallel) mark
// context; must not be called during parallel mark.
void
Recycler::TryMarkNonInterior(void* candidate, void* parentReference)
{
#ifdef HEAP_ENUMERATION_VALIDATION
    Assert(!isHeapEnumInProgress || this->IsPostEnumHeapValidationInProgress());
#else
    Assert(!isHeapEnumInProgress);
#endif
    Assert(this->collectionState != CollectionStateParallelMark);
    markContext.Mark</*parallel */ false, /* interior */ false, /* doSpecialMark */ false>(candidate, parentReference);
}
// Marks the object containing `candidate` as reachable, resolving interior
// pointers to the object start. Uses the main (non-parallel) mark context;
// must not be called during parallel mark.
void
Recycler::TryMarkInterior(void* candidate, void* parentReference)
{
#ifdef HEAP_ENUMERATION_VALIDATION
    Assert(!isHeapEnumInProgress || this->IsPostEnumHeapValidationInProgress());
#else
    Assert(!isHeapEnumInProgress);
#endif
    Assert(this->collectionState != CollectionStateParallelMark);
    markContext.Mark</*parallel */ false, /* interior */ true, /* doSpecialMark */ false>(candidate, parentReference);
}
// Drains the given mark context's work queue, marking everything reachable.
// When tracked objects are queued (or ignored, in partial mode), the context
// is copied to the stack for better codegen and cache behavior; otherwise it
// must be processed in place so tracked objects' TryMark* callbacks see the
// live context.
template <bool parallel, bool interior>
void
Recycler::ProcessMarkContext(MarkContext * markContext)
{
#if ENABLE_CONCURRENT_GC
    // Copying the markContext onto the stack messes up tracked object handling, because
    // the tracked object will call TryMark[Non]Interior to report its references.
    // These functions implicitly use the main markContext on the Recycler, but this will
    // be overridden if we're processing the main markContext here.
    // So, don't do this if we are going to process tracked objects.
    // (This will be the case if we're not queuing and we're not in partial mode, which ignores tracked objects.)
    // In this case we shouldn't be parallel anyway, so we don't need to worry about cache behavior.
    // We should revisit how we manage markContexts in general in the future, and clean this up
    // by passing the MarkContext through to the tracked object's Mark method.
#if ENABLE_PARTIAL_GC
    if (this->inPartialCollectMode || DoQueueTrackedObject())
#else
    if (DoQueueTrackedObject())
#endif
    {
        // The markContext as passed is one of the markContexts that lives on the Recycler.
        // Copy it locally for processing.
        // This serves two purposes:
        // (1) Allow for better codegen because the markContext is local and we don't need to track the this pointer separately
        //     (because all the key processing is inlined into this function).
        // (2) Ensure we don't have weird cache behavior because we're accidentally writing to the same cache line from
        //     multiple threads during parallel marking.
        MarkContext localMarkContext = *markContext;

        // Do the actual marking.
        localMarkContext.ProcessMark<parallel, interior>();

        // Copy back to the original location.
        *markContext = localMarkContext;

        // Clear the local mark context.
        localMarkContext.Clear();
    }
    else
#endif
    {
        // In-place processing is single-threaded only.
        Assert(!parallel);
        markContext->ProcessMark<parallel, interior>();
    }
}
- void
- Recycler::ProcessMark(bool background)
- {
- if (background)
- {
- GCETW(GC_BACKGROUNDMARK_START, (this, backgroundRescanCount));
- }
- else
- {
- GCETW(GC_MARK_START, (this));
- }
- RECYCLER_PROFILE_EXEC_THREAD_BEGIN(background, this, Js::MarkPhase);
- if (this->enableScanInteriorPointers)
- {
- this->ProcessMarkContext</* parallel */ false, /* interior */ true>(&markContext);
- }
- else
- {
- this->ProcessMarkContext</* parallel */ false, /* interior */ false>(&markContext);
- }
- RECYCLER_PROFILE_EXEC_THREAD_END(background, this, Js::MarkPhase);
- if (background)
- {
- GCETW(GC_BACKGROUNDMARK_STOP, (this, backgroundRescanCount));
- }
- else
- {
- GCETW(GC_MARK_STOP, (this));
- }
- DebugOnly(this->markContext.VerifyPostMarkState());
- }
- void
- Recycler::ProcessParallelMark(bool background, MarkContext * markContext)
- {
- if (background)
- {
- GCETW(GC_BACKGROUNDPARALLELMARK_START, (this, backgroundRescanCount));
- }
- else
- {
- GCETW(GC_PARALLELMARK_START, (this));
- }
- RECYCLER_PROFILE_EXEC_THREAD_BEGIN(background, this, Js::MarkPhase);
- if (this->enableScanInteriorPointers)
- {
- this->ProcessMarkContext</* parallel */ true, /* interior */ true>(markContext);
- }
- else
- {
- this->ProcessMarkContext</* parallel */ true, /* interior */ false>(markContext);
- }
- RECYCLER_PROFILE_EXEC_THREAD_END(background, this, Js::MarkPhase);
- if (background)
- {
- GCETW(GC_BACKGROUNDPARALLELMARK_STOP, (this, backgroundRescanCount));
- }
- else
- {
- GCETW(GC_PARALLELMARK_STOP, (this));
- }
- }
- void
- Recycler::Mark()
- {
- // Marking in thread, we can just pre-mark them
- ResetMarks(this->enableScanImplicitRoots ? ResetMarkFlags_InThreadImplicitRoots : ResetMarkFlags_InThread);
- collectionState = CollectionStateFindRoots;
- RootMark(CollectionStateMark);
- }
- #if ENABLE_CONCURRENT_GC
// Enter "queue tracked objects" mode: tracked objects discovered during
// marking are queued for later processing instead of being processed inline.
// Must not already be queuing, and must not be in partial collect mode
// (partial collects ignore tracked objects).
void
Recycler::StartQueueTrackedObject()
{
    Assert(!this->queueTrackedObject);
    Assert(!this->HasPendingTrackObjects());
#if ENABLE_PARTIAL_GC
    Assert(this->clientTrackedObjectList.Empty());
    Assert(!this->inPartialCollectMode);
#endif
    this->queueTrackedObject = true;
}
// Returns whether tracked objects should currently be queued rather than
// processed inline. The asserts cross-check the flag against the collection
// state: concurrent/parallel mark requires queuing, and queuing is mutually
// exclusive with partial collect mode.
bool
Recycler::DoQueueTrackedObject() const
{
    Assert(this->queueTrackedObject || !this->IsConcurrentMarkState());
    Assert(this->queueTrackedObject || this->isProcessingTrackedObjects || !this->HasPendingTrackObjects());
#if ENABLE_PARTIAL_GC
    Assert(this->queueTrackedObject || this->inPartialCollectMode || !(this->collectionState == CollectionStateParallelMark));
    Assert(!this->queueTrackedObject || (this->clientTrackedObjectList.Empty() && !this->inPartialCollectMode));
#else
    Assert(this->queueTrackedObject || !(this->collectionState == CollectionStateParallelMark));
#endif
    return this->queueTrackedObject;
}
- #endif
// Return the recycler to the "not collecting" state: clear per-collection
// flags, flush unroots left pending during the collection, and finish any
// in-progress partial collect. The mark stacks must already be empty.
void
Recycler::ResetCollectionState()
{
    Assert(IsMarkStackEmpty());
    this->collectionState = CollectionStateNotCollecting;
#if ENABLE_CONCURRENT_GC
    this->backgroundFinishMarkCount = 0;
#endif
    this->inExhaustiveCollection = false;
    this->inDecommitNowCollection = false;
#if ENABLE_CONCURRENT_GC
    // Process unroot requests that were deferred while collecting.
    CleanupPendingUnroot();
#endif
#if ENABLE_PARTIAL_GC
    if (inPartialCollectMode)
    {
        FinishPartialCollect();
    }
#endif
#if ENABLE_CONCURRENT_GC
    Assert(!this->DoQueueTrackedObject());
#endif
#ifdef RECYCLER_FINALIZE_CHECK
    // Reset the collection stats.
    this->collectionStats.finalizeCount = this->autoHeap.liveFinalizableObjectCount - this->autoHeap.newFinalizableObjectCount - this->autoHeap.pendingDisposableObjectCount;
#endif
}
// Abort an in-progress mark: discard queued mark work, release the parallel
// mark contexts, unlock guest arena block lists, stop queuing tracked
// objects, then fall through to the common collection-state reset.
void
Recycler::ResetMarkCollectionState()
{
    // If we aborted after doing a background Rescan, there will be entries in the markContext.
    // Abort these entries and reset the markContext state.
    markContext.Abort();

    // If we aborted after doing a background parallel Mark, we wouldn't have cleaned up the
    // parallel markContexts yet. Clean these up now.
    // Note parallelMarkContext1 is not used in background parallel (see DoBackgroundParallelMark)
    parallelMarkContext2.Cleanup();
    parallelMarkContext3.Cleanup();

    this->ClearNeedOOMRescan();
    DebugOnly(this->isProcessingRescan = false);
#if ENABLE_CONCURRENT_GC
    // If we're reseting the mark collection state, we need to unlock the block list
    DListBase<GuestArenaAllocator>::EditingIterator guestArenaIter(&guestArenaList);
    while (guestArenaIter.Next())
    {
        GuestArenaAllocator& allocator = guestArenaIter.Data();
        allocator.SetLockBlockList(false);
    }

    this->queueTrackedObject = false;
#endif
    ResetCollectionState();
}
- void
- Recycler::ResetHeuristicCounters()
- {
- autoHeap.lastUncollectedAllocBytes = autoHeap.uncollectedAllocBytes;
- autoHeap.uncollectedAllocBytes = 0;
- autoHeap.uncollectedExternalBytes = 0;
- ResetPartialHeuristicCounters();
- }
// Clear the partial-GC-specific allocation heuristic (uncollected new page
// count). No-op when partial GC support is compiled out.
void Recycler::ResetPartialHeuristicCounters()
{
#if ENABLE_PARTIAL_GC
    autoHeap.uncollectedNewPageCount = 0;
#endif
}
- void
- Recycler::ScheduleNextCollection()
- {
- this->tickCountNextCollection = ::GetTickCount() + RecyclerHeuristic::TickCountCollection;
- this->tickCountNextFinishCollection = ::GetTickCount() + RecyclerHeuristic::TickCountFinishCollection;
- }
- #if ENABLE_CONCURRENT_GC
// Let the heap set up its per-bucket state ahead of a (concurrent) sweep.
void
Recycler::PrepareSweep()
{
    autoHeap.PrepareSweep();
}
- #endif
// Rescan pages dirtied since the concurrent mark and remark from them,
// preferring a background finish-mark when heuristics allow it.
// Returns the number of root bytes scanned, or Recycler::InvalidScanRootBytes
// if the time-boxed background finish-mark did not complete within waitTime.
size_t
Recycler::RescanMark(DWORD waitTime)
{
    bool const onLowMemory = this->NeedOOMRescan();
    // REVIEW: Why are we asserting for DoQueueTrackedObject here?
    // Should we split this into different asserts depending on whether
    // concurrent or partial is enabled?
#if ENABLE_CONCURRENT_GC
#if ENABLE_PARTIAL_GC
    Assert(this->inPartialCollectMode || DoQueueTrackedObject());
#else
    Assert(DoQueueTrackedObject());
#endif
#endif
    {
        // We are about to do a rescan mark, which for consistency requires the runtime to stop any additional mutator threads
        AUTO_NO_EXCEPTION_REGION;
        collectionWrapper->PreRescanMarkCallback();
    }

    // Always called in-thread
    Assert(collectionState == CollectionStateRescanFindRoots);
#if ENABLE_CONCURRENT_GC
    if (!onLowMemory && // Don't do background finish mark if we are low on memory
        // Only do background finish mark if we have a time limit or it is forced
        (CUSTOM_PHASE_FORCE1(GetRecyclerFlagsTable(), Js::BackgroundFinishMarkPhase) || waitTime != INFINITE) &&
        // Don't do background finish mark if we failed to finish mark too many times
        (this->backgroundFinishMarkCount < RecyclerHeuristic::MaxBackgroundFinishMarkCount(this->GetRecyclerFlagsTable())))
    {
        this->PrepareBackgroundFindRoots();
        if (StartConcurrent(CollectionStateConcurrentFinishMark))
        {
            this->backgroundFinishMarkCount++;
            this->PrepareSweep();
            GCETW(GC_RESCANMARKWAIT_START, (this, waitTime));
            const BOOL waited = WaitForConcurrentThread(waitTime);
            GCETW(GC_RESCANMARKWAIT_STOP, (this, !waited));
            if (!waited)
            {
                // Timed out: the background finish mark is still running; the
                // caller will come back later to complete the collection.
                CUSTOM_PHASE_PRINT_TRACE1(GetRecyclerFlagsTable(), Js::BackgroundFinishMarkPhase, _u("Finish mark timed out\n"));
                {
                    // We timed out doing the finish mark, notify the runtime
                    AUTO_NO_EXCEPTION_REGION;
                    collectionWrapper->RescanMarkTimeoutCallback();
                }
                return Recycler::InvalidScanRootBytes;
            }

            Assert(collectionState == CollectionStateRescanWait);
            collectionState = CollectionStateRescanFindRoots;
#ifdef RECYCLER_WRITE_WATCH
            if (!CONFIG_FLAG(ForceSoftwareWriteBarrier))
            {
                // The background finish mark consumed all write-watched pages.
                Assert(recyclerPageAllocator.GetWriteWatchPageCount() == 0);
                Assert(recyclerLargeBlockPageAllocator.GetWriteWatchPageCount() == 0);
            }
#endif
            return this->backgroundRescanRootBytes;
        }
        this->RevertPrepareBackgroundFindRoots();
    }
#endif
#if ENABLE_CONCURRENT_GC
    this->backgroundFinishMarkCount = 0;
#endif
    // In-thread rescan: convert the rescanned page count to bytes.
    return FinishMarkRescan(false) * AutoSystemInfo::PageSize;
}
// Complete the mark phase after concurrent marking: rescan dirty pages,
// process client-tracked objects (partial collect) or regular tracked
// objects, then mark from the roots one more time.
// Returns total scanned root bytes, or Recycler::InvalidScanRootBytes if the
// rescan's background finish-mark timed out.
size_t
Recycler::FinishMark(DWORD waitTime)
{
    size_t scannedRootBytes = RescanMark(waitTime);
    // An infinite wait can't time out.
    Assert(waitTime != INFINITE || scannedRootBytes != Recycler::InvalidScanRootBytes);
    if (scannedRootBytes != Recycler::InvalidScanRootBytes)
    {
#if DBG && ENABLE_PARTIAL_GC
        RecyclerVerboseTrace(GetRecyclerFlagsTable(), _u("CTO: %d\n"), this->clientTrackedObjectList.Count());
#endif

#if ENABLE_PARTIAL_GC
        if (this->inPartialCollectMode)
        {
            RecyclerVerboseTrace(GetRecyclerFlagsTable(), _u("Processing client tracked objects\n"));
            ProcessClientTrackedObjects();
        }
        else
#endif
#if ENABLE_CONCURRENT_GC
        if (DoQueueTrackedObject())
        {
            RecyclerVerboseTrace(GetRecyclerFlagsTable(), _u("Processing regular tracked objects\n"));
            ProcessTrackedObjects();
#ifdef RECYCLER_WRITE_WATCH
            if (!CONFIG_FLAG(ForceSoftwareWriteBarrier))
            {
                // After a background finish mark, all write watches were consumed.
                Assert(this->backgroundFinishMarkCount == 0 ||
                    (this->recyclerPageAllocator.GetWriteWatchPageCount() == 0 &&
                        this->recyclerLargeBlockPageAllocator.GetWriteWatchPageCount() == 0));
            }
#endif
        }
#endif

        // Continue to mark from root one more time
        scannedRootBytes += RootMark(CollectionStateRescanMark);
    }
    return scannedRootBytes;
}
- #if ENABLE_CONCURRENT_GC
- void
- Recycler::DoParallelMark()
- {
- Assert(this->enableParallelMark);
- Assert(this->maxParallelism > 1 && this->maxParallelism <= 4);
- // Split the mark stack into [this->maxParallelism] equal pieces.
- // The actual # of splits is returned, in case the stack was too small to split that many ways.
- MarkContext * splitContexts[3] = { ¶llelMarkContext1, ¶llelMarkContext2, ¶llelMarkContext3 };
- uint actualSplitCount = markContext.Split(this->maxParallelism - 1, splitContexts);
- Assert(actualSplitCount <= 3);
- // If we failed to split at all, just mark in thread with no parallelism.
- if (actualSplitCount == 0)
- {
- this->ProcessMark(false);
- return;
- }
- // We need to queue tracked objects while we mark in parallel.
- // (Unless it's a partial collect, in which case we don't process tracked objects at all)
- #if ENABLE_PARTIAL_GC
- if (!this->inPartialCollectMode)
- #endif
- {
- StartQueueTrackedObject();
- }
- // Kick off marking on the background thread
- bool concurrentSuccess = StartConcurrent(CollectionStateParallelMark);
- // If there's enough work to split, then kick off marking on parallel threads too.
- // If the threads haven't been created yet, this will create them (or fail).
- bool parallelSuccess1 = false;
- bool parallelSuccess2 = false;
- if (concurrentSuccess && actualSplitCount >= 2)
- {
- parallelSuccess1 = parallelThread1.StartConcurrent();
- if (parallelSuccess1 && actualSplitCount == 3)
- {
- parallelSuccess2 = parallelThread2.StartConcurrent();
- }
- }
- // Process our portion of the split.
- this->ProcessParallelMark(false, ¶llelMarkContext1);
- // If we successfully launched parallel work, wait for it to complete.
- // If we failed, then process the work in-thread now.
- if (concurrentSuccess)
- {
- WaitForConcurrentThread(INFINITE);
- }
- else
- {
- this->ProcessParallelMark(false, &markContext);
- }
- if (actualSplitCount >= 2)
- {
- if (parallelSuccess1)
- {
- parallelThread1.WaitForConcurrent();
- }
- else
- {
- this->ProcessParallelMark(false, ¶llelMarkContext2);
- }
- if (actualSplitCount == 3)
- {
- if (parallelSuccess2)
- {
- parallelThread2.WaitForConcurrent();
- }
- else
- {
- this->ProcessParallelMark(false, ¶llelMarkContext3);
- }
- }
- }
- this->collectionState = CollectionStateMark;
- // Process tracked objects, if any, then do one final mark phase in case they marked any new objects.
- // (Unless it's a partial collect, in which case we don't process tracked objects at all)
- #if ENABLE_PARTIAL_GC
- if (!this->inPartialCollectMode)
- #endif
- {
- this->ProcessTrackedObjects();
- this->ProcessMark(false);
- }
- #if ENABLE_PARTIAL_GC
- else
- {
- Assert(!this->HasPendingTrackObjects());
- }
- #endif
- }
- void
- Recycler::DoBackgroundParallelMark()
- {
- // Split the mark stack into [this->maxParallelism - 1] equal pieces (thus, "- 2" below).
- // The actual # of splits is returned, in case the stack was too small to split that many ways.
- // The parallel threads are hardwired to use parallelMarkContext2/3, so we split using those.
- uint actualSplitCount = 0;
- MarkContext * splitContexts[2] = { ¶llelMarkContext2, ¶llelMarkContext3 };
- if (this->enableParallelMark)
- {
- Assert(this->maxParallelism > 1 && this->maxParallelism <= 4);
- if (this->maxParallelism > 2)
- {
- actualSplitCount = markContext.Split(this->maxParallelism - 2, splitContexts);
- }
- }
- Assert(actualSplitCount <= 2);
- // If we failed to split at all, just mark in thread with no parallelism.
- if (actualSplitCount == 0)
- {
- this->ProcessMark(true);
- return;
- }
- #if ENABLE_PARTIAL_GC
- // We should already be set up to queue tracked objects, unless this is a partial collect
- Assert(this->DoQueueTrackedObject() || this->inPartialCollectMode);
- #else
- Assert(this->DoQueueTrackedObject());
- #endif
- this->collectionState = CollectionStateBackgroundParallelMark;
- // Kick off marking on parallel threads too, if there is work for them
- // If the threads haven't been created yet, this will create them (or fail).
- bool parallelSuccess1 = false;
- bool parallelSuccess2 = false;
- parallelSuccess1 = parallelThread1.StartConcurrent();
- if (parallelSuccess1 && actualSplitCount == 2)
- {
- parallelSuccess2 = parallelThread2.StartConcurrent();
- }
- // Process our portion of the split.
- this->ProcessParallelMark(true, &markContext);
- // If we successfully launched parallel work, wait for it to complete.
- // If we failed, then process the work in-thread now.
- if (parallelSuccess1)
- {
- parallelThread1.WaitForConcurrent();
- }
- else
- {
- this->ProcessParallelMark(true, ¶llelMarkContext2);
- }
- if (actualSplitCount == 2)
- {
- if (parallelSuccess2)
- {
- parallelThread2.WaitForConcurrent();
- }
- else
- {
- this->ProcessParallelMark(true, ¶llelMarkContext3);
- }
- }
- this->collectionState = CollectionStateConcurrentMark;
- }
- #endif
// Scan all roots (host-provided roots via the collection wrapper, registered
// roots via FindRoots, and thread stacks), then run the mark phase —
// parallel when enabled. Returns the number of root bytes scanned, which
// feeds the partial-collect heuristics.
// markState is either CollectionStateMark (initial mark) or
// CollectionStateRescanMark (finish/rescan mark).
size_t
Recycler::RootMark(CollectionState markState)
{
    size_t scannedRootBytes = 0;
    Assert(!this->NeedOOMRescan() || markState == CollectionStateRescanMark);
#if ENABLE_PARTIAL_GC
    RecyclerVerboseTrace(GetRecyclerFlagsTable(), _u("PreMark done, partial collect: %d\n"), this->inPartialCollectMode);
#else
    RecyclerVerboseTrace(GetRecyclerFlagsTable(), _u("PreMark done, partial collect not available\n"));
#endif

    Assert(collectionState == (markState == CollectionStateMark? CollectionStateFindRoots : CollectionStateRescanFindRoots));
    BOOL stacksScannedByRuntime = FALSE;
    {
        // We are about to scan roots in thread, notify the runtime first so it can stop threads if necessary and also provide additional roots
        AUTO_NO_EXCEPTION_REGION;
        RecyclerScanMemoryCallback scanMemory(this);
        scannedRootBytes += collectionWrapper->RootMarkCallback(scanMemory, &stacksScannedByRuntime);
    }

    scannedRootBytes += FindRoots();
    if (!stacksScannedByRuntime)
    {
        // The runtime did not scan the stack(s) for us, so we use the normal Recycler code.
        scannedRootBytes += ScanStack();
    }

    this->collectionState = markState;
#if ENABLE_CONCURRENT_GC
    if (this->enableParallelMark)
    {
        this->DoParallelMark();
    }
    else
#endif
    {
        this->ProcessMark(false);
    }

    if (this->EndMark())
    {
        // EndMark reported an OOM rescan pass.
        // REVIEW: This heuristic doesn't apply when partial is off so there's no need
        // to modify scannedRootBytes here, correct?
#if ENABLE_PARTIAL_GC
        // return large root scanned byte to not get into partial mode if we are low on memory
        scannedRootBytes = RecyclerSweep::MaxPartialCollectRescanRootBytes + 1;
#endif
    }
    return scannedRootBytes;
}
// If marking hit an OOM (the mark stack could not grow), rerun the mark via
// EndMarkOnLowMemory. Returns true when that OOM recovery path was taken.
// When only dumping the object graph, the OOM is recorded on the dumper
// instead of rerunning the mark.
bool
Recycler::EndMarkCheckOOMRescan()
{
    bool oomRescan = false;
    if (this->NeedOOMRescan())
    {
#ifdef RECYCLER_DUMP_OBJECT_GRAPH
        if (this->objectGraphDumper)
        {
            // Do not complete the mark if we are just dumping the object graph
            // Just report out of memory
            this->objectGraphDumper->isOutOfMemory = true;
            this->ClearNeedOOMRescan();
        }
        else
#endif
        {
            EndMarkOnLowMemory();
            oomRescan = true;
        }
    }
    // Done with the mark stack, it should be empty.
    // Release pages it is holding.
    Assert(!HasPendingMarkObjects());
    Assert(!HasPendingTrackObjects());
    return oomRescan;
}
// Finish the mark phase: notify the host, handle any OOM-during-mark rescan,
// run object-before-collect callbacks (rechecking OOM afterwards, since the
// callbacks may mark more objects), then release and decommit all pages held
// by the mark contexts.
// Returns true if an OOM rescan pass was needed; RootMark uses this to steer
// the partial-collect heuristic away from partial mode under memory pressure.
bool
Recycler::EndMark()
{
#if ENABLE_CONCURRENT_GC
    Assert(!this->DoQueueTrackedObject());
#endif
#if ENABLE_PARTIAL_GC
    Assert(this->clientTrackedObjectList.Empty());
#endif
    {
        // We have finished marking
        AUTO_NO_EXCEPTION_REGION;
        collectionWrapper->EndMarkCallback();
    }

    bool oomRescan = EndMarkCheckOOMRescan();

    if (ProcessObjectBeforeCollectCallbacks())
    {
        // callbacks may trigger additional marking, need to check OOMRescan again
        oomRescan |= EndMarkCheckOOMRescan();
    }

    // GC-CONSIDER: Consider keeping some page around
    GCETW(GC_DECOMMIT_CONCURRENT_COLLECT_PAGE_ALLOCATOR_START, (this));

    // Clean up mark contexts, which will release held free pages
    // Do this for all contexts before we decommit, to make sure all pages are freed
    markContext.Cleanup();
    parallelMarkContext1.Cleanup();
    parallelMarkContext2.Cleanup();
    parallelMarkContext3.Cleanup();

    // Decommit all pages
    markContext.DecommitPages();
    parallelMarkContext1.DecommitPages();
    parallelMarkContext2.DecommitPages();
    parallelMarkContext3.DecommitPages();

    GCETW(GC_DECOMMIT_CONCURRENT_COLLECT_PAGE_ALLOCATOR_STOP, (this));
    return oomRescan;
}
// Recovery path for OOM during mark: decommit as much memory as possible,
// then repeatedly rescan the blocks that failed to be fully marked and
// remark, until no block reports OOM anymore. Fails fast if even a single
// block cannot be rescanned.
void
Recycler::EndMarkOnLowMemory()
{
    GCETW(GC_ENDMARKONLOWMEMORY_START, (this));
    Assert(this->NeedOOMRescan());
    this->inEndMarkOnLowMemory = true;

    // Treat this as a concurrent mark reset so that we don't invalidate the allocators
    RecyclerVerboseTrace(GetRecyclerFlagsTable(), _u("OOM during mark- rerunning mark\n"));

    // Try to release as much memory as possible
    ForEachPageAllocator([](IdleDecommitPageAllocator* pageAlloc)
    {
        pageAlloc->DecommitNow();
    });

#ifdef ENABLE_DEBUG_CONFIG_OPTIONS
    uint iterations = 0;
#endif

    do
    {
#if ENABLE_PARTIAL_GC
        Assert(this->clientTrackedObjectList.Empty());
#endif
#if ENABLE_CONCURRENT_GC
        // Always queue tracked objects during rescan, to avoid changes to mark state.
        // (Unless we're in a partial, in which case we ignore tracked objects)
        Assert(!this->DoQueueTrackedObject());
#if ENABLE_PARTIAL_GC
        if (!this->inPartialCollectMode)
#endif
        {
            this->StartQueueTrackedObject();
        }
#endif

        this->collectionState = CollectionStateRescanFindRoots;
        this->ClearNeedOOMRescan();

#if DBG
        Assert(!this->isProcessingRescan);
        this->isProcessingRescan = true;
#endif

        if (!heapBlockMap.OOMRescan(this))
        {
            // Kill the process- we couldn't even rescan a single block
            // We are in pretty low memory state at this point
            // The fail-fast is present for two reasons:
            // 1) Defense-in-depth for cases we hadn't thought about
            // 2) Deal with cases like -MaxMarkStackPageCount:1 which can still hang without the fail-fast
            MarkStack_OOM_fatal_error();
        }

        autoHeap.Rescan(RescanFlags_None);

        DebugOnly(this->isProcessingRescan = false);

        this->ProcessMark(false);

#if ENABLE_CONCURRENT_GC
        // Process any tracked objects we found
#if ENABLE_PARTIAL_GC
        if (!this->inPartialCollectMode)
#endif
        {
            ProcessTrackedObjects();
        }
#endif
        // Drain the mark stack
        ProcessMark(false);

#ifdef ENABLE_DEBUG_CONFIG_OPTIONS
        iterations++;
#endif
    }
    // Processing above may itself OOM again; keep iterating until it doesn't.
    while (this->NeedOOMRescan());

    Assert(!markContext.GetPageAllocator()->DisableAllocationOutOfMemory());
    Assert(!parallelMarkContext1.GetPageAllocator()->DisableAllocationOutOfMemory());
    Assert(!parallelMarkContext2.GetPageAllocator()->DisableAllocationOutOfMemory());
    Assert(!parallelMarkContext3.GetPageAllocator()->DisableAllocationOutOfMemory());

    CUSTOM_PHASE_PRINT_TRACE1(GetRecyclerFlagsTable(), Js::RecyclerPhase, _u("EndMarkOnLowMemory iterations: %d\n"), iterations);

#if ENABLE_PARTIAL_GC
    Assert(this->clientTrackedObjectList.Empty());
#endif
#if ENABLE_CONCURRENT_GC
    Assert(!this->DoQueueTrackedObject());
#endif

    this->inEndMarkOnLowMemory = false;
#if ENABLE_PARTIAL_GC
    if (this->inPartialCollectMode)
    {
        // Too little memory to stay in partial mode; finish the partial collect.
        this->FinishPartialCollect();
    }
#endif
    GCETW(GC_ENDMARKONLOWMEMORY_STOP, (this));
}
- #if DBG
- bool
- Recycler::IsMarkStackEmpty()
- {
- return (markContext.IsEmpty() && parallelMarkContext1.IsEmpty() && parallelMarkContext2.IsEmpty() && parallelMarkContext3.IsEmpty());
- }
- #endif
- #ifdef HEAP_ENUMERATION_VALIDATION
- void
- Recycler::PostHeapEnumScan(PostHeapEnumScanCallback callback, void *data)
- {
- this->pfPostHeapEnumScanCallback = callback;
- this->postHeapEnunScanData = data;
- FindRoots();
- ProcessMark(false);
- this->pfPostHeapEnumScanCallback = NULL;
- this->postHeapEnunScanData = NULL;
- }
- #endif
- #if ENABLE_CONCURRENT_GC
- bool
- Recycler::QueueTrackedObject(FinalizableObject * trackableObject)
- {
- return markContext.AddTrackedObject(trackableObject);
- }
- #endif
- bool
- Recycler::FindImplicitRootObject(void* candidate, RecyclerHeapObjectInfo& heapObject)
- {
- HeapBlock* heapBlock = FindHeapBlock(candidate);
- if (heapBlock == nullptr)
- {
- return false;
- }
- if (heapBlock->GetHeapBlockType() < HeapBlock::HeapBlockType::SmallAllocBlockTypeCount)
- {
- return ((SmallHeapBlock*)heapBlock)->FindImplicitRootObject(candidate, this, heapObject);
- }
- else if (!heapBlock->IsLargeHeapBlock())
- {
- return ((MediumHeapBlock*)heapBlock)->FindImplicitRootObject(candidate, this, heapObject);
- }
- else
- {
- return ((LargeHeapBlock*)heapBlock)->FindImplicitRootObject(candidate, this, heapObject);
- }
- }
- bool
- Recycler::FindHeapObject(void* candidate, FindHeapObjectFlags flags, RecyclerHeapObjectInfo& heapObject)
- {
- HeapBlock* heapBlock = FindHeapBlock(candidate);
- return heapBlock && heapBlock->FindHeapObject(candidate, this, flags, heapObject);
- }
// Look up a heap object under the assumption that the in-bucket allocators
// are empty (so 'candidate' cannot lie inside an active allocation range).
bool
Recycler::FindHeapObjectWithClearedAllocators(void* candidate, RecyclerHeapObjectInfo& heapObject)
{
    // Heap enum has some case where it allocates, so we can't assert
    Assert(autoHeap.AllocatorsAreEmpty() || this->isHeapEnumInProgress);
    return FindHeapObject(candidate, FindHeapObjectFlags_ClearedAllocators, heapObject);
}
- void*
- Recycler::GetRealAddressFromInterior(void* candidate)
- {
- HeapBlock * heapBlock = heapBlockMap.GetHeapBlock(candidate);
- if (heapBlock == NULL)
- {
- return NULL;
- }
- return heapBlock->GetRealAddressFromInterior(candidate);
- }
- /*------------------------------------------------------------------------------------------------
- * Sweep
- *------------------------------------------------------------------------------------------------*/
// Sweep the heap after marking. Returns true if a concurrent sweep was
// started (or simulated in-thread after a failed thread start — the caller
// must later finish it as if it ran out of thread), false when the sweep
// completed synchronously.
#if ENABLE_PARTIAL_GC
bool
Recycler::Sweep(size_t rescanRootBytes, bool concurrent, bool adjustPartialHeuristics)
#else
bool
Recycler::Sweep(bool concurrent)
#endif
{
#if ENABLE_PARTIAL_GC && ENABLE_CONCURRENT_GC
    Assert(!this->hasBackgroundFinishPartial);
#endif
#if ENABLE_CONCURRENT_GC
    // Concurrent sweep can be disabled by configuration; fall back to sync.
    if (!this->enableConcurrentSweep)
#endif
    {
        concurrent = false;
    }

    RECYCLER_PROFILE_EXEC_BEGIN(this, concurrent? Js::ConcurrentSweepPhase : Js::SweepPhase);
#if ENABLE_PARTIAL_GC
    recyclerSweepInstance.BeginSweep(this, rescanRootBytes, adjustPartialHeuristics);
#else
    recyclerSweepInstance.BeginSweep(this);
#endif
    this->SweepHeap(concurrent, *recyclerSweep);
#if ENABLE_CONCURRENT_GC
    if (concurrent)
    {
        // If we finished mark in the background, all the relevant write watches should already be reset
        // Only reset write watch if we didn't finish mark in the background
        if (this->backgroundFinishMarkCount == 0)
        {
#if ENABLE_PARTIAL_GC
            if (this->inPartialCollectMode)
            {
#ifdef RECYCLER_WRITE_WATCH
                if (!CONFIG_FLAG(ForceSoftwareWriteBarrier))
                {
                    RECYCLER_PROFILE_EXEC_BEGIN(this, Js::ResetWriteWatchPhase);
                    if (!recyclerPageAllocator.ResetWriteWatch() || !recyclerLargeBlockPageAllocator.ResetWriteWatch())
                    {
                        // Shouldn't happen
                        Assert(false);

                        // Disable partial collect
                        this->enablePartialCollect = false;

                        // We haven't done any partial collection yet, just get out of partial collect mode
                        this->inPartialCollectMode = false;
                    }
                    RECYCLER_PROFILE_EXEC_END(this, Js::ResetWriteWatchPhase);
                }
#endif
            }
#endif
        }
    }
    else
#endif
    {
        // Synchronous sweep: finish everything before returning.
        recyclerSweep->FinishSweep();
        recyclerSweep->EndSweep();
    }
    RECYCLER_PROFILE_EXEC_END(this, concurrent? Js::ConcurrentSweepPhase : Js::SweepPhase);

#if ENABLE_CONCURRENT_GC
    if (concurrent)
    {
        if (!StartConcurrent(CollectionStateConcurrentSweep))
        {
            // Failed to spawn the concurrent sweep.
            // Instead, force the concurrent sweep to happen right here in thread.
            this->collectionState = CollectionStateConcurrentSweep;
            DoBackgroundWork(true);

            // Continue as if the concurrent sweep were executing
            // Next time we check for completion, we will finish the sweep just as if it had happened out of thread.
        }
        return true;
    }
#endif
    return false;
}
- #ifdef ENABLE_DEBUG_CONFIG_OPTIONS
// Debug helper: print live object and recycler page usage perf counters to
// stdout. No-op when perf counters are compiled out.
void Recycler::DisplayMemStats()
{
#ifdef PERF_COUNTERS
#if DBG_DUMP
    printf("Recycler Live Object Count %u\n", PerfCounter::RecyclerCounterSet::GetLiveObjectCounter().GetValue());
    printf("Recycler Live Object Size %u\n", PerfCounter::RecyclerCounterSet::GetLiveObjectSizeCounter().GetValue());
#endif
    printf("Recycler Used Page Size %u\n", PerfCounter::PageAllocatorCounterSet::GetUsedSizeCounter(PageAllocatorType::PageAllocatorType_Recycler).GetValue());
#endif
}
- #endif
- CollectedRecyclerWeakRefHeapBlock CollectedRecyclerWeakRefHeapBlock::Instance;
// Sweep the weak reference map: remove entries whose weak reference object
// itself is unmarked, and for entries whose target is unmarked, null the
// strong ref and remove the entry. The map's callback protocol is: return
// false to remove the entry, true to keep it.
void
Recycler::SweepWeakReference()
{
    RECYCLER_PROFILE_EXEC_BEGIN(this, Js::SweepWeakPhase);
    GCETW(GC_SWEEP_WEAKREF_START, (this));

    // REVIEW: Clean up the weak reference map concurrently?
    bool hasCleanup = false;
    weakReferenceMap.Map([&hasCleanup](RecyclerWeakReferenceBase * weakRef) -> bool
    {
        // The weak reference object itself died: drop the entry.
        if (!weakRef->weakRefHeapBlock->TestObjectMarkedBit(weakRef))
        {
            hasCleanup = true;
            // Remove
            return false;
        }

        // The referenced object died: clear the strong ref and drop the entry.
        if (!weakRef->strongRefHeapBlock->TestObjectMarkedBit(weakRef->strongRef))
        {
            hasCleanup = true;
            weakRef->strongRef = nullptr;
            // Put in a dummy heap block so that we can still do the isPendingConcurrentSweep check first.
            weakRef->strongRefHeapBlock = &CollectedRecyclerWeakRefHeapBlock::Instance;
            // Remove
            return false;
        }

        // Keep
        return true;
    });

    // Bump the cleanup id so callers can detect that weak refs were swept.
    this->weakReferenceCleanupId += hasCleanup;

    GCETW(GC_SWEEP_WEAKREF_STOP, (this));
    RECYCLER_PROFILE_EXEC_END(this, Js::SweepWeakPhase);
}
// Run the sweep over the whole heap. For a concurrent sweep this sets up
// queued page zeroing and the SetupConcurrentSweep state; either way the
// weak reference map is swept first and idle-decommit is suspended around
// the heap sweep itself.
void
Recycler::SweepHeap(bool concurrent, RecyclerSweep& recyclerSweep)
{
    Assert(!this->hasPendingDeleteGuestArena);
    Assert(!this->isHeapEnumInProgress);
#if ENABLE_CONCURRENT_GC
    Assert(!this->DoQueueTrackedObject());
    if (concurrent)
    {
        collectionState = CollectionStateSetupConcurrentSweep;
#if ENABLE_BACKGROUND_PAGE_ZEROING
        if (CONFIG_FLAG(EnableBGFreeZero))
        {
            // Only queue up non-leaf pages- leaf pages don't need to be zeroed out
            recyclerPageAllocator.StartQueueZeroPage();
            recyclerLargeBlockPageAllocator.StartQueueZeroPage();
#ifdef RECYCLER_WRITE_BARRIER_ALLOC_SEPARATE_PAGE
            recyclerWithBarrierPageAllocator.StartQueueZeroPage();
#endif
        }
#endif
    }
    else
#endif
    {
        Assert(!concurrent);
        collectionState = CollectionStateSweep;
    }

    this->SweepWeakReference();
#if ENABLE_CONCURRENT_GC
    if (concurrent)
    {
        GCETW(GC_SETUPBACKGROUNDSWEEP_START, (this));
    }
    else
#endif
    {
        GCETW(GC_SWEEP_START, (this));
    }

    // Suspend idle decommit while the sweep walks and mutates the heap.
    recyclerPageAllocator.SuspendIdleDecommit();
#ifdef RECYCLER_WRITE_BARRIER_ALLOC_SEPARATE_PAGE
    recyclerWithBarrierPageAllocator.SuspendIdleDecommit();
#endif
    recyclerLargeBlockPageAllocator.SuspendIdleDecommit();

    autoHeap.Sweep(recyclerSweep, concurrent);

#ifdef RECYCLER_WRITE_BARRIER_ALLOC_SEPARATE_PAGE
    recyclerWithBarrierPageAllocator.ResumeIdleDecommit();
#endif
    recyclerPageAllocator.ResumeIdleDecommit();
    recyclerLargeBlockPageAllocator.ResumeIdleDecommit();

#if ENABLE_CONCURRENT_GC
    if (concurrent)
    {
#if ENABLE_BACKGROUND_PAGE_ZEROING
        if (CONFIG_FLAG(EnableBGFreeZero))
        {
            recyclerPageAllocator.StopQueueZeroPage();
            recyclerLargeBlockPageAllocator.StopQueueZeroPage();
#ifdef RECYCLER_WRITE_BARRIER_ALLOC_SEPARATE_PAGE
            recyclerWithBarrierPageAllocator.StopQueueZeroPage();
#endif
        }
#endif
        GCETW(GC_SETUPBACKGROUNDSWEEP_STOP, (this));
    }
    else
    {
#if ENABLE_BACKGROUND_PAGE_ZEROING
        if (CONFIG_FLAG(EnableBGFreeZero))
        {
            // Synchronous sweep must not leave pages queued for zeroing.
            Assert(!recyclerPageAllocator.HasZeroQueuedPages());
            Assert(!recyclerLargeBlockPageAllocator.HasZeroQueuedPages());
#ifdef RECYCLER_WRITE_BARRIER_ALLOC_SEPARATE_PAGE
            Assert(!recyclerWithBarrierPageAllocator.HasZeroQueuedPages());
#endif
        }
#endif
        uint sweptBytes = 0;
#ifdef RECYCLER_STATS
        sweptBytes = (uint)collectionStats.objectSweptBytes;
#endif
        GCETW(GC_SWEEP_STOP, (this, sweptBytes));
    }
#endif
}
- #if ENABLE_PARTIAL_GC && ENABLE_CONCURRENT_GC
// Finish an in-progress partial collect from the background sweep, taking
// the recycler out of partial collect mode.
void
Recycler::BackgroundFinishPartialCollect(RecyclerSweep * recyclerSweep)
{
    Assert(this->inPartialCollectMode);
    Assert(recyclerSweep != nullptr && recyclerSweep->IsBackground());

    this->hasBackgroundFinishPartial = true;
    this->autoHeap.FinishPartialCollect(recyclerSweep);
    this->inPartialCollectMode = false;
}
- #endif
// Call Dispose on all pending finalizable objects. Dispose can run script
// and allocate, so allocation-tracking state is saved/restored around it,
// re-entrant dispose is blocked via allowDispose, and (projection builds)
// external weak references are resolved afterwards since that can lead to
// new script entry.
void
Recycler::DisposeObjects()
{
    Assert(this->allowDispose && this->hasDisposableObject && !this->inDispose);
    Assert(!isHeapEnumInProgress);

    GCETW(GC_DISPOSE_START, (this));
    ASYNC_HOST_OPERATION_START(collectionWrapper);

    this->inDispose = true;

#ifdef PROFILE_RECYCLER_ALLOC
    // finalizer may allocate memory and dispose object can happen in the middle of allocation
    // save and restore the tracked object info
    TrackAllocData oldAllocData = { 0 };
    if (trackerDictionary != nullptr)
    {
        oldAllocData = nextAllocData;
        nextAllocData.Clear();
    }
#endif
#ifdef ENABLE_DEBUG_CONFIG_OPTIONS
    if (GetRecyclerFlagsTable().Trace.IsEnabled(Js::RecyclerPhase))
    {
        Output::Print(_u("Disposing objects\n"));
    }
#endif

    // Disable dispose within this method, restore it when we're done
    AutoRestoreValue<bool> disableDispose(&this->allowDispose, false);

#ifdef FAULT_INJECTION
    this->collectionWrapper->DisposeScriptContextByFaultInjectionCallBack();
#endif

    // Scope timestamp to just dispose
    {
        AUTO_TIMESTAMP(dispose);
        autoHeap.DisposeObjects();
    }

#ifdef PROFILE_RECYCLER_ALLOC
    if (trackerDictionary != nullptr)
    {
        Assert(nextAllocData.IsEmpty());
        nextAllocData = oldAllocData;
    }
#endif

#ifdef ENABLE_PROJECTION
    {
        Assert(!this->inResolveExternalWeakReferences);
        Assert(!this->allowDispose);
#if DBG || defined RECYCLER_TRACE
        AutoRestoreValue<bool> inResolveExternalWeakReferencedObjects(&this->inResolveExternalWeakReferences, true);
#endif
        AUTO_TIMESTAMP(externalWeakReferenceObjectResolve);

        // This is where it is safe to resolve external weak references as they can lead to new script entry
        collectionWrapper->ResolveExternalWeakReferencedObjects();
    }
#endif

    Assert(!this->inResolveExternalWeakReferences);
    Assert(this->inDispose);
    this->inDispose = false;

    ASYNC_HOST_OPERATION_END(collectionWrapper);

    uint sweptBytes = 0;
#ifdef RECYCLER_STATS
    sweptBytes = (uint)collectionStats.objectSweptBytes;
#endif
    GCETW(GC_DISPOSE_STOP, (this, sweptBytes));
}
// Disposes pending disposable objects if dispose is currently allowed.
// Returns true if DisposeObjects was actually run, false if disposal was
// skipped (nothing disposable, or dispose not allowed right now).
bool
Recycler::FinishDisposeObjects()
{
    CUSTOM_PHASE_PRINT_TRACE1(GetRecyclerFlagsTable(), Js::DisposePhase, _u("[Dispose] AllowDispose in FinishDisposeObject: %d\n"), this->allowDispose);

    if (this->hasDisposableObject && this->allowDispose)
    {
        CUSTOM_PHASE_PRINT_TRACE1(GetRecyclerFlagsTable(), Js::DisposePhase, _u("[Dispose] FinishDisposeObject, calling Dispose: %d\n"), this->allowDispose);
#ifdef RECYCLER_TRACE
        // DisposeObjects can re-enter script and clobber the trace parameters
        CollectionParam savedCollectionParam = collectionParam;
#endif
        DisposeObjects();
#ifdef RECYCLER_TRACE
        collectionParam = savedCollectionParam;
#endif
        // FinishDisposeObjects is always called either during a collection,
        // or we will check the NeedExhaustiveRepeatCollect(), so no need to check it here
        return true;
    }

#ifdef RECYCLER_TRACE
    if (!this->inDispose && this->hasDisposableObject
        && GetRecyclerFlagsTable().Trace.IsEnabled(Js::RecyclerPhase))
    {
        Output::Print(_u("%04X> RC(%p): %s\n"), this->mainThreadId, this, _u("Dispose object delayed"));
    }
#endif
    return false;
}
- template bool Recycler::FinishDisposeObjectsNow<FinishDispose>();
- template bool Recycler::FinishDisposeObjectsNow<FinishDisposeTimed>();
- template <CollectionFlags flags>
- bool
- Recycler::FinishDisposeObjectsNow()
- {
- if (inDisposeWrapper)
- {
- return false;
- }
- return FinishDisposeObjectsWrapped<flags>();
- }
// Disposes pending objects through the host wrapper if the flags permit it,
// honoring the timed heuristic when requested. Because dispose can pump a
// message loop and queue a reentrant collection request, this also triggers an
// exhaustive repeat collect afterwards when one was requested and allowed.
// Returns true if a dispose pass was performed.
template <CollectionFlags flags>
inline
bool
Recycler::FinishDisposeObjectsWrapped()
{
    const BOOL allowDisposeFlag = flags & CollectOverride_AllowDispose;
    if (allowDisposeFlag && this->NeedDispose())
    {
        if ((flags & CollectHeuristic_TimeIfScriptActive) == CollectHeuristic_TimeIfScriptActive)
        {
            // Timed heuristic: skip the dispose if not enough time has passed
            if (!this->NeedDisposeTimed())
            {
                return false;
            }
        }

        this->allowDispose = true;
        this->inDisposeWrapper = true;

#ifdef RECYCLER_TRACE
        if (GetRecyclerFlagsTable().Trace.IsEnabled(Js::RecyclerPhase))
        {
            Output::Print(_u("%04X> RC(%p): %s\n"), this->mainThreadId, this, _u("Process delayed dispose object"));
        }
#endif
        collectionWrapper->DisposeObjects(this);

        // Dispose may get into message loop and cause a reentrant GC. If those don't allow reentrant
        // it will get added to a pending collect request.
        // FinishDisposeObjectsWrapped/DisposeObjectsWrapped is called at a place that might not be during a collection
        // and won't check NeedExhaustiveRepeatCollect(), need to check it here to honor those requests
        if (!this->CollectionInProgress() && NeedExhaustiveRepeatCollect() && ((flags & CollectOverride_NoExhaustiveCollect) != CollectOverride_NoExhaustiveCollect))
        {
#ifdef RECYCLER_TRACE
            CaptureCollectionParam((CollectionFlags)(flags & ~CollectMode_Partial), true);
#endif
            DoCollectWrapped((CollectionFlags)(flags & ~CollectMode_Partial));
        }

        this->inDisposeWrapper = false;
        return true;
    }
    return false;
}
- /*------------------------------------------------------------------------------------------------
- * Collect
- *------------------------------------------------------------------------------------------------*/
// Performs a full, blocking, in-thread garbage collection on the allocator
// (main) thread: mark, sweep, then finish. Always returns true.
BOOL
Recycler::CollectOnAllocatorThread()
{
#if ENABLE_PARTIAL_GC
    Assert(!inPartialCollectMode);
#endif
#ifdef RECYCLER_TRACE
    PrintCollectTrace(Js::GarbageCollectPhase);
#endif
    this->CollectionBegin<Js::GarbageCollectPhase>();
    this->Mark();

    // Partial collect mode is not re-enabled after a non-partial in-thread GC because partial GC heuristics are not adjusted
    // after a full in-thread GC. Enabling partial collect mode causes partial GC heuristics to be reset before the next full
    // in-thread GC, thereby allowing partial GC to kick in more easily without being able to adjust heuristics after the full
    // GCs. Until we have a way of adjusting partial GC heuristics after a full in-thread GC, once partial collect mode is
    // turned off, it will remain off until a concurrent GC happens
    this->Sweep();

    this->CollectionEnd<Js::GarbageCollectPhase>();
    FinishCollection();
    return true;
}
// Explicitly instantiate all possible modes
// (CollectNow is defined in this translation unit, so every flag combination
// used by callers elsewhere must be instantiated here.)
template BOOL Recycler::CollectNow<CollectOnScriptIdle>();
template BOOL Recycler::CollectNow<CollectOnScriptExit>();
template BOOL Recycler::CollectNow<CollectOnAllocation>();
template BOOL Recycler::CollectNow<CollectOnTypedArrayAllocation>();
template BOOL Recycler::CollectNow<CollectOnScriptCloseNonPrimary>();
template BOOL Recycler::CollectNow<CollectExhaustiveCandidate>();
template BOOL Recycler::CollectNow<CollectNowConcurrent>();
template BOOL Recycler::CollectNow<CollectNowExhaustive>();
template BOOL Recycler::CollectNow<CollectNowDecommitNowExplicit>();
template BOOL Recycler::CollectNow<CollectNowPartial>();
template BOOL Recycler::CollectNow<CollectNowConcurrentPartial>();
template BOOL Recycler::CollectNow<CollectNowForceInThread>();
template BOOL Recycler::CollectNow<CollectNowForceInThreadExternal>();
template BOOL Recycler::CollectNow<CollectNowForceInThreadExternalNoStack>();
template BOOL Recycler::CollectNow<CollectOnRecoverFromOutOfMemory>();
template BOOL Recycler::CollectNow<CollectNowDefault>();
template BOOL Recycler::CollectNow<CollectOnSuspendCleanup>();
template BOOL Recycler::CollectNow<CollectNowDefaultLSCleanup>();
#if defined(CHECK_MEMORY_LEAK) || defined(LEAK_REPORT)
template BOOL Recycler::CollectNow<CollectNowFinalGC>();
#endif
#ifdef ENABLE_DEBUG_CONFIG_OPTIONS
template BOOL Recycler::CollectNow<CollectNowExhaustiveSkipStack>();
#endif
// Top-level GC request entry point. Validates the flag combination, bails out
// while shutting down or inside the post-collection callback, then routes to
// the exhaustive-candidate path or the normal collection path.
template <CollectionFlags flags>
BOOL
Recycler::CollectNow()
{
    // Force-in-thread cannot be concurrent or partial
    CompileAssert((flags & CollectOverride_ForceInThread) == 0 || (flags & (CollectMode_Concurrent | CollectMode_Partial)) == 0);

    // Collections not allowed when the recycler is currently executing the PostCollectionCallback
    if (collectionState == CollectionStatePostCollectionCallback)
    {
        return false;
    }

#if ENABLE_DEBUG_CONFIG_OPTIONS
    if ((disableCollection && (flags & CollectOverride_Explicit) == 0) || isShuttingDown)
#else
    if (isShuttingDown)
#endif
    {
        Assert(collectionState == CollectionStateNotCollecting
            || collectionState == CollectionStateExit
            || this->isShuttingDown);
        return false;
    }

    if (flags & CollectOverride_ExhaustiveCandidate)
    {
        return CollectWithExhaustiveCandidate<flags>();
    }

    return CollectInternal<flags>();
}
- template <CollectionFlags flags>
- BOOL
- Recycler::GetPartialFlag()
- {
- #if ENABLE_PARTIAL_GC
- #pragma prefast(suppress:6313, "flags is a template parameter and can be 0")
- return(flags & CollectMode_Partial) && inPartialCollectMode;
- #else
- return false;
- #endif
- }
// Registers an exhaustive-collection candidate and, unless the flags say this
// is a notification only (CollectHeuristic_Never), proceeds with a normal GC.
template <CollectionFlags flags>
BOOL
Recycler::CollectWithExhaustiveCandidate()
{
    Assert(flags & CollectOverride_ExhaustiveCandidate);
    // Currently we don't have any exhaustive candidate that has heuristic.
    Assert((flags & CollectHeuristic_Mask & ~CollectHeuristic_Never) == 0);

    // Recorded even when no GC is triggered; a later exhaustive collection
    // will pick this up via NeedExhaustiveRepeatCollect()
    this->hasExhaustiveCandidate = true;

    if (flags & CollectHeuristic_Never)
    {
        // This is just an exhaustive candidate notification. Don't trigger a GC.
        return false;
    }

    // Continue with the GC heuristic
    return CollectInternal<flags>();
}
// Common collection dispatch: suppresses most reentrant requests made during
// dispose, tries to finish an in-flight concurrent GC when appropriate, then
// either applies the collection heuristics or starts a collection directly.
template <CollectionFlags flags>
BOOL
Recycler::CollectInternal()
{
    // CollectHeuristic_Never flag should only be used with exhaustive candidate
    Assert((flags & CollectHeuristic_Never) == 0);

    // If we're in a re-entrant state, we want to allow GC to be triggered only
    // from allocation (or trigger points with AllowReentrant). This is to minimize
    // the number of reentrant GCs
    if ((flags & CollectOverride_AllowReentrant) == 0 && this->inDispose)
    {
        return false;
    }

#ifdef RECYCLER_TRACE
    CaptureCollectionParam(flags);
#endif

#if ENABLE_CONCURRENT_GC
    const BOOL concurrent = flags & CollectMode_Concurrent;
    const BOOL finishConcurrent = flags & CollectOverride_FinishConcurrent;

    // If we priority boosted, we should try to finish it every chance we get
    // Otherwise, we should finishing it if we are not doing a concurrent GC,
    // or the flags tell us to always try to finish a concurrent GC (CollectOverride_FinishConcurrent)
    if ((!concurrent || finishConcurrent || priorityBoost) && this->CollectionInProgress())
    {
        return TryFinishConcurrentCollect<flags>();
    }
#endif

    if (flags & CollectHeuristic_Mask)
    {
        // Check some heuristics first before starting a collection
        return CollectWithHeuristic<flags>();
    }

    // Start a collection now.
    return Collect<flags>();
}
// Applies the allocation-size, partial-GC and time-based heuristics requested
// by the flags. If no heuristic says a GC is needed, only pending disposes are
// flushed; a pending script-context-close GC bypasses all heuristics.
template <CollectionFlags flags>
BOOL
Recycler::CollectWithHeuristic()
{
    // CollectHeuristic_Never flag should only be used with exhaustive candidate
    Assert((flags & CollectHeuristic_Never) == 0);

    BOOL isScriptContextCloseGCPending = FALSE;
    const BOOL allocSize = flags & CollectHeuristic_AllocSize;
    const BOOL timedIfScriptActive = flags & CollectHeuristic_TimeIfScriptActive;
    const BOOL timedIfInScript = flags & CollectHeuristic_TimeIfInScript;
    const BOOL timed = (timedIfScriptActive && isScriptActive) || (timedIfInScript && isInScript) || (flags & CollectHeuristic_Time);

    if ((flags & CollectOverride_CheckScriptContextClose) != 0)
    {
        isScriptContextCloseGCPending = this->collectionWrapper->GetIsScriptContextCloseGCPending();
    }

    // If there is a script context close GC pending, we need to do a GC regardless
    // Otherwise, we should check the heuristics to see if a GC is necessary
    if (!isScriptContextCloseGCPending)
    {
#if ENABLE_PARTIAL_GC
        if (GetPartialFlag<flags>())
        {
            Assert(enablePartialCollect);
            Assert(allocSize);
            Assert(this->uncollectedNewPageCountPartialCollect >= RecyclerSweep::MinPartialUncollectedNewPageCount
                && this->uncollectedNewPageCountPartialCollect <= RecyclerHeuristic::Instance.MaxPartialUncollectedNewPageCount);

            // PARTIAL-GC-REVIEW: For now, we have only alloc size heuristic
            // Maybe improve this heuristic by looking at how many free pages are in the page allocator.
            if (autoHeap.uncollectedNewPageCount > this->uncollectedNewPageCountPartialCollect)
            {
                return Collect<flags>();
            }
        }
#endif

        // allocation byte count heuristic, collect every 1 MB allocated
        if (allocSize && (autoHeap.uncollectedAllocBytes < RecyclerHeuristic::UncollectedAllocBytesCollection()))
        {
            return FinishDisposeObjectsWrapped<flags>();
        }

        // time heuristic, allocate every 1000 clock tick, or 64 MB is allocated in a short time
        if (timed && (autoHeap.uncollectedAllocBytes < RecyclerHeuristic::Instance.MaxUncollectedAllocBytes))
        {
            uint currentTickCount = GetTickCount();
#ifdef RECYCLER_TRACE
            collectionParam.timeDiff = currentTickCount - tickCountNextCollection;
#endif
            // Signed difference handles GetTickCount() wraparound: positive
            // means the next-collection deadline has not been reached yet
            if ((int)(tickCountNextCollection - currentTickCount) >= 0)
            {
                return FinishDisposeObjectsWrapped<flags>();
            }
        }
#ifdef RECYCLER_TRACE
        else
        {
            uint currentTickCount = GetTickCount();
            collectionParam.timeDiff = currentTickCount - tickCountNextCollection;
        }
#endif
    }

    // Passed all the heuristics, do some GC work, maybe.
    // Partial mode is stripped: partial has its own heuristic above.
    return Collect<(CollectionFlags)(flags & ~CollectMode_Partial)>();
}
// Starts a collection with the given (already heuristic-approved) flags.
// Finishes an in-flight concurrent GC instead when one is running, records
// post-collection behaviors, then hands off to DoCollectWrapped.
template <CollectionFlags flags>
BOOL
Recycler::Collect()
{
#if ENABLE_CONCURRENT_GC
    if (this->CollectionInProgress())
    {
        // If we are forced in thread, we can't be concurrent
        // If we are not concurrent we should have been handled before in CollectInternal and we shouldn't be here
        Assert((flags & CollectOverride_ForceInThread) == 0);
        Assert((flags & CollectMode_Concurrent) != 0);
        return TryFinishConcurrentCollect<flags>();
    }
#endif

    // We clear the flag indicating that there is a GC pending because
    // of script context close, since we're about to do a GC anyway,
    // since the current GC will suffice.
    this->collectionWrapper->ClearIsScriptContextCloseGCPending();

    SetupPostCollectionFlags<flags>();

    const BOOL partial = GetPartialFlag<flags>();
    CollectionFlags finalFlags = flags;
    if (!partial)
    {
        // Not actually in partial-collect mode: strip the partial request
        finalFlags = (CollectionFlags)(flags & ~CollectMode_Partial);
    }

    // ExecuteRecyclerCollectionFunction may cause exception. In which case, we may trigger the assert
    // in SetupPostCollectionFlags because we didn't reset the inExhaustiveCollection variable on
    // an exception. Use this flag to disable the assertion if an exception occurs.
    DebugOnly(this->hasIncompleteDoCollect = true);

    {
        RECORD_TIMESTAMP(initialCollectionStartTime);
#ifdef NTBUILD
        this->telemetryBlock->initialCollectionStartProcessUsedBytes = PageAllocator::GetProcessUsedBytes();
        this->telemetryBlock->exhaustiveRepeatedCount = 0;
#endif
        return DoCollectWrapped(finalFlags);
    }
}
- template <CollectionFlags flags>
- void Recycler::SetupPostCollectionFlags()
- {
- // If we are not in a collection (collection in progress or in dispose), inExhaustiveCollection should not be set
- // Otherwise, we have missed an exhaustive collection.
- Assert(this->hasIncompleteDoCollect ||
- this->CollectionInProgress() || this->inDispose || (!this->inExhaustiveCollection && !this->inDecommitNowCollection));
- // Record whether we want to start exhaustive detection or do decommit now after GC
- const BOOL exhaustive = flags & CollectMode_Exhaustive;
- const BOOL decommitNow = flags & CollectMode_DecommitNow;
- const BOOL cacheCleanup = flags & CollectMode_CacheCleanup;
- if (decommitNow)
- {
- this->inDecommitNowCollection = true;
- }
- if (exhaustive)
- {
- this->inExhaustiveCollection = true;
- }
- if (cacheCleanup)
- {
- this->inCacheCleanupCollection = true;
- }
- }
// Executes DoCollect through the host's collection wrapper so the host can
// bracket the collection (e.g. with exception handling). Returns whether a
// collection was performed.
BOOL
Recycler::DoCollectWrapped(CollectionFlags flags)
{
#if ENABLE_CONCURRENT_GC
    this->skipStack = ((flags & CollectOverride_SkipStack) != 0);
    DebugOnly(this->isConcurrentGCOnIdle = (flags == CollectOnScriptIdle));
#endif
    this->allowDispose = (flags & CollectOverride_AllowDispose) == CollectOverride_AllowDispose;

    BOOL collected = collectionWrapper->ExecuteRecyclerCollectionFunction(this, &Recycler::DoCollect, flags);

    // On return we are either done, or a concurrent GC is still executing/finished in the background
#if ENABLE_CONCURRENT_GC
    Assert(IsConcurrentExecutingState() || IsConcurrentFinishedState() || !CollectionInProgress());
#else
    Assert(!CollectionInProgress());
#endif
    return collected;
}
- bool
- Recycler::NeedExhaustiveRepeatCollect() const
- {
- return this->inExhaustiveCollection && this->hasExhaustiveCandidate;
- }
// The central collection driver. Flushes pending disposes, then runs one or
// more collection passes (repeating while an exhaustive repeat is needed),
// choosing between partial, concurrent-mark, concurrent-sweep, concurrent
// in-thread, and fully in-thread strategies based on flags and configuration.
// Returns whether any collection work that may have freed objects was done.
BOOL
Recycler::DoCollect(CollectionFlags flags)
{
    // ExecuteRecyclerCollectionFunction may cause exception. In which case, we may trigger the assert
    // in SetupPostCollectionFlags because we didn't reset the inExhaustiveCollection variable if
    // an exception. We are not in DoCollect, there shouldn't be any more exception. Reset the flag
    DebugOnly(this->hasIncompleteDoCollect = false);

#ifdef RECYCLER_MEMORY_VERIFY
    this->Verify(Js::RecyclerPhase);
#endif
#ifdef RECYCLER_FINALIZE_CHECK
    autoHeap.VerifyFinalize();
#endif

#if ENABLE_PARTIAL_GC
    BOOL partial = flags & CollectMode_Partial;

#if DBG && defined(RECYCLER_DUMP_OBJECT_GRAPH)
    // Can't pass in RecyclerPartialStress and DumpObjectGraphOnCollect or call CollectGarbage with DumpObjectGraph
    if (GetRecyclerFlagsTable().RecyclerPartialStress) {
        Assert(!GetRecyclerFlagsTable().DumpObjectGraphOnCollect && !this->dumpObjectOnceOnCollect);
    } else if (GetRecyclerFlagsTable().DumpObjectGraphOnCollect || this->dumpObjectOnceOnCollect) {
        Assert(!GetRecyclerFlagsTable().RecyclerPartialStress);
    }
#endif

#ifdef RECYCLER_STRESS
    if (partial && GetRecyclerFlagsTable().RecyclerPartialStress)
    {
        // Stress mode: force partial collect mode on so the partial path runs
        this->inPartialCollectMode = true;
        this->forcePartialScanStack = true;
    }
#endif
#endif

#ifdef RECYCLER_DUMP_OBJECT_GRAPH
    if (dumpObjectOnceOnCollect || GetRecyclerFlagsTable().DumpObjectGraphOnCollect)
    {
        DumpObjectGraph();
        dumpObjectOnceOnCollect = false;
#if ENABLE_PARTIAL_GC
        // Can't do a partial collect if DumpObjectGraph is set since it'll call FinishPartial
        // which will set inPartialCollectMode to false.
        partial = false;
#endif
    }
#endif

#if ENABLE_CONCURRENT_GC
    const bool concurrent = (flags & CollectMode_Concurrent) != 0;
    const BOOL forceInThread = flags & CollectOverride_ForceInThread;
#else
    const bool concurrent = false;
#endif

    // Flush the pending dispose objects first if dispose is allowed
    Assert(!this->CollectionInProgress());
#if ENABLE_CONCURRENT_GC
    Assert(this->backgroundFinishMarkCount == 0);
#endif
    bool collected = FinishDisposeObjects();

    do
    {
        INC_TIMESTAMP_FIELD(exhaustiveRepeatedCount);
        RECORD_TIMESTAMP(currentCollectionStartTime);
#ifdef NTBUILD
        this->telemetryBlock->currentCollectionStartProcessUsedBytes = PageAllocator::GetProcessUsedBytes();
#endif

#if ENABLE_CONCURRENT_GC
        // DisposeObject may call script again and start another GC, so we may still be in concurrent GC state
        if (this->CollectionInProgress())
        {
            Assert(this->IsConcurrentState());
            Assert(collected);
            if (forceInThread)
            {
                return this->FinishConcurrentCollect(flags);
            }
            return true;
        }
        Assert(this->backgroundFinishMarkCount == 0);
#endif
#if DBG
        collectionCount++;
#endif

        collectionState = Collection_PreCollection;
        collectionWrapper->PreCollectionCallBack(flags);
        collectionState = CollectionStateNotCollecting;

        hasExhaustiveCandidate = false; // reset the candidate detection

#ifdef RECYCLER_STATS
#if ENABLE_PARTIAL_GC
        RecyclerCollectionStats oldCollectionStats = collectionStats;
#endif
        memset(&collectionStats, 0, sizeof(RecyclerCollectionStats));
        this->collectionStats.startCollectAllocBytes = autoHeap.uncollectedAllocBytes;
#if ENABLE_PARTIAL_GC
        this->collectionStats.startCollectNewPageCount = autoHeap.uncollectedNewPageCount;
        this->collectionStats.uncollectedNewPageCountPartialCollect = this->uncollectedNewPageCountPartialCollect;
#endif
#endif

#if ENABLE_PARTIAL_GC
        if (partial)
        {
#if ENABLE_CONCURRENT_GC
            Assert(!forceInThread);
#endif
#ifdef RECYCLER_STATS
            // We are only doing a partial GC, copy some old stats
            collectionStats.finalizeCount = oldCollectionStats.finalizeCount;
            memcpy(collectionStats.heapBlockCount, oldCollectionStats.smallNonLeafHeapBlockPartialUnusedCount,
                sizeof(oldCollectionStats.smallNonLeafHeapBlockPartialUnusedCount));
            memcpy(collectionStats.heapBlockFreeByteCount, oldCollectionStats.smallNonLeafHeapBlockPartialUnusedBytes,
                sizeof(oldCollectionStats.smallNonLeafHeapBlockPartialUnusedBytes));
            memcpy(collectionStats.smallNonLeafHeapBlockPartialUnusedCount, oldCollectionStats.smallNonLeafHeapBlockPartialUnusedCount,
                sizeof(oldCollectionStats.smallNonLeafHeapBlockPartialUnusedCount));
            memcpy(collectionStats.smallNonLeafHeapBlockPartialUnusedBytes, oldCollectionStats.smallNonLeafHeapBlockPartialUnusedBytes,
                sizeof(oldCollectionStats.smallNonLeafHeapBlockPartialUnusedBytes));
#endif
            Assert(enablePartialCollect && inPartialCollectMode);
            if (!this->PartialCollect(concurrent))
            {
                // Partial collect went concurrent (or bailed); nothing more to do here
                return collected;
            }
            // This disables partial if we do a repeated exhaustive GC
            partial = false;
            collected = true;
            continue;
        }

        // Not doing partial collect, we should decommit on finish collect
        decommitOnFinish = true;
        if (inPartialCollectMode)
        {
            // finish the partial collect first
            FinishPartialCollect();

            // Old heap block with free object is made available, count that as being collected
            collected = true;

            // PARTIAL-GC-CONSIDER: should we just pretend we did a GC, since we have made the free listed object
            // available to be used, instead of starting off another GC?
        }
#endif

#if ENABLE_CONCURRENT_GC
        bool skipConcurrent = false;
#ifdef ENABLE_DEBUG_CONFIG_OPTIONS
        // If the below flag is passed in, skip doing a non-blocking concurrent collect. Instead,
        // we will do a blocking concurrent collect, which is basically an in-thread GC
        skipConcurrent = GetRecyclerFlagsTable().ForceBlockingConcurrentCollect;
#endif

        // We are about to start a collection. Reset our heuristic counters now, so that
        // any allocations that occur during concurrent collection count toward the next collection's threshold.
        ResetHeuristicCounters();

        if (concurrent && !skipConcurrent)
        {
            Assert(!forceInThread);
            if (enableConcurrentMark)
            {
                if (StartBackgroundMarkCollect())
                {
                    // Tell the caller whether we have finished a collection and there may be free objects to reuse
                    return collected;
                }

                // Either ResetWriteWatch failed or the thread service failed
                // So concurrent mark is disabled, at least for now
            }

            if (enableConcurrentSweep)
            {
                if (StartConcurrentSweepCollect())
                {
                    collected = true;
                    continue;
                }
                // out of memory during collection
                return collected;
            }

            // concurrent collection failed, default back to non-concurrent collection
        }

        if (!forceInThread && enableConcurrentMark)
        {
            if (!CollectOnConcurrentThread())
            {
                // time out or out of memory during collection
                return collected;
            }
        }
        else
#endif
        {
            if (!CollectOnAllocatorThread())
            {
                // out of memory during collection
                return collected;
            }
        }

        collected = true;
#ifdef RECYCLER_TRACE
        collectionParam.repeat = true;
#endif
    }
    while (this->NeedExhaustiveRepeatCollect());

#if ENABLE_CONCURRENT_GC
    // DisposeObject may call script again and start another GC, so we may still be in concurrent GC state
    if (this->CollectionInProgress())
    {
        Assert(this->IsConcurrentState());
        Assert(collected);
        return true;
    }
#endif

    EndCollection();

    // Tell the caller whether we have finished a collection and there may be free objects to reuse
    return collected;
}
// Final bookkeeping after a collection completes in-thread: clears the
// exhaustive flag and, if requested (or forced via config), decommits free
// pages from every page allocator immediately.
void
Recycler::EndCollection()
{
#if ENABLE_CONCURRENT_GC
    Assert(this->backgroundFinishMarkCount == 0);
#endif
    Assert(!this->CollectionInProgress());

    // no more collection is requested, we can turn exhaustive back off
    this->inExhaustiveCollection = false;

    if (this->inDecommitNowCollection || CUSTOM_CONFIG_FLAG(GetRecyclerFlagsTable(), ForceDecommitOnCollect))
    {
#ifdef RECYCLER_TRACE
        if (GetRecyclerFlagsTable().Trace.IsEnabled(Js::RecyclerPhase))
        {
            Output::Print(_u("%04X> RC(%p): %s\n"), this->mainThreadId, this, _u("Decommit now"));
        }
#endif
        ForEachPageAllocator([](IdleDecommitPageAllocator* pageAlloc)
        {
            pageAlloc->DecommitNow();
        });

        this->inDecommitNowCollection = false;
    }

    RECORD_TIMESTAMP(lastCollectionEndTime);
}
#if ENABLE_PARTIAL_GC
// Runs a partial collection (rescan + mark of changed pages, then sweep).
// Returns true when the partial collect completed in-thread, false when it was
// handed off to the concurrent thread (CollectionStateConcurrentFinishMark).
bool
Recycler::PartialCollect(bool concurrent)
{
    Assert(IsMarkStackEmpty());
    Assert(this->inPartialCollectMode);
    Assert(collectionState == CollectionStateNotCollecting);

    // Rescan again
    collectionState = CollectionStateRescanFindRoots;

#if ENABLE_CONCURRENT_GC
    if (concurrent && enableConcurrentMark && this->partialConcurrentNextCollection)
    {
        this->PrepareBackgroundFindRoots();
        if (StartConcurrent(CollectionStateConcurrentFinishMark))
        {
#ifdef RECYCLER_TRACE
            PrintCollectTrace(Js::ConcurrentPartialCollectPhase);
#endif
            // Handed off to the background thread
            return false;
        }
        // Background start failed; undo the preparation and continue in-thread
        this->RevertPrepareBackgroundFindRoots();
    }
#endif

#ifdef RECYCLER_STRESS
    if (forcePartialScanStack)
    {
        // Mark the roots since they need not have been marked
        // in RecyclerPartialStress mode
        this->RootMark(collectionState);
    }
#endif

#ifdef RECYCLER_TRACE
    PrintCollectTrace(Js::PartialCollectPhase);
#endif

    bool needConcurrentSweep = false;
    this->CollectionBegin<Js::PartialCollectPhase>();

    size_t rescanRootBytes = FinishMark(INFINITE);
    Assert(rescanRootBytes != Recycler::InvalidScanRootBytes);

    needConcurrentSweep = this->Sweep(rescanRootBytes, concurrent, true);
    this->CollectionEnd<Js::PartialCollectPhase>();

    // Only reset the new page counter
    autoHeap.uncollectedNewPageCount = 0;

    // Finish collection
    FinishCollection(needConcurrentSweep);
    return true;
}
// Marks all client (host) tracked objects registered during a partial collect
// and then releases the tracking list.
void
Recycler::ProcessClientTrackedObjects()
{
    GCETW(GC_PROCESS_CLIENT_TRACKED_OBJECT_START, (this));
    Assert(this->inPartialCollectMode);
#if ENABLE_CONCURRENT_GC
    Assert(!this->DoQueueTrackedObject());
#endif

    if (!this->clientTrackedObjectList.Empty())
    {
        SListBase<void *>::Iterator iter(&this->clientTrackedObjectList);
        while (iter.Next())
        {
            auto& reference = iter.Data();
            this->TryMarkNonInterior(reference, &reference /* parentReference */); // Reference to inside the node
            RECYCLER_STATS_INC(this, clientTrackedObjectCount);
        }

        this->clientTrackedObjectList.Clear(&this->clientTrackedObjectAllocator);
    }
    GCETW(GC_PROCESS_CLIENT_TRACKED_OBJECT_STOP, (this));
}
// Resets all partial-collect bookkeeping: free-byte accounting, the client
// tracked object list, and the new-page threshold (set to "no threshold").
void
Recycler::ClearPartialCollect()
{
#if ENABLE_CONCURRENT_GC
    Assert(!this->DoQueueTrackedObject());
#endif
    this->autoHeap.unusedPartialCollectFreeBytes = 0;
    this->partialUncollectedAllocBytes = 0;
    this->clientTrackedObjectList.Clear(&this->clientTrackedObjectAllocator);
    // (size_t)-1 means no partial-collect page threshold is in effect
    this->uncollectedNewPageCountPartialCollect = (size_t)-1;
}
// Completes a partial collect on the main thread (recyclerSweep may be null
// when there is no sweep in progress) and clears partial-collect state.
void
Recycler::FinishPartialCollect(RecyclerSweep * recyclerSweep)
{
    Assert(recyclerSweep == nullptr || !recyclerSweep->IsBackground());
    RECYCLER_PROFILE_EXEC_BEGIN(this, Js::FinishPartialPhase);
    Assert(inPartialCollectMode);
#if ENABLE_CONCURRENT_GC
    Assert(!this->DoQueueTrackedObject());
#endif

    autoHeap.FinishPartialCollect(recyclerSweep);
    this->inPartialCollectMode = false;

    ClearPartialCollect();

    RECYCLER_PROFILE_EXEC_END(this, Js::FinishPartialPhase);
}
#endif
// Blocks until no collection is in progress (forcing any concurrent
// collection to finish first when concurrent GC is enabled).
void
Recycler::EnsureNotCollecting()
{
#if ENABLE_CONCURRENT_GC
    FinishConcurrent<ForceFinishCollection>();
#endif
    Assert(!this->CollectionInProgress());
}
// Invokes CallBackFunction for every live object matching infoBits.
// Enumeration requires a quiescent heap, so any in-progress (concurrent or
// partial) collection is finished first.
void Recycler::EnumerateObjects(ObjectInfoBits infoBits, void (*CallBackFunction)(void * address, size_t size))
{
    // Make sure we are not collecting
    EnsureNotCollecting();

#if ENABLE_PARTIAL_GC
    // We are updating the free bit vector, messing up the partial collection state.
    // Just get out of partial collect mode
    // GC-CONSIDER: consider adding an option in FinishConcurrent to not get into partial collect mode during sweep.
    if (inPartialCollectMode)
    {
        FinishPartialCollect();
    }
#endif

    autoHeap.EnumerateObjects(infoBits, CallBackFunction);

    // GC-TODO: Explicit heap?
}
// Returns nonzero while the collection state has the mark bit set.
// NOTE: returns the raw masked bits, not a normalized TRUE/FALSE.
BOOL
Recycler::IsMarkState() const
{
    return (collectionState & Collection_Mark);
}
// Returns nonzero while the collection state has the find-roots bit set.
// NOTE: returns the raw masked bits, not a normalized TRUE/FALSE.
BOOL
Recycler::IsFindRootsState() const
{
    return (collectionState & Collection_FindRoots);
}
#if DBG
// Debug-only: true when it is safe to re-enter the recycler — either no
// collection is in progress, or the collection is running concurrently.
BOOL
Recycler::IsReentrantState() const
{
#if ENABLE_CONCURRENT_GC
    return !this->CollectionInProgress() || this->IsConcurrentState();
#else
    return !this->CollectionInProgress();
#endif
}
#endif
#if defined(ENABLE_JS_ETW) && defined(NTBUILD)
// Maps each collection phase to the ETW activation kind reported in GC events.
template <Js::Phase phase> static ETWEventGCActivationKind GetETWEventGCActivationKind();
template <> ETWEventGCActivationKind GetETWEventGCActivationKind<Js::GarbageCollectPhase>() { return ETWEvent_GarbageCollect; }
template <> ETWEventGCActivationKind GetETWEventGCActivationKind<Js::ThreadCollectPhase>() { return ETWEvent_ThreadCollect; }
template <> ETWEventGCActivationKind GetETWEventGCActivationKind<Js::ConcurrentCollectPhase>() { return ETWEvent_ConcurrentCollect; }
template <> ETWEventGCActivationKind GetETWEventGCActivationKind<Js::PartialCollectPhase>() { return ETWEvent_PartialCollect; }
#endif
// Emits the profiling/ETW begin markers for a collection of the given phase.
// Paired with CollectionEnd<phase>().
template <Js::Phase phase>
void
Recycler::CollectionBegin()
{
    RECYCLER_PROFILE_EXEC_BEGIN2(this, Js::RecyclerPhase, phase);
    GCETW_INTERNAL(GC_START, (this, GetETWEventGCActivationKind<phase>()));
}
// Emits the profiling/ETW end markers for a collection of the given phase.
// Paired with CollectionBegin<phase>() (markers are closed in reverse order).
template <Js::Phase phase>
void
Recycler::CollectionEnd()
{
    GCETW_INTERNAL(GC_STOP, (this, GetETWEventGCActivationKind<phase>()));
    RECYCLER_PROFILE_EXEC_END2(this, phase, Js::RecyclerPhase);
}
#if ENABLE_CONCURRENT_GC
// Background-thread rescan of dirtied pages (heap block map + heap buckets).
// Returns the number of rescanned pages, or Recycler::InvalidScanRootBytes if
// an OOM occurred during rescan and an OOM rescan pass is still needed.
size_t
Recycler::BackgroundRescan(RescanFlags rescanFlags)
{
    Assert(!this->isProcessingRescan);
    DebugOnly(this->isProcessingRescan = true);

    GCETW(GC_BACKGROUNDRESCAN_START, (this, backgroundRescanCount));
    RECYCLER_PROFILE_EXEC_BACKGROUND_BEGIN(this, Js::BackgroundRescanPhase);

    size_t rescannedPageCount = heapBlockMap.Rescan(this, ((rescanFlags & RescanFlags_ResetWriteWatch) != 0));
    rescannedPageCount += autoHeap.Rescan(rescanFlags);

    RECYCLER_PROFILE_EXEC_BACKGROUND_END(this, Js::BackgroundRescanPhase);
    GCETW(GC_BACKGROUNDRESCAN_STOP, (this, backgroundRescanCount));

    this->backgroundRescanCount++;

    if (!this->NeedOOMRescan())
    {
        if ((rescanFlags & RescanFlags_ResetWriteWatch) != 0)
        {
            // NOTE(review): isProcessingRescan is deliberately left set when
            // ResetWriteWatch was not requested — presumably another rescan
            // pass follows; confirm against callers before changing.
            DebugOnly(this->isProcessingRescan = false);
        }
        return rescannedPageCount;
    }

    DebugOnly(this->isProcessingRescan = false);

    return Recycler::InvalidScanRootBytes;
}
// Background-thread reset of the dirty-page tracking for the whole heap
// block map (the -1 in the ETW events distinguishes this full reset pass).
void
Recycler::BackgroundResetWriteWatchAll()
{
    GCETW(GC_BACKGROUNDRESETWRITEWATCH_START, (this, -1));

    heapBlockMap.ResetDirtyPages(this);

    GCETW(GC_BACKGROUNDRESETWRITEWATCH_STOP, (this, -1));
}
#endif
// Rescan pass used while finishing a mark, runnable from either the main
// thread (background == false) or the concurrent thread. Background rescans
// also reset write watch. Returns the number of scanned pages.
size_t
Recycler::FinishMarkRescan(bool background)
{
#if !ENABLE_CONCURRENT_GC
    Assert(!background);
#endif

    if (background)
    {
        GCETW(GC_BACKGROUNDRESCAN_START, (this, 0));
    }
    else
    {
        GCETW(GC_RESCAN_START, (this));
    }

    RECYCLER_PROFILE_EXEC_THREAD_BEGIN(background, this, Js::RescanPhase);

#if ENABLE_CONCURRENT_GC
    RescanFlags const flags = (background ? RescanFlags_ResetWriteWatch : RescanFlags_None);
#else
    Assert(!background);
    RescanFlags const flags = RescanFlags_None;
#endif

#if DBG
    Assert(!this->isProcessingRescan);
    this->isProcessingRescan = true;
#endif

#if ENABLE_CONCURRENT_GC
    size_t scannedPageCount = heapBlockMap.Rescan(this, ((flags & RescanFlags_ResetWriteWatch) != 0));
    scannedPageCount += autoHeap.Rescan(flags);
#else
    // Without concurrent GC there is no write-watch based rescan to do
    size_t scannedPageCount = 0;
#endif

    DebugOnly(this->isProcessingRescan = false);

    RECYCLER_PROFILE_EXEC_THREAD_END(background, this, Js::RescanPhase);

    if (background)
    {
        GCETW(GC_BACKGROUNDRESCAN_STOP, (this, 0));
    }
    else
    {
        GCETW(GC_RESCAN_STOP, (this));
    }

    return scannedPageCount;
}
#if ENABLE_CONCURRENT_GC
// Processes all tracked objects queued during concurrent mark, from the main
// mark context and from every parallel mark context.
void
Recycler::ProcessTrackedObjects()
{
    GCETW(GC_PROCESS_TRACKED_OBJECT_START, (this));
#if ENABLE_PARTIAL_GC
    Assert(this->clientTrackedObjectList.Empty());
    Assert(!this->inPartialCollectMode);
#endif
    Assert(this->DoQueueTrackedObject());

    // Stop queuing before draining so newly discovered tracked objects are
    // processed directly
    this->queueTrackedObject = false;
    DebugOnly(this->isProcessingTrackedObjects = true);

    markContext.ProcessTracked();

    // If we did a parallel mark, we need to process any queued tracked objects from the parallel mark stack as well.
    // If we didn't, this will do nothing.
    parallelMarkContext1.ProcessTracked();
    parallelMarkContext2.ProcessTracked();
    parallelMarkContext3.ProcessTracked();

    DebugOnly(this->isProcessingTrackedObjects = false);

    GCETW(GC_PROCESS_TRACKED_OBJECT_STOP, (this));
}
#endif
// Asks the concurrent thread to execute the host wrapper callback and waits
// for it to complete, restoring the previous collection state afterwards.
// Returns false when concurrent GC is unavailable or the thread could not be
// started.
BOOL
Recycler::RequestConcurrentWrapperCallback()
{
#if ENABLE_CONCURRENT_GC
    Assert(!IsConcurrentExecutingState());

    // Save the original collection state
    CollectionState oldState = this->collectionState;

    // Get the background thread to start the callback
    if (StartConcurrent(CollectionStateConcurrentWrapperCallback))
    {
        // Wait for the callback to complete
        WaitForConcurrentThread(INFINITE);

        // The state must not change back until we restore the original state
        Assert(collectionState == CollectionStateConcurrentWrapperCallback);
        this->collectionState = oldState;
        return true;
    }
#endif

    return false;
}
#if ENABLE_CONCURRENT_GC
/*------------------------------------------------------------------------------------------------
 * Concurrent
 *------------------------------------------------------------------------------------------------*/
// Use the concurrent thread synchronously for a full collection: start a
// background mark, wait for it (bounded by the finish-collect wait-time
// heuristic), then process tracked objects, finish the mark, and sweep
// in-thread. Returns false if the background mark could not start or the
// wait timed out (the collection is ended in both failure paths).
BOOL
Recycler::CollectOnConcurrentThread()
{
#if ENABLE_PARTIAL_GC
    Assert(!inPartialCollectMode);
#endif
#ifdef RECYCLER_TRACE
    PrintCollectTrace(Js::ThreadCollectPhase);
#endif
    this->CollectionBegin<Js::ThreadCollectPhase>();

    // Synchronous concurrent mark
    if (!StartSynchronousBackgroundMark())
    {
        this->CollectionEnd<Js::ThreadCollectPhase>();
        return false;
    }

    const DWORD waitTime = RecyclerHeuristic::FinishConcurrentCollectWaitTime(this->GetRecyclerFlagsTable());
    GCETW(GC_SYNCHRONOUSMARKWAIT_START, (this, waitTime));
    const BOOL waited = WaitForConcurrentThread(waitTime);
    GCETW(GC_SYNCHRONOUSMARKWAIT_STOP, (this, !waited));
    if (!waited)
    {
#ifdef RECYCLER_TRACE
        if (GetRecyclerFlagsTable().Trace.IsEnabled(Js::RecyclerPhase)
            || GetRecyclerFlagsTable().Trace.IsEnabled(Js::ThreadCollectPhase))
        {
            Output::Print(_u("%04X> RC(%p): %s: %s\n"), this->mainThreadId, this, Js::PhaseNames[Js::ThreadCollectPhase], _u("Timeout"));
        }
#endif
        this->CollectionEnd<Js::ThreadCollectPhase>();
        return false;
    }

    // If the concurrent thread was done within the time limit, there shouldn't be
    // any object needs to be rescanned
    // CONCURRENT-TODO: Optimize it so we don't rescan in the background if we are still waiting
    // GC-TODO: Unfortunately we can't assert this, as the background code gen thread may still
    // touch GC memory (e.g. FunctionBody), causing write watch and rescan
    // in the background.
    // Assert(markContext.Empty());

    DebugOnly(this->isProcessingRescan = false);

    // Finish the mark in-thread, then sweep.
    this->collectionState = CollectionStateMark;
    this->ProcessTrackedObjects();
    this->ProcessMark(false);
    this->EndMark();

    // Partial collect mode is not re-enabled after a non-partial in-thread GC because partial GC heuristics are not adjusted
    // after a full in-thread GC. Enabling partial collect mode causes partial GC heuristics to be reset before the next full
    // in-thread GC, thereby allowing partial GC to kick in more easily without being able to adjust heuristics after the full
    // GCs. Until we have a way of adjusting partial GC heuristics after a full in-thread GC, once partial collect mode is
    // turned off, it will remain off until a concurrent GC happens

    this->Sweep();
    this->CollectionEnd<Js::ThreadCollectPhase>();
    FinishCollection();
    return true;
}
// explicit instantiation
template BOOL Recycler::FinishConcurrent<FinishConcurrentOnIdle>();
template BOOL Recycler::FinishConcurrent<FinishConcurrentOnIdleAtRoot>();
template BOOL Recycler::FinishConcurrent<FinishConcurrentOnExitScript>();
template BOOL Recycler::FinishConcurrent<FinishConcurrentOnEnterScript>();
template BOOL Recycler::FinishConcurrent<ForceFinishCollection>();

// Try to bring an in-progress concurrent collection to completion in-thread.
// Acts only when a collection is in progress and either the background thread
// is not currently executing or CollectOverride_ForceFinish is set; otherwise
// returns false without doing anything.
template <CollectionFlags flags>
BOOL
Recycler::FinishConcurrent()
{
    // Only the finish/override flag combinations are valid for this entry point.
    CompileAssert((flags & ~(CollectOverride_AllowDispose | CollectOverride_ForceFinish | CollectOverride_ForceInThread
        | CollectMode_Concurrent | CollectOverride_DisableIdleFinish | CollectOverride_BackgroundFinishMark
        | CollectOverride_SkipStack | CollectOverride_FinishConcurrentTimeout)) == 0);

    if (this->CollectionInProgress())
    {
        Assert(this->IsConcurrentEnabled());
        Assert(IsConcurrentState());

        const BOOL forceFinish = flags & CollectOverride_ForceFinish;
        if (forceFinish || !IsConcurrentExecutingState())
        {
#if ENABLE_BACKGROUND_PAGE_FREEING
            if (CONFIG_FLAG(EnableBGFreeZero))
            {
                if (this->collectionState == CollectionStateConcurrentSweep)
                {
                    // Help with the background thread to zero and flush zero pages
                    // if we are going to wait anyways.
                    recyclerPageAllocator.ZeroQueuedPages();
                    recyclerLargeBlockPageAllocator.ZeroQueuedPages();
#ifdef RECYCLER_WRITE_BARRIER_ALLOC_SEPARATE_PAGE
                    recyclerWithBarrierPageAllocator.ZeroQueuedPages();
#endif
                    this->FlushBackgroundPages();
                }
            }
#endif
#ifdef RECYCLER_TRACE
            collectionParam.finishOnly = true;
            collectionParam.flags = flags;
#endif
#if ENABLE_CONCURRENT_GC
            // If SkipStack is provided, and we're not forcing the finish (i.e we're not in concurrent executing state)
            // then, it's fine to set the skipStack flag to true, so that during the in-thread find-roots, we'll skip
            // the stack scan
            this->skipStack = ((flags & CollectOverride_SkipStack) != 0) && !forceFinish;
#if DBG
            this->isFinishGCOnIdle = (flags == FinishConcurrentOnIdleAtRoot);
#endif
#endif
            return FinishConcurrentCollectWrapped(flags);
        }
    }
    return false;
}
- template <CollectionFlags flags>
- BOOL
- Recycler::TryFinishConcurrentCollect()
- {
- Assert(this->CollectionInProgress());
- RECYCLER_STATS_INC(this, finishCollectTryCount);
- SetupPostCollectionFlags<flags>();
- const BOOL concurrent = flags & CollectMode_Concurrent;
- const BOOL forceInThread = flags & CollectOverride_ForceInThread;
- Assert(this->IsConcurrentEnabled());
- Assert(IsConcurrentState() || IsCollectionDisabled());
- Assert(!concurrent || !forceInThread);
- if (concurrent && concurrentThread != NULL)
- {
- if (IsConcurrentExecutingState())
- {
- if (!this->priorityBoost)
- {
- uint tickCount = GetTickCount();
- if ((autoHeap.uncollectedAllocBytes > RecyclerHeuristic::Instance.UncollectedAllocBytesConcurrentPriorityBoost)
- || (tickCount - this->tickCountStartConcurrent > RecyclerHeuristic::PriorityBoostTimeout(this->GetRecyclerFlagsTable())))
- {
- #ifdef RECYCLER_TRACE
- if (GetRecyclerFlagsTable().Trace.IsEnabled(Js::RecyclerPhase))
- {
- Output::Print(_u("%04X> RC(%p): %s: "), this->mainThreadId, this, _u("Set priority normal"));
- if (autoHeap.uncollectedAllocBytes > RecyclerHeuristic::Instance.UncollectedAllocBytesConcurrentPriorityBoost)
- {
- Output::Print(_u("AllocBytes=%d (Time=%d)\n"), autoHeap.uncollectedAllocBytes, tickCount - this->tickCountStartConcurrent);
- }
- else
- {
- Output::Print(_u("Time=%d (AllocBytes=%d\n"), tickCount - this->tickCountStartConcurrent, autoHeap.uncollectedAllocBytes);
- }
- }
- #endif
- // Set it to a large number so we don't set the thread priority again
- this->priorityBoost = true;
- // The recycler thread hasn't come back in 5 seconds
- // It either has a large object graph, or it is starving.
- // Set the priority back to normal
- SetThreadPriority(this->concurrentThread, THREAD_PRIORITY_NORMAL);
- }
- }
- return FinishDisposeObjectsWrapped<flags>();
- }
- else if ((flags & CollectOverride_FinishConcurrentTimeout) != 0)
- {
- uint tickCount = GetTickCount();
- // If we haven't gone past the time to call finish collection,
- // simply call FinishDisposeObjects and return
- // Otherwise, actually go ahead and call FinishConcurrentCollectWrapped
- // We do this only if this is a collection that allows finish concurrent to timeout
- // If not, by default, we finish the collection
- if (tickCount <= this->tickCountNextFinishCollection)
- {
- return FinishDisposeObjectsWrapped<flags>();
- }
- }
- }
- return FinishConcurrentCollectWrapped(flags);
- }
- BOOL
- Recycler::IsConcurrentMarkState() const
- {
- return (collectionState & Collection_ConcurrentMark) == Collection_ConcurrentMark;
- }
- BOOL
- Recycler::IsConcurrentMarkExecutingState() const
- {
- return (collectionState & (Collection_ConcurrentMark | Collection_ExecutingConcurrent)) == (Collection_ConcurrentMark | Collection_ExecutingConcurrent);
- }
- BOOL
- Recycler::IsConcurrentResetMarksState() const
- {
- return collectionState == CollectionStateConcurrentResetMarks;
- }
- BOOL
- Recycler::IsInThreadFindRootsState() const
- {
- CollectionState currentCollectionState = collectionState;
- return (currentCollectionState & Collection_FindRoots) && (currentCollectionState != CollectionStateConcurrentFindRoots);
- }
- BOOL
- Recycler::IsConcurrentFindRootState() const
- {
- return collectionState == CollectionStateConcurrentFindRoots;
- }
- BOOL
- Recycler::IsConcurrentExecutingState() const
- {
- return (collectionState & Collection_ExecutingConcurrent);
- }
- BOOL
- Recycler::IsConcurrentSweepExecutingState() const
- {
- return (collectionState & (Collection_ConcurrentSweep | Collection_ExecutingConcurrent)) == (Collection_ConcurrentSweep | Collection_ExecutingConcurrent);
- }
- BOOL
- Recycler::IsConcurrentState() const
- {
- return (collectionState & Collection_Concurrent);
- }
#if DBG
// Nonzero when a finish-concurrent state bit is set (debug-only check).
BOOL
Recycler::IsConcurrentFinishedState() const
{
    const CollectionState state = collectionState;
    return (state & Collection_FinishConcurrent);
}
#endif
- bool
- Recycler::InitializeConcurrent(JsUtil::ThreadService *threadService)
- {
- try
- {
- AUTO_NESTED_HANDLED_EXCEPTION_TYPE(ExceptionType_OutOfMemory);
- concurrentWorkDoneEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
- if (concurrentWorkDoneEvent == nullptr)
- {
- throw Js::OutOfMemoryException();
- }
- #if DBG_DUMP
- markContext.GetPageAllocator()->debugName = _u("ConcurrentCollect");
- #endif
- if (!threadService->HasCallback())
- {
- #ifdef IDLE_DECOMMIT_ENABLED
- concurrentIdleDecommitEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
- if (concurrentIdleDecommitEvent == nullptr)
- {
- throw Js::OutOfMemoryException();
- }
- #endif
- concurrentWorkReadyEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
- if (concurrentWorkReadyEvent == nullptr)
- {
- throw Js::OutOfMemoryException();
- }
- }
- }
- catch (Js::OutOfMemoryException)
- {
- Assert(concurrentWorkReadyEvent == nullptr);
- if (concurrentWorkDoneEvent)
- {
- CloseHandle(concurrentWorkDoneEvent);
- concurrentWorkDoneEvent = nullptr;
- }
- #ifdef IDLE_DECOMMIT_ENABLED
- if (concurrentIdleDecommitEvent)
- {
- CloseHandle(concurrentIdleDecommitEvent);
- concurrentIdleDecommitEvent = nullptr;
- }
- #endif
- return false;
- }
- return true;
- }
#pragma prefast(suppress:6262, "Where this function is call should have ample of stack space")
// Stop any in-flight concurrent work: signal abort, wait for the background
// thread (or its death) to be observed, and — when restoreState is true —
// put the collection state machine back into a consistent, not-collecting
// state. Returns true when the work-done event (not the thread handle) was
// what completed the wait.
bool Recycler::AbortConcurrent(bool restoreState)
{
    Assert(!this->CollectionInProgress() || this->IsConcurrentState());

    // In case the thread already died, wait for that too
    HANDLE handle[2] = { concurrentWorkDoneEvent, concurrentThread };

    // Note, concurrentThread will be null if we have a threadService.
    Assert(concurrentThread != NULL || threadService->HasCallback());
    DWORD handleCount = (concurrentThread == NULL ? 1 : 2);

    DWORD ret = WAIT_OBJECT_0;
    if (this->IsConcurrentState())
    {
        // Flag the background worker to bail out, and make sure it isn't
        // starved while we wait for it.
        this->isAborting = true;
        if (this->concurrentThread != NULL)
        {
            SetThreadPriority(this->concurrentThread, THREAD_PRIORITY_NORMAL);
        }
        ret = WaitForMultipleObjectsEx(handleCount, handle, FALSE, INFINITE, FALSE);
        this->isAborting = false;
        Assert(this->IsConcurrentFinishedState() || ret == WAIT_OBJECT_0 + 1);
        if (ret == WAIT_OBJECT_0 && restoreState)
        {
            if (collectionState == CollectionStateRescanWait)
            {
                this->ResetMarkCollectionState();
            }
            else if (collectionState == CollectionStateTransferSweptWait)
            {
                // Make sure we don't do another GC after finishing this one.
                this->inExhaustiveCollection = false;

                // Let's just finish the sweep so that GC is in a consistent state, but don't run dispose
                // AbortConcurrent already consumed the event from the concurrent thread, just signal it so
                // FinishConcurrentCollect can wait for it again.
                SetEvent(this->concurrentWorkDoneEvent);
                EnsureNotCollecting();
            }
            else
            {
                Assert(UNREACHED);
            }
            Assert(collectionState == CollectionStateNotCollecting);
            Assert(this->isProcessingRescan == false);
        }
        else
        {
            // Even if we weren't asked to restore states, we need to clean up the pending guest arena
            CleanupPendingUnroot();

            // Also need to release any pages held by the mark stack, if we abandoned it
            markContext.Abort();
        }
    }

    Assert(!this->hasPendingDeleteGuestArena);
    return ret == WAIT_OBJECT_0;
}
// Flush unpin/unroot work that was deferred while the background thread was
// reading the pinned-object map and guest arena list: remove zero-refcount
// pinned entries and delete guest arenas flagged pendingDelete.
void
Recycler::CleanupPendingUnroot()
{
    // Must not run while a background find-roots is reading these structures.
    Assert(!this->hasPendingConcurrentFindRoot);
    if (hasPendingUnpinnedObject)
    {
        pinnedObjectMap.MapAndRemoveIf([](void * obj, PinRecord const &refCount)
        {
#if defined(CHECK_MEMORY_LEAK) || defined(LEAK_REPORT)
#ifdef STACK_BACK_TRACE
            // A zero refcount entry should have released its stack back traces.
            Assert(refCount != 0 || refCount.stackBackTraces == nullptr);
#endif
#endif
            // Remove entries whose pin count dropped to zero.
            return refCount == 0;
        });

        hasPendingUnpinnedObject = false;
    }

    if (hasPendingDeleteGuestArena)
    {
        DebugOnly(bool foundPendingDelete = false);
        DListBase<GuestArenaAllocator>::EditingIterator guestArenaIter(&guestArenaList);
        while (guestArenaIter.Next())
        {
            GuestArenaAllocator& allocator = guestArenaIter.Data();
            if (allocator.pendingDelete)
            {
                // Unlock the block list before removing the arena.
                allocator.SetLockBlockList(false);
                guestArenaIter.RemoveCurrent(&HeapAllocator::Instance);
                DebugOnly(foundPendingDelete = true);
            }
        }
        hasPendingDeleteGuestArena = false;
        // The flag implies at least one arena was actually pending delete.
        Assert(foundPendingDelete);
    }
#if DBG
    else
    {
        // Debug-verify the flag: no arena should be pending delete.
        DListBase<GuestArenaAllocator>::Iterator guestArenaIter(&guestArenaList);
        while (guestArenaIter.Next())
        {
            GuestArenaAllocator& allocator = guestArenaIter.Data();
            Assert(!allocator.pendingDelete);
        }
    }
#endif
}
// Tear down the concurrent machinery at shutdown/disable time: abort any
// in-flight work, tell the dedicated thread to exit (CollectionStateExit),
// shut down the parallel threads, and close the coordination events.
// NOTE: this clears the concurrentThread member WITHOUT closing the thread
// handle — a caller that wants to close it must capture the handle before
// calling this function.
void
Recycler::FinalizeConcurrent(bool restoreState)
{
    bool needCleanExitState = restoreState;

    // Leak-checking/dump modes also need the GC left in a clean state so the
    // final in-thread pass can run.
#if defined(RECYCLER_DUMP_OBJECT_GRAPH)
    needCleanExitState = needCleanExitState || GetRecyclerFlagsTable().DumpObjectGraphOnExit;
#endif
#ifdef LEAK_REPORT
    needCleanExitState = needCleanExitState || GetRecyclerFlagsTable().IsEnabled(Js::LeakReportFlag);
#endif
#ifdef CHECK_MEMORY_LEAK
    needCleanExitState = needCleanExitState || GetRecyclerFlagsTable().CheckMemoryLeak;
#endif
    bool aborted = AbortConcurrent(needCleanExitState);

    // Ask the dedicated thread (if any) to exit its loop.
    collectionState = CollectionStateExit;
    if (aborted && this->concurrentThread != NULL)
    {
        // In case the thread already died, wait for that too
        HANDLE handle[2] = { concurrentWorkDoneEvent, concurrentThread };
        SetEvent(concurrentWorkReadyEvent);
        SetThreadPriority(this->concurrentThread, THREAD_PRIORITY_NORMAL);
        // In case the thread already died, wait for that too
        DWORD fRet = WaitForMultipleObjectsEx(2, handle, FALSE, INFINITE, FALSE);
        AssertMsg(fRet != WAIT_FAILED, "Check handles passed to WaitForMultipleObjectsEx.");
    }

    // Shutdown parallel threads and return the handle for them so the caller can
    // close it.
    parallelThread1.Shutdown();
    parallelThread2.Shutdown();

#ifdef IDLE_DECOMMIT_ENABLED
    if (concurrentIdleDecommitEvent != nullptr)
    {
        CloseHandle(concurrentIdleDecommitEvent);
        concurrentIdleDecommitEvent = nullptr;
    }
#endif

    CloseHandle(concurrentWorkDoneEvent);
    concurrentWorkDoneEvent = nullptr;

    if (concurrentWorkReadyEvent != NULL)
    {
        CloseHandle(concurrentWorkReadyEvent);
        concurrentWorkReadyEvent = nullptr;
    }

    if (needCleanExitState)
    {
        // We may do another marking pass to look for memory leaks;
        // Since we have shut down the concurrent thread, don't do a parallel mark.
        this->enableConcurrentMark = false;
        this->enableParallelMark = false;
        this->enableConcurrentSweep = false;
    }

    this->threadService = nullptr;
    this->concurrentThread = nullptr;
}
// Enable concurrent GC: create the coordination events, decide which
// concurrent features (mark/parallel mark/sweep) are on, and — unless a
// thread service is used — start the parallel threads (when startAllThreads)
// and the dedicated concurrent thread. On any failure everything created so
// far is rolled back and false is returned.
bool
Recycler::EnableConcurrent(JsUtil::ThreadService *threadService, bool startAllThreads)
{
    if (this->disableConcurrent)
    {
        return false;
    }

    if (!this->InitializeConcurrent(threadService))
    {
        return false;
    }

#if ENABLE_DEBUG_CONFIG_OPTIONS
    // Each feature can be switched off individually from the flags table.
    this->enableConcurrentMark = !CUSTOM_PHASE_OFF1(GetRecyclerFlagsTable(), Js::ConcurrentMarkPhase);
    this->enableParallelMark = !CUSTOM_PHASE_OFF1(GetRecyclerFlagsTable(), Js::ParallelMarkPhase);
    this->enableConcurrentSweep = !CUSTOM_PHASE_OFF1(GetRecyclerFlagsTable(), Js::ConcurrentSweepPhase);
#else
    this->enableConcurrentMark = true;
    this->enableParallelMark = true;
    this->enableConcurrentSweep = true;
#endif

    if (this->enableParallelMark && this->maxParallelism == 1)
    {
        // Disable parallel mark if only 1 CPU
        this->enableParallelMark = false;
    }

    if (threadService->HasCallback())
    {
        // Background work goes through the host's thread service; no
        // dedicated threads are created.
        this->threadService = threadService;
        return true;
    }
    else
    {
        bool startConcurrentThread = true;
        bool startedParallelThread1 = false;
        bool startedParallelThread2 = false;

        if (startAllThreads)
        {
            // Parallel helper threads are only useful with enough CPUs
            // (one for the main thread, one for the concurrent thread).
            if (this->enableParallelMark && this->maxParallelism > 2)
            {
                if (!parallelThread1.EnableConcurrent(true))
                {
                    startConcurrentThread = false;
                }
                else
                {
                    startedParallelThread1 = true;
                    if (this->maxParallelism > 3)
                    {
                        if (!parallelThread2.EnableConcurrent(true))
                        {
                            startConcurrentThread = false;
                        }
                        else
                        {
                            startedParallelThread2 = true;
                        }
                    }
                }
            }
        }

        if (startConcurrentThread)
        {
            // Local intentionally shadows the member; the member is only set
            // once startup has fully succeeded.
            HANDLE concurrentThread = (HANDLE)PlatformAgnostic::Thread::Create(Recycler::ConcurrentThreadStackSize, &Recycler::StaticThreadProc, this, PlatformAgnostic::Thread::ThreadInitStackSizeParamIsAReservation);
            if (concurrentThread != nullptr)
            {
                // Wait for recycler thread to initialize
                // (the thread handle is in the wait set too, so a thread that
                // dies during init is detected).
                HANDLE handle[2] = { this->concurrentWorkDoneEvent, concurrentThread };
                DWORD ret = WaitForMultipleObjectsEx(2, handle, FALSE, INFINITE, FALSE);
                if (ret == WAIT_OBJECT_0)
                {
                    this->threadService = threadService;
                    this->concurrentThread = concurrentThread;
                    return true;
                }
                CloseHandle(concurrentThread);
            }
        }

        // Roll back any parallel threads we managed to start.
        if (startedParallelThread1)
        {
            parallelThread1.Shutdown();
            if (startedParallelThread2)
            {
                parallelThread2.Shutdown();
            }
        }
    }

    // We failed to start a concurrent thread so we set these back to false and clean up
    this->enableConcurrentMark = false;
    this->enableParallelMark = false;
    this->enableConcurrentSweep = false;

    if (concurrentWorkReadyEvent)
    {
        CloseHandle(concurrentWorkReadyEvent);
        concurrentWorkReadyEvent = nullptr;
    }

    if (concurrentWorkDoneEvent)
    {
        CloseHandle(concurrentWorkDoneEvent);
        concurrentWorkDoneEvent = nullptr;
    }

#ifdef IDLE_DECOMMIT_ENABLED
    if (concurrentIdleDecommitEvent)
    {
        CloseHandle(concurrentIdleDecommitEvent);
        concurrentIdleDecommitEvent = nullptr;
    }
#endif
    return false;
}
- void
- Recycler::ShutdownThread()
- {
- if (this->IsConcurrentEnabled())
- {
- Assert(concurrentThread != NULL || threadService->HasCallback());
- FinalizeConcurrent(false);
- if (concurrentThread)
- {
- CloseHandle(concurrentThread);
- }
- }
- }
- void
- Recycler::DisableConcurrent()
- {
- if (this->IsConcurrentEnabled())
- {
- Assert(concurrentThread != NULL || threadService->HasCallback());
- FinalizeConcurrent(true);
- if (concurrentThread)
- {
- CloseHandle(concurrentThread);
- }
- this->collectionState = CollectionStateNotCollecting;
- }
- }
// Hand the given collection state to the background worker. With a thread
// service the work is queued through the service callback; otherwise the
// dedicated concurrent thread is woken via concurrentWorkReadyEvent.
// Restores the previous collection state and returns false only when the
// thread service refuses the work.
bool
Recycler::StartConcurrent(CollectionState const state)
{
    // Reset the tick count to detect if the concurrent thread is taking too long
    tickCountStartConcurrent = GetTickCount();

    CollectionState oldState = this->collectionState;
    // Publish the new state before signaling the background worker.
    this->collectionState = state;
    if (threadService->HasCallback())
    {
        Assert(concurrentThread == NULL);
        Assert(concurrentWorkReadyEvent == NULL);

        if (!threadService->Invoke(Recycler::StaticBackgroundWorkCallback, this))
        {
            this->collectionState = oldState;
            return false;
        }
        return true;
    }
    else
    {
        Assert(concurrentThread != NULL);
        Assert(concurrentWorkReadyEvent != NULL);
        SetEvent(concurrentWorkReadyEvent);
        return true;
    }
}
- BOOL
- Recycler::StartBackgroundMarkCollect()
- {
- #ifdef RECYCLER_TRACE
- PrintCollectTrace(Js::ConcurrentMarkPhase);
- #endif
- this->CollectionBegin<Js::ConcurrentCollectPhase>();
- // Asynchronous concurrent mark
- BOOL success = StartAsynchronousBackgroundMark();
- this->CollectionEnd<Js::ConcurrentCollectPhase>();
- return success;
- }
// Start a background mark pass. Depending on the arguments, reset-marks
// and/or find-roots (including the stack scan) are done in the foreground
// first; the remaining work is handed to the background thread via
// StartConcurrent. Returns false — with collectionState restored to
// not-collecting — when write watch is unavailable (concurrent mark is then
// disabled) or the background work could not be started.
BOOL
Recycler::StartBackgroundMark(bool foregroundResetMark, bool foregroundFindRoots)
{
    Assert(!this->CollectionInProgress());

    CollectionState backgroundState = CollectionStateConcurrentResetMarks;
    bool doBackgroundFindRoots = true;
    if (foregroundResetMark || foregroundFindRoots)
    {
        // REVIEW: SWB, if there's only write barrier page change, we don't scan and mark?
#ifdef RECYCLER_WRITE_WATCH
        if (!CONFIG_FLAG(ForceSoftwareWriteBarrier))
        {
            RECYCLER_PROFILE_EXEC_BEGIN(this, Js::ResetWriteWatchPhase);
            bool hasWriteWatch = (recyclerPageAllocator.ResetWriteWatch() && recyclerLargeBlockPageAllocator.ResetWriteWatch());
            RECYCLER_PROFILE_EXEC_END(this, Js::ResetWriteWatchPhase);

            if (!hasWriteWatch)
            {
                // Disable concurrent mark
                this->enableConcurrentMark = false;
                return false;
            }
        }
#endif

        // In-thread synchronized GC on the concurrent thread
        ResetMarks(this->enableScanImplicitRoots ? ResetMarkFlags_SynchronizedImplicitRoots : ResetMarkFlags_Synchronized);

        if (foregroundFindRoots)
        {
            // Roots (including the stack) are found here in the foreground,
            // so the background thread can go straight to marking.
            this->collectionState = CollectionStateFindRoots;
            FindRoots();
            ScanStack();
            Assert(collectionState == CollectionStateFindRoots);
            backgroundState = CollectionStateConcurrentMark;
            doBackgroundFindRoots = false;
        }
        else
        {
            // Do find roots in the background
            backgroundState = CollectionStateConcurrentFindRoots;
        }
    }

    if (doBackgroundFindRoots)
    {
        // Snapshot thread context / lock root structures so the background
        // thread can read them safely.
        this->PrepareBackgroundFindRoots();
    }

    if (!StartConcurrent(backgroundState))
    {
        if (doBackgroundFindRoots)
        {
            this->RevertPrepareBackgroundFindRoots();
        }
        this->collectionState = CollectionStateNotCollecting;
        return false;
    }

    return true;
}
- BOOL
- Recycler::StartAsynchronousBackgroundMark()
- {
- // Debug flags to turn off background reset mark or background find roots, default to doing every concurrently
- return StartBackgroundMark(CUSTOM_PHASE_OFF1(GetRecyclerFlagsTable(), Js::BackgroundResetMarksPhase), CUSTOM_PHASE_OFF1(GetRecyclerFlagsTable(), Js::BackgroundFindRootsPhase));
- }
- BOOL
- Recycler::StartSynchronousBackgroundMark()
- {
- return StartBackgroundMark(true, true);
- }
// Run an in-thread mark followed by a sweep that may continue concurrently
// on the background thread. Always returns true.
BOOL
Recycler::StartConcurrentSweepCollect()
{
    Assert(collectionState == CollectionStateNotCollecting);
#ifdef RECYCLER_TRACE
    PrintCollectTrace(Js::ConcurrentSweepPhase);
#endif
    this->CollectionBegin<Js::ConcurrentCollectPhase>();
    this->Mark();

    // We don't have rescan data if we disabled concurrent mark, assume the worst
    // (which means it is harder to get into partial collect mode)
#if ENABLE_PARTIAL_GC
    bool needConcurrentSweep = this->Sweep(RecyclerSweep::MaxPartialCollectRescanRootBytes, true, true);
#else
    bool needConcurrentSweep = this->Sweep(true);
#endif
    this->CollectionEnd<Js::ConcurrentCollectPhase>();
    FinishCollection(needConcurrentSweep);
    return true;
}
// One background repeat-mark pass: rescan written pages (resetting write
// watch), rescan the stack, then drain the mark stacks in parallel.
// Returns the number of pages rescanned, or Recycler::InvalidScanRootBytes
// if the pass was cut short by an OOM rescan request or an abort.
size_t
Recycler::BackgroundRepeatMark()
{
    RECYCLER_PROFILE_EXEC_BACKGROUND_BEGIN(this, Js::BackgroundRepeatMarkPhase);
    Assert(this->backgroundRescanCount <= RecyclerHeuristic::MaxBackgroundRepeatMarkCount - 1);
    size_t rescannedPageCount = this->BackgroundRescan(RescanFlags_ResetWriteWatch);

    if (this->NeedOOMRescan() || this->isAborting)
    {
        // OOM'ed. Let's not continue
        RECYCLER_PROFILE_EXEC_BACKGROUND_END(this, Js::BackgroundRepeatMarkPhase);
        return Recycler::InvalidScanRootBytes;
    }

    // Rescan the stack
    this->BackgroundScanStack();

    // Process mark stack
    this->DoBackgroundParallelMark();

    if (this->NeedOOMRescan())
    {
        RECYCLER_PROFILE_EXEC_BACKGROUND_END(this, Js::BackgroundRepeatMarkPhase);
        return Recycler::InvalidScanRootBytes;
    }

#ifdef RECYCLER_STATS
    // Record this pass's mark data (passes are 1-based by now).
    Assert(this->backgroundRescanCount >= 1 && this->backgroundRescanCount <= RecyclerHeuristic::MaxBackgroundRepeatMarkCount);
    this->collectionStats.backgroundMarkData[this->backgroundRescanCount - 1] = this->collectionStats.markData;
#endif
    RECYCLER_PROFILE_EXEC_BACKGROUND_END(this, Js::BackgroundRepeatMarkPhase);
    return rescannedPageCount;
}
- char* Recycler::GetScriptThreadStackTop()
- {
- // We should have already checked if the recycler is thread bound or not
- Assert(mainThreadHandle != NULL);
- return (char*) savedThreadContext.GetStackTop();
- }
// Conservatively scan the saved main-thread stack from the background thread.
// Skipped (returning 0) when the skipStack flag is set, when we are not in
// script, or when no main thread handle was created. Returns the number of
// bytes scanned.
size_t
Recycler::BackgroundScanStack()
{
    if (this->skipStack)
    {
#ifdef RECYCLER_TRACE
        CUSTOM_PHASE_PRINT_VERBOSE_TRACE1(GetRecyclerFlagsTable(), Js::ScanStackPhase, _u("[%04X] Skipping the stack scan\n"), ::GetCurrentThreadId());
#endif
        return 0;
    }

    if (!this->isInScript || mainThreadHandle == nullptr)
    {
        // No point in scanning the main thread's stack if we are not in script
        // We also can't scan the main thread's stack if we are not thread bounded, and didn't create the main thread's handle
        return 0;
    }

    char* stackTop = this->GetScriptThreadStackTop();
    if (stackTop != nullptr)
    {
        // Scan from the saved stack top up to the stack base.
        size_t size = (char *)stackBase - stackTop;
        ScanMemoryInline<false>((void **)stackTop, size);
        return size;
    }

    return 0;
}
// Background mark driver: drain the mark stacks in parallel, then run one
// repeat-mark pass (and possibly a second, if the first rescanned enough
// pages to justify it). Bails out early on OOM rescan requests or aborts.
void
Recycler::BackgroundMark()
{
    Assert(this->DoQueueTrackedObject());

    this->backgroundRescanCount = 0;
    this->DoBackgroundParallelMark();
    if (this->NeedOOMRescan() || this->isAborting)
    {
        return;
    }

#ifdef RECYCLER_STATS
    this->collectionStats.backgroundMarkData[0] = this->collectionStats.markData;
#endif

    if (PHASE_OFF1(Js::BackgroundRepeatMarkPhase))
    {
        return;
    }

    // We always do one repeat mark pass.
    size_t rescannedPageCount = this->BackgroundRepeatMark();
    if (this->NeedOOMRescan() || this->isAborting)
    {
        // OOM'ed. Let's not continue
        return;
    }

    // BackgroundRepeatMark only returns InvalidScanRootBytes on the OOM/abort
    // paths handled above.
    Assert(rescannedPageCount != Recycler::InvalidScanRootBytes);

    // If we rescanned enough pages in the previous repeat mark pass, then do one more
    // to try to reduce the amount of work we need to do in-thread
    if (rescannedPageCount >= RecyclerHeuristic::BackgroundSecondRepeatMarkThreshold)
    {
        this->BackgroundRepeatMark();

        if (this->NeedOOMRescan() || this->isAborting)
        {
            // OOM'ed. Let's not continue
            return;
        }
    }
}
// Clear all mark bits from the background thread (heap block map and heap
// buckets) and re-arm the pinned-object / implicit-root scanning flags for
// the upcoming mark.
void
Recycler::BackgroundResetMarks()
{
    RECYCLER_PROFILE_EXEC_BACKGROUND_BEGIN(this, Js::BackgroundResetMarksPhase);
    GCETW(GC_BACKGROUNDRESETMARKS_START, (this));

    // No marks may be queued while the mark bits are being cleared.
    Assert(IsMarkStackEmpty());

    this->scanPinnedObjectMap = true;
    this->hasScannedInitialImplicitRoots = false;
    heapBlockMap.ResetMarks();
    autoHeap.ResetMarks(this->enableScanImplicitRoots ? ResetMarkFlags_InBackgroundThreadImplicitRoots : ResetMarkFlags_InBackgroundThread);

    GCETW(GC_BACKGROUNDRESETMARKS_STOP, (this));
    RECYCLER_PROFILE_EXEC_BACKGROUND_END(this, Js::BackgroundResetMarksPhase);
}
// Prepare shared root structures so the background thread can read them
// safely during concurrent find-roots: save the thread context, freeze the
// pinned-object map size, lock the guest arena block lists (deleting arenas
// already flagged pendingDelete), and refresh the big-block caches.
void
Recycler::PrepareBackgroundFindRoots()
{
    Assert(!this->hasPendingConcurrentFindRoot);
    this->hasPendingConcurrentFindRoot = true;

    // Save the thread context here. The background thread
    // will use this saved context for the marking instead of
    // trying to get the live thread context of the thread
    SAVE_THREAD_CONTEXT();

    // Temporarily disable resize so the background can scan without
    // the memory being freed from under it
    pinnedObjectMap.DisableResize();

    // Update the cached info for big blocks in the guest arena
    DListBase<GuestArenaAllocator>::EditingIterator guestArenaIter(&guestArenaList);
    while (guestArenaIter.Next())
    {
        GuestArenaAllocator& allocator = guestArenaIter.Data();
        allocator.SetLockBlockList(true);
        if (allocator.pendingDelete)
        {
            // Remove arenas already unrooted before the background scan starts.
            Assert(this->hasPendingDeleteGuestArena);
            allocator.SetLockBlockList(false);
            guestArenaIter.RemoveCurrent(&HeapAllocator::Instance);
        }
        else if (this->backgroundFinishMarkCount == 0)
        {
            // Update the cached info for big block
            allocator.GetBigBlocks(false);
        }
    }
    this->hasPendingDeleteGuestArena = false;
}
- void
- Recycler::RevertPrepareBackgroundFindRoots()
- {
- Assert(this->hasPendingConcurrentFindRoot);
- this->hasPendingConcurrentFindRoot = false;
- pinnedObjectMap.EnableResize();
- }
// Find roots from the background thread: scan the pinned-object map, the
// guest arenas, and the implicit roots, then transition to the concurrent
// mark state. Returns the number of root bytes scanned.
size_t
Recycler::BackgroundFindRoots()
{
#ifdef RECYCLER_STATS
    size_t lastMarkCount = this->collectionStats.markData.markCount;
#endif
    size_t scanRootBytes = 0;
    Assert(this->IsConcurrentFindRootState());
    Assert(this->hasPendingConcurrentFindRoot);

#if ENABLE_PARTIAL_GC
    Assert(this->inPartialCollectMode || this->DoQueueTrackedObject());
#else
    Assert(this->DoQueueTrackedObject());
#endif
    // Only mark pinned object and guest arenas, which is where most of the roots are.
    // When we go back to the main thread to rescan, we will scan the rest of the root.

    // NOTE: purposefully not marking the transientPinnedObject there. as it is transient :)

    // background mark the pinned object. Since we are in concurrent find root state
    // the main thread won't delete any entries from the map, so concurrent read
    // to the map safe.
    GCETW(GC_BACKGROUNDSCANROOTS_START, (this));
    RECYCLER_PROFILE_EXEC_BACKGROUND_BEGIN(this, Js::BackgroundFindRootsPhase);
    scanRootBytes += this->ScanPinnedObjects</*background = */true>();

    RECYCLER_PROFILE_EXEC_BACKGROUND_BEGIN(this, Js::FindRootArenaPhase);
    // background mark the guest arenas. Since we are in concurrent find root state
    // the main thread won't delete any arena, so concurrent reads to them are ok.
    DListBase<GuestArenaAllocator>::EditingIterator guestArenaIter(&guestArenaList);
    while (guestArenaIter.Next())
    {
        GuestArenaAllocator& allocator = guestArenaIter.Data();
        if (allocator.pendingDelete)
        {
            // Skip guest arena that are already marked for delete
            Assert(this->hasPendingDeleteGuestArena);
            continue;
        }
        scanRootBytes += ScanArena(&allocator, true);
    }
    RECYCLER_PROFILE_EXEC_BACKGROUND_END(this, Js::FindRootArenaPhase);

    this->ScanImplicitRoots();

    RECYCLER_PROFILE_EXEC_BACKGROUND_END(this, Js::BackgroundFindRootsPhase);

    // Find-roots is done; move on to concurrent marking.
    this->hasPendingConcurrentFindRoot = false;
    this->collectionState = CollectionStateConcurrentMark;
    GCETW(GC_BACKGROUNDSCANROOTS_STOP, (this));
    RECYCLER_STATS_ADD(this, rootCount, this->collectionStats.markData.markCount - lastMarkCount);
    return scanRootBytes;
}
// Finish the mark on the background thread: rescan written pages, redo the
// root scan (temporarily switching to the find-roots state), and drain the
// mark stack. Returns the number of root bytes rescanned.
size_t
Recycler::BackgroundFinishMark()
{
#if ENABLE_PARTIAL_GC
    Assert(this->inPartialCollectMode || this->DoQueueTrackedObject());
#else
    Assert(this->DoQueueTrackedObject());
#endif
    Assert(collectionState == CollectionStateConcurrentFinishMark);
    // FinishMarkRescan reports pages; convert to bytes.
    size_t rescannedRootBytes = FinishMarkRescan(true) * AutoSystemInfo::PageSize;

    // BackgroundFindRoots expects (and then leaves) its own states, so switch
    // around the call and restore the finish-mark state afterwards.
    this->collectionState = CollectionStateConcurrentFindRoots;
    rescannedRootBytes += this->BackgroundFindRoots();
    this->collectionState = CollectionStateConcurrentFinishMark;

    RECYCLER_PROFILE_EXEC_BACKGROUND_BEGIN(this, Js::MarkPhase);
    ProcessMark(true);
    RECYCLER_PROFILE_EXEC_BACKGROUND_END(this, Js::MarkPhase);
    return rescannedRootBytes;
}
// Delegate sweeping of pending objects to the heap.
void
Recycler::SweepPendingObjects(RecyclerSweep& recyclerSweep)
{
    autoHeap.SweepPendingObjects(recyclerSweep);
}
// On the main thread, take ownership of objects swept by the background
// thread. Completes any background-finished partial collect bookkeeping
// first.
void
Recycler::ConcurrentTransferSweptObjects(RecyclerSweep& recyclerSweep)
{
    Assert(!recyclerSweep.IsBackground());
    Assert((this->collectionState & Collection_TransferSwept) == Collection_TransferSwept);

#if ENABLE_PARTIAL_GC
    if (this->hasBackgroundFinishPartial)
    {
        this->hasBackgroundFinishPartial = false;
        this->ClearPartialCollect();
    }
#endif
    autoHeap.ConcurrentTransferSweptObjects(recyclerSweep);
}
#if ENABLE_PARTIAL_GC
// Partial-collect variant of the swept-object transfer; runs on the main
// thread and delegates to the heap.
void
Recycler::ConcurrentPartialTransferSweptObjects(RecyclerSweep& recyclerSweep)
{
    Assert(!recyclerSweep.IsBackground());
    Assert(!this->hasBackgroundFinishPartial);
    autoHeap.ConcurrentPartialTransferSweptObjects(recyclerSweep);
}
#endif
// Derive the per-collection flags (allowDispose, skipStack) from the
// CollectionFlags, then run FinishConcurrentCollect through the collection
// wrapper so host callbacks fire around it.
BOOL
Recycler::FinishConcurrentCollectWrapped(CollectionFlags flags)
{
    this->allowDispose = (flags & CollectOverride_AllowDispose) == CollectOverride_AllowDispose;

#if ENABLE_CONCURRENT_GC
    this->skipStack = ((flags & CollectOverride_SkipStack) != 0);
#if DBG
    this->isConcurrentGCOnIdle = (flags == CollectOnScriptIdle);
#endif
#endif

    BOOL collected = collectionWrapper->ExecuteRecyclerCollectionFunction(this, &Recycler::FinishConcurrentCollect, flags);

    return collected;
}
// Wait up to waitTime milliseconds for the background worker to signal
// concurrentWorkDoneEvent. The dedicated thread (when present) is raised to
// normal priority for the wait and dropped back to below-normal once done;
// on timeout the priority boost is kept. Returns TRUE when the event was
// signaled.
BOOL
Recycler::WaitForConcurrentThread(DWORD waitTime)
{
    Assert(this->IsConcurrentState() || this->collectionState == CollectionStateParallelMark);
    RECYCLER_PROFILE_EXEC_BEGIN(this, Js::ConcurrentWaitPhase);
    if (concurrentThread != NULL)
    {
        // Set the priority back to normal before we wait to ensure it doesn't starve
        SetThreadPriority(this->concurrentThread, THREAD_PRIORITY_NORMAL);
    }

    DWORD ret = WaitForSingleObject(concurrentWorkDoneEvent, waitTime);

    if (concurrentThread != NULL)
    {
        if (ret == WAIT_TIMEOUT)
        {
            // Keep the priority boost.
            priorityBoost = true;
        }
        else
        {
            Assert(ret == WAIT_OBJECT_0);
            // Back to below normal
            SetThreadPriority(this->concurrentThread, THREAD_PRIORITY_BELOW_NORMAL);
            priorityBoost = false;
        }
    }

    RECYCLER_PROFILE_EXEC_END(this, Js::ConcurrentWaitPhase);
    return (ret == WAIT_OBJECT_0);
}
#if ENABLE_BACKGROUND_PAGE_FREEING
// Flushes pages queued for background freeing on each of the recycler's page
// allocators. Idle decommit is suspended around each flush so the idle-
// decommit thread cannot race with the flush on the same allocator.
void
Recycler::FlushBackgroundPages()
{
    recyclerPageAllocator.SuspendIdleDecommit();
    recyclerPageAllocator.FlushBackgroundPages();
    recyclerPageAllocator.ResumeIdleDecommit();

    recyclerLargeBlockPageAllocator.SuspendIdleDecommit();
    recyclerLargeBlockPageAllocator.FlushBackgroundPages();
    recyclerLargeBlockPageAllocator.ResumeIdleDecommit();

    this->threadPageAllocator->SuspendIdleDecommit();
    this->threadPageAllocator->FlushBackgroundPages();
    this->threadPageAllocator->ResumeIdleDecommit();

#ifdef RECYCLER_WRITE_BARRIER_ALLOC_SEPARATE_PAGE
    recyclerWithBarrierPageAllocator.SuspendIdleDecommit();
    recyclerWithBarrierPageAllocator.FlushBackgroundPages();
    recyclerWithBarrierPageAllocator.ResumeIdleDecommit();
#endif
}
#endif
#ifdef ENABLE_DEBUG_CONFIG_OPTIONS
// RAII helper (debug-config builds only): optionally makes all heap-block-map
// pages read-only for the duration of a scope, to catch stray writes during
// rescan. Unprotect() may be called early; the destructor is then a no-op.
AutoProtectPages::AutoProtectPages(Recycler* recycler, bool protectEnabled) :
    isReadOnly(false),
    recycler(recycler)
{
    if (protectEnabled)
    {
        recycler->heapBlockMap.MakeAllPagesReadOnly(recycler);
        isReadOnly = true;
    }
}

AutoProtectPages::~AutoProtectPages()
{
    Unprotect();
}

// Restores read-write protection; idempotent (guarded by isReadOnly).
void AutoProtectPages::Unprotect()
{
    if (isReadOnly)
    {
        recycler->heapBlockMap.MakeAllPagesReadWrite(recycler);
        isReadOnly = false;
    }
}
#endif
// Finishes the in-progress concurrent collection. Waits (bounded, unless
// CollectOverride_ForceInThread) for the background thread, then either:
//  - CollectionStateRescanWait: finish marking in-thread (rescan + find
//    roots), verify, and kick off the sweep (possibly concurrent); or
//  - CollectionStateTransferSweptWait: flush background-freed pages and
//    transfer the background-swept objects back to the heap.
// Returns FALSE if the background thread did not finish in time (collection
// stays in progress), TRUE once the collection has been driven to completion
// (possibly leaving a concurrent sweep running).
BOOL
Recycler::FinishConcurrentCollect(CollectionFlags flags)
{
    if (!this->IsConcurrentState())
    {
        Assert(false);
        return false;
    }

#ifdef PROFILE_EXEC
    Js::Phase concurrentPhase = Js::ConcurrentCollectPhase;
    // TODO: Remove this workaround for unreferenced local after enabled -profile for GC
    static_cast<Js::Phase>(concurrentPhase);
#endif
#if ENABLE_PARTIAL_GC
    RECYCLER_PROFILE_EXEC_BEGIN2(this, Js::RecyclerPhase,
        (concurrentPhase = ((this->inPartialCollectMode && this->IsConcurrentMarkState())?
        Js::ConcurrentPartialCollectPhase : Js::ConcurrentCollectPhase)));
#else
    RECYCLER_PROFILE_EXEC_BEGIN2(this, Js::RecyclerPhase,
        (concurrentPhase = Js::ConcurrentCollectPhase));
#endif

    // Don't do concurrent sweep if we have priority boosted.
    const BOOL forceInThread = flags & CollectOverride_ForceInThread;
    bool concurrent = (flags & CollectMode_Concurrent) != 0;
    concurrent = concurrent && (!priorityBoost || this->backgroundRescanCount != 1);
#ifdef RECYCLER_TRACE
    collectionParam.priorityBoostConcurrentSweepOverride = priorityBoost;
#endif

    const DWORD waitTime = forceInThread? INFINITE : RecyclerHeuristic::FinishConcurrentCollectWaitTime(this->GetRecyclerFlagsTable());
    GCETW(GC_FINISHCONCURRENTWAIT_START, (this, waitTime));
    const BOOL waited = WaitForConcurrentThread(waitTime);
    GCETW(GC_FINISHCONCURRENTWAIT_STOP, (this, !waited));
    if (!waited)
    {
        // Background work not done yet; try again later.
        RECYCLER_PROFILE_EXEC_END2(this, concurrentPhase, Js::RecyclerPhase);
        return false;
    }

    bool needConcurrentSweep = false;
    if (collectionState == CollectionStateRescanWait)
    {
        // Background mark is done; rescan and finish marking on this thread.
        GCETW_INTERNAL(GC_START, (this, ETWEvent_ConcurrentRescan));
#ifdef RECYCLER_TRACE
#if ENABLE_PARTIAL_GC
        PrintCollectTrace(this->inPartialCollectMode ? Js::ConcurrentPartialCollectPhase : Js::ConcurrentMarkPhase, true);
#else
        PrintCollectTrace(Js::ConcurrentMarkPhase, true);
#endif
#endif
        collectionState = CollectionStateRescanFindRoots;

#ifdef ENABLE_DEBUG_CONFIG_OPTIONS
        // TODO: Change this behavior
        // ProtectPagesOnRescan is not supported in PageHeap mode because the page protection is changed
        // outside the PageAllocator in PageHeap mode and so pages are not in the state that the
        // PageAllocator expects when it goes to change the page protection
        // One viable fix is to move the guard page protection logic outside of the heap blocks
        // and into the page allocator
        AssertMsg(!(IsPageHeapEnabled() && GetRecyclerFlagsTable().RecyclerProtectPagesOnRescan), "ProtectPagesOnRescan not supported in page heap mode");
        AutoProtectPages protectPages(this, GetRecyclerFlagsTable().RecyclerProtectPagesOnRescan);
#endif

        const bool backgroundFinishMark = !forceInThread && concurrent && ((flags & CollectOverride_BackgroundFinishMark) != 0);
        const DWORD finishMarkWaitTime = RecyclerHeuristic::BackgroundFinishMarkWaitTime(backgroundFinishMark, GetRecyclerFlagsTable());
        size_t rescanRootBytes = FinishMark(finishMarkWaitTime);

        if (rescanRootBytes == Recycler::InvalidScanRootBytes)
        {
            Assert(this->IsMarkState());
            RECYCLER_PROFILE_EXEC_END2(this, concurrentPhase, Js::RecyclerPhase);
            GCETW_INTERNAL(GC_STOP, (this, ETWEvent_ConcurrentRescan));
            // we timeout trying to mark.
            return false;
        }

#ifdef RECYCLER_STATS
        collectionStats.continueCollectAllocBytes = autoHeap.uncollectedAllocBytes;
#endif
#ifdef RECYCLER_VERIFY_MARK
        if (GetRecyclerFlagsTable().RecyclerVerifyMark)
        {
            this->VerifyMark();
        }
#endif
#ifdef ENABLE_DEBUG_CONFIG_OPTIONS
        // Pages must be writable again before sweeping.
        protectPages.Unprotect();
#endif
#if ENABLE_PARTIAL_GC
        needConcurrentSweep = this->Sweep(rescanRootBytes, concurrent, true);
#else
        needConcurrentSweep = this->Sweep(concurrent);
#endif
        GCETW_INTERNAL(GC_STOP, (this, ETWEvent_ConcurrentRescan));
    }
    else
    {
        // Background sweep is done; transfer the swept objects back.
        GCETW_INTERNAL(GC_START, (this, ETWEvent_ConcurrentTransferSwept));
        GCETW(GC_FLUSHZEROPAGE_START, (this));
        Assert(collectionState == CollectionStateTransferSweptWait);
#ifdef RECYCLER_TRACE
        PrintCollectTrace(Js::ConcurrentSweepPhase, true);
#endif
        collectionState = CollectionStateTransferSwept;

#if ENABLE_BACKGROUND_PAGE_FREEING
        if (CONFIG_FLAG(EnableBGFreeZero))
        {
            // We should have zeroed all the pages in the background thread
            Assert(!recyclerPageAllocator.HasZeroQueuedPages());
            Assert(!recyclerLargeBlockPageAllocator.HasZeroQueuedPages());
            this->FlushBackgroundPages();
        }
#endif
        GCETW(GC_FLUSHZEROPAGE_STOP, (this));
        GCETW(GC_TRANSFERSWEPTOBJECTS_START, (this));

        Assert(this->recyclerSweep != nullptr);
        Assert(!this->recyclerSweep->IsBackground());
#if ENABLE_PARTIAL_GC
        if (this->inPartialCollectMode)
        {
            ConcurrentPartialTransferSweptObjects(*this->recyclerSweep);
        }
        else
#endif
        {
            ConcurrentTransferSweptObjects(*this->recyclerSweep);
        }
        recyclerSweep->EndSweep();
        GCETW(GC_TRANSFERSWEPTOBJECTS_STOP, (this));
        GCETW_INTERNAL(GC_STOP, (this, ETWEvent_ConcurrentTransferSwept));
    }
    RECYCLER_PROFILE_EXEC_END2(this, concurrentPhase, Js::RecyclerPhase);

    FinishCollection(needConcurrentSweep);

    if (!this->CollectionInProgress())
    {
        if (NeedExhaustiveRepeatCollect())
        {
            // Exhaustive collect requested while we were collecting: repeat,
            // but without partial mode.
            DoCollect((CollectionFlags)(flags & ~CollectMode_Partial));
        }
        else
        {
            EndCollection();
        }
    }
    return true;
}
#if !DISABLE_SEH
// SEH filter for the concurrent GC thread. Lets assertion failures propagate,
// optionally generates a crash dump, prints diagnostics (x86 debug builds),
// and then continues the search so the exception still terminates the process.
int
Recycler::ExceptFilter(LPEXCEPTION_POINTERS pEP)
{
#if DBG
    // Assert exception code
    if (pEP->ExceptionRecord->ExceptionCode == STATUS_ASSERTION_FAILURE)
    {
        return EXCEPTION_CONTINUE_SEARCH;
    }
#endif

#ifdef GENERATE_DUMP
    if (Js::Configuration::Global.flags.IsEnabled(Js::DumpOnCrashFlag))
    {
        Js::Throw::GenerateDump(pEP, Js::Configuration::Global.flags.DumpOnCrash);
    }
#endif

#if DBG && _M_IX86
    int callerEBP = *((int*)pEP->ContextRecord->Ebp);

    // NOTE(review): six arguments are passed for what looks like five %X
    // specifiers (ExceptionAddress is printed under the "EIP" label, then
    // Eip itself is also passed) — confirm the format string matches the
    // argument list. Debug-only diagnostic, so behavior impact is limited.
    Output::Print(_u("Recycler Concurrent Thread: Uncaught exception: EIP: 0x%X  ExceptionCode: 0x%X  EBP: 0x%X ReturnAddress: 0x%X ReturnAddress2: 0x%X\n"),
        pEP->ExceptionRecord->ExceptionAddress, pEP->ExceptionRecord->ExceptionCode, pEP->ContextRecord->Eip,
        pEP->ContextRecord->Ebp, *((int*)pEP->ContextRecord->Ebp + 1), *((int*) callerEBP + 1));
#endif

    Output::Flush();
    return EXCEPTION_CONTINUE_SEARCH;
}
#endif
// Raw thread entry point for the concurrent GC thread. Wraps ThreadProc in SEH
// (where enabled) so uncaught exceptions run through Recycler::ExceptFilter
// before taking down the process.
unsigned int
Recycler::StaticThreadProc(LPVOID lpParameter)
{
    DWORD ret = (DWORD)-1;

#if !DISABLE_SEH
    __try
    {
#endif
        Recycler * recycler = (Recycler *)lpParameter;
#if DBG
        recycler->concurrentThreadExited = false;
#endif
        ret = recycler->ThreadProc();
#if !DISABLE_SEH
    }
    __except(Recycler::ExceptFilter(GetExceptionInformation()))
    {
        Assert(false);
    }
#endif

    return ret;
}
// Thread-service callback entry point: runs one unit of background GC work
// in forced-foreground mode (no dedicated GC thread).
void
Recycler::StaticBackgroundWorkCallback(void * callbackData)
{
    Recycler * recycler = (Recycler *) callbackData;
    recycler->DoBackgroundWork(true);
}
#if defined(ENABLE_JS_ETW) && defined(NTBUILD)
// Maps the current collection state to the ETW activation kind used for
// background-mark events: finish-mark gets its own kind, everything else is
// reported as ordinary concurrent mark.
static ETWEventGCActivationKind
BackgroundMarkETWEventGCActivationKind(CollectionState collectionState)
{
    if (collectionState == CollectionStateConcurrentFinishMark)
    {
        return ETWEvent_ConcurrentFinishMark;
    }
    return ETWEvent_ConcurrentMark;
}
#endif
// Executes one unit of background GC work, dispatched on collectionState:
//  - wrapper callback, parallel mark, concurrent mark (reset/find-roots/mark/
//    finish-mark, with deliberate switch fall-through), or concurrent sweep.
// Signals concurrentWorkDoneEvent when the unit completes so the main thread's
// WaitForConcurrentThread can proceed.
void
Recycler::DoBackgroundWork(bool forceForeground)
{
    if (this->collectionState == CollectionStateConcurrentWrapperCallback)
    {
        this->collectionWrapper->ConcurrentCallback();
    }
    else if (this->collectionState == CollectionStateParallelMark)
    {
        this->ProcessParallelMark(false, &this->markContext);
    }
    else if (this->IsConcurrentMarkState())
    {
        RECYCLER_PROFILE_EXEC_BACKGROUND_BEGIN(this, this->collectionState == CollectionStateConcurrentFinishMark?
            Js::BackgroundFinishMarkPhase : Js::ConcurrentMarkPhase);
        GCETW_INTERNAL(GC_START, (this, BackgroundMarkETWEventGCActivationKind(this->collectionState)));
        DebugOnly(this->markContext.GetPageAllocator()->SetConcurrentThreadId(::GetCurrentThreadId()));

        Assert(this->enableConcurrentMark);

        if (this->collectionState != CollectionStateConcurrentFinishMark)
        {
            this->StartQueueTrackedObject();
        }

        // The mark states form a pipeline; each case advances collectionState
        // and falls through to the next stage.
        switch (this->collectionState)
        {
        case CollectionStateConcurrentResetMarks:
            this->BackgroundResetMarks();
            this->BackgroundResetWriteWatchAll();
            this->collectionState = CollectionStateConcurrentFindRoots;
            // fall-through
        case CollectionStateConcurrentFindRoots:
            this->BackgroundFindRoots();
            this->BackgroundScanStack();
            this->collectionState = CollectionStateConcurrentMark;
            // fall-through
        case CollectionStateConcurrentMark:
            this->BackgroundMark();
            Assert(this->collectionState == CollectionStateConcurrentMark);
            RECORD_TIMESTAMP(concurrentMarkFinishTime);
            break;
        case CollectionStateConcurrentFinishMark:
            this->backgroundRescanRootBytes = this->BackgroundFinishMark();
            Assert(!HasPendingMarkObjects());
            break;
        default:
            Assert(false);
            break;
        };

        // collectionState is still a mark state here; it only becomes
        // RescanWait after the ETW/profile bookkeeping below.
        GCETW_INTERNAL(GC_STOP, (this, BackgroundMarkETWEventGCActivationKind(this->collectionState)));
        RECYCLER_PROFILE_EXEC_BACKGROUND_END(this, this->collectionState == CollectionStateConcurrentFinishMark?
            Js::BackgroundFinishMarkPhase : Js::ConcurrentMarkPhase);

        this->collectionState = CollectionStateRescanWait;
        DebugOnly(this->markContext.GetPageAllocator()->ClearConcurrentThreadId());
    }
    else
    {
        RECYCLER_PROFILE_EXEC_BACKGROUND_BEGIN(this, Js::ConcurrentSweepPhase);
        GCETW_INTERNAL(GC_START, (this, ETWEvent_ConcurrentSweep));
        GCETW(GC_BACKGROUNDZEROPAGE_START, (this));
        Assert(this->enableConcurrentSweep);
        Assert(this->collectionState == CollectionStateConcurrentSweep);

#if ENABLE_BACKGROUND_PAGE_ZEROING
        if (CONFIG_FLAG(EnableBGFreeZero))
        {
            // Zero the queued pages first so they are available to be allocated
            recyclerPageAllocator.BackgroundZeroQueuedPages();
            recyclerLargeBlockPageAllocator.BackgroundZeroQueuedPages();
#ifdef RECYCLER_WRITE_BARRIER_ALLOC_SEPARATE_PAGE
            recyclerWithBarrierPageAllocator.BackgroundZeroQueuedPages();
#endif
        }
#endif

        GCETW(GC_BACKGROUNDZEROPAGE_STOP, (this));
        GCETW(GC_BACKGROUNDSWEEP_START, (this));
        Assert(this->recyclerSweep != nullptr);
        this->recyclerSweep->BackgroundSweep();

        uint sweptBytes = 0;
#ifdef RECYCLER_STATS
        sweptBytes = (uint)collectionStats.objectSweptBytes;
#endif
        GCETW(GC_BACKGROUNDSWEEP_STOP, (this, sweptBytes));

#if ENABLE_BACKGROUND_PAGE_ZEROING
        if (CONFIG_FLAG(EnableBGFreeZero))
        {
            // Drain the zero queue again as we might have free more during sweep
            // in the background
            GCETW(GC_BACKGROUNDZEROPAGE_START, (this));
            recyclerPageAllocator.BackgroundZeroQueuedPages();
#ifdef RECYCLER_WRITE_BARRIER_ALLOC_SEPARATE_PAGE
            recyclerWithBarrierPageAllocator.BackgroundZeroQueuedPages();
#endif
            recyclerLargeBlockPageAllocator.BackgroundZeroQueuedPages();
            GCETW(GC_BACKGROUNDZEROPAGE_STOP, (this));
        }
#endif

        GCETW_INTERNAL(GC_STOP, (this, ETWEvent_ConcurrentSweep));
        Assert(this->collectionState == CollectionStateConcurrentSweep);
        this->collectionState = CollectionStateTransferSweptWait;
        RECYCLER_PROFILE_EXEC_BACKGROUND_END(this, Js::ConcurrentSweepPhase);
    }

    // Tell the main thread this work unit is complete, then let the wrapper
    // block until more work is ready.
    SetEvent(this->concurrentWorkDoneEvent);

    collectionWrapper->WaitCollectionCallBack();
}
// Main loop of the dedicated concurrent GC thread. Waits for work (and, with
// IDLE_DECOMMIT_ENABLED, doubles as the idle-decommit timer thread), runs
// DoBackgroundWork per wakeup, and exits when collectionState is set to
// CollectionStateExit. Returns 0 on clean exit.
DWORD
Recycler::ThreadProc()
{
    Assert(this->IsConcurrentEnabled());

#if !defined(_UCRT)
    // We do this before we set the concurrentWorkDoneEvent because GetModuleHandleEx requires
    // getting the loader lock. We could have the following case:
    // Thread A => Initialize Concurrent Thread (C)
    // C signals Signal Done
    // C yields since its lower priority
    // Thread A starts running- and is told to shut down.
    // Thread A grabs loader lock as part of the shutdown sequence
    // Thread A waits for C to be done
    // C wakes up now- and tries to grab loader lock.
    // To prevent this deadlock, we call GetModuleHandleEx first and then set the concurrentWorkDoneEvent
    HMODULE dllHandle = NULL;
    if (!GetModuleHandleEx(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS, (LPCTSTR)&Recycler::StaticThreadProc, &dllHandle))
    {
        dllHandle = NULL;
    }
#endif

#ifdef ENABLE_JS_ETW
    // Create an ETW ActivityId for this thread, to help tools correlate ETW events we generate
    GUID activityId = { 0 };
    auto eventActivityIdControlResult = EventActivityIdControl(EVENT_ACTIVITY_CTRL_CREATE_SET_ID, &activityId);
    Assert(eventActivityIdControlResult == ERROR_SUCCESS);
#endif

    // Signal that the thread has started
    SetEvent(this->concurrentWorkDoneEvent);

    // GC work runs below normal priority; WaitForConcurrentThread boosts us
    // back to normal while the main thread is blocked on us.
    SetThreadPriority(::GetCurrentThread(), THREAD_PRIORITY_BELOW_NORMAL);

#if defined(DBG) && defined(PROFILE_EXEC)
    this->backgroundProfilerPageAllocator.SetConcurrentThreadId(::GetCurrentThreadId());
#endif

#ifdef IDLE_DECOMMIT_ENABLED
    DWORD handleCount = this->concurrentIdleDecommitEvent? 2 : 1;
    HANDLE handles[2] = { this->concurrentWorkReadyEvent, this->concurrentIdleDecommitEvent };
#endif
    do
    {
#ifdef IDLE_DECOMMIT_ENABLED
        // Run idle decommit on each allocator; the minimum returned wait time
        // is how long we may sleep before decommit work is due again.
        needIdleDecommitSignal = IdleDecommitSignal_None;
        DWORD threadPageAllocatorWaitTime = threadPageAllocator->IdleDecommit();
        DWORD recyclerPageAllocatorWaitTime = recyclerPageAllocator.IdleDecommit();
        DWORD waitTime = min(threadPageAllocatorWaitTime, recyclerPageAllocatorWaitTime);
        DWORD recyclerLargeBlockPageAllocatorWaitTime = recyclerLargeBlockPageAllocator.IdleDecommit();
        waitTime = min(waitTime, recyclerLargeBlockPageAllocatorWaitTime);
#ifdef RECYCLER_WRITE_BARRIER_ALLOC_SEPARATE_PAGE
        DWORD recyclerWithBarrierPageAllocatorWaitTime = recyclerWithBarrierPageAllocator.IdleDecommit();
        waitTime = min(waitTime, recyclerWithBarrierPageAllocatorWaitTime);
#endif
        if (waitTime == INFINITE)
        {
            // No decommit pending: ask to be signaled if that changes. If a
            // timer request raced in (NeedTimer), loop to recompute waitTime.
            DWORD ret = ::InterlockedCompareExchange(&needIdleDecommitSignal, IdleDecommitSignal_NeedSignal, IdleDecommitSignal_None);
            if (ret == IdleDecommitSignal_NeedTimer)
            {
#if DBG
                if (GetRecyclerFlagsTable().Trace.IsEnabled(Js::IdleDecommitPhase))
                {
                    Output::Print(_u("Recycler Thread IdleDecommit Need Timer\n"));
                    Output::Flush();
                }
#endif
                continue;
            }
        }
#if DBG
        else
        {
            if (GetRecyclerFlagsTable().Trace.IsEnabled(Js::IdleDecommitPhase))
            {
                Output::Print(_u("Recycler Thread IdleDecommit Wait %d\n"), waitTime);
                Output::Flush();
            }
        }
#endif
        DWORD result = WaitForMultipleObjectsEx(handleCount, handles, FALSE, waitTime, FALSE);
        if (result != WAIT_OBJECT_0)
        {
            // Woken by the idle-decommit event or a timeout, not by GC work;
            // loop back to service idle decommit again.
            Assert((handleCount == 2 && result == WAIT_OBJECT_0 + 1) || (waitTime != INFINITE && result == WAIT_TIMEOUT));
#if DBG
            if (GetRecyclerFlagsTable().Trace.IsEnabled(Js::IdleDecommitPhase))
            {
                if (result == WAIT_TIMEOUT)
                {
                    Output::Print(_u("Recycler Thread IdleDecommit Timeout: %d\n"), waitTime);
                }
                else
                {
                    Output::Print(_u("Recycler Thread IdleDecommit Signaled\n"));
                }
                Output::Flush();
            }
#endif
            continue;
        }
#else
        DWORD result = WaitForSingleObject(this->concurrentWorkReadyEvent, INFINITE);
        Assert(result == WAIT_OBJECT_0);
#endif
        if (this->collectionState == CollectionStateExit)
        {
#if DBG
            this->concurrentThreadExited = true;
#endif
            break;
        }
        DoBackgroundWork();
    }
    while (true);

    SetEvent(this->concurrentWorkDoneEvent);

#if !defined(_UCRT)
    if (dllHandle)
    {
        // Release the module reference taken above and terminate the thread
        // without touching module code afterwards.
        FreeLibraryAndExitThread(dllHandle, 0);
    }
    else
#endif
    {
        return 0;
    }
}
- #endif //ENABLE_CONCURRENT_GC
- void
- Recycler::FinishCollection(bool needConcurrentSweep)
- {
- #if ENABLE_CONCURRENT_GC
- Assert(!!this->InConcurrentSweep() == needConcurrentSweep);
- #else
- Assert(!needConcurrentSweep);
- #endif
- if (!needConcurrentSweep)
- {
- FinishCollection();
- }
- else
- {
- FinishDisposeObjects();
- }
- }
// Final (non-concurrent) phase of a collection: schedules the next collection,
// runs the host post-collection callback, optionally decommits pages, performs
// debug verification, disposes finalizable objects, and emits stats/traces.
void
Recycler::FinishCollection()
{
#if ENABLE_PARTIAL_GC && ENABLE_CONCURRENT_GC
    Assert(!this->hasBackgroundFinishPartial);
#endif
    Assert(!this->hasPendingDeleteGuestArena);

    // Reset the time heuristics
    ScheduleNextCollection();

    {
        // Host callback runs in a dedicated state; the auto-switcher restores
        // NotCollecting on scope exit even if the callback re-enters.
        AutoSwitchCollectionStates collectionState(this,
            /* entry state */ CollectionStatePostCollectionCallback,
            /* exit state */ CollectionStateNotCollecting);
        collectionWrapper->PostCollectionCallBack();
    }

#if ENABLE_CONCURRENT_GC
    this->backgroundFinishMarkCount = 0;
#endif

    // Do a partial page decommit now
    if (decommitOnFinish)
    {
        ForEachPageAllocator([](IdleDecommitPageAllocator* pageAlloc)
        {
            pageAlloc->DecommitNow(false);
        });

        this->decommitOnFinish = false;
    }

    RECYCLER_SLOW_CHECK(autoHeap.Check());

#ifdef RECYCLER_MEMORY_VERIFY
    this->Verify(Js::RecyclerPhase);
#endif
#ifdef RECYCLER_FINALIZE_CHECK
    autoHeap.VerifyFinalize();
#endif
#ifdef ENABLE_JS_ETW
    FlushFreeRecord();
#endif

    FinishDisposeObjects();

#ifdef RECYCLER_FINALIZE_CHECK
    // Disposing may have kicked off more work; re-verify only if we are not
    // back in a mark state.
    if (!this->IsMarkState())
    {
        autoHeap.VerifyFinalize();
    }
#endif

#ifdef RECYCLER_STATS
    if (CUSTOM_PHASE_STATS1(this->GetRecyclerFlagsTable(), Js::RecyclerPhase))
    {
        PrintCollectStats();
    }
#endif

#ifdef PROFILE_RECYCLER_ALLOC
    if (MemoryProfiler::IsTraceEnabled(true))
    {
        PrintAllocStats();
    }
#endif

#ifdef DUMP_FRAGMENTATION_STATS
    if (GetRecyclerFlagsTable().DumpFragmentationStats)
    {
        autoHeap.DumpFragmentationStats();
    }
#endif
    RECORD_TIMESTAMP(currentCollectionEndTime);
}
- void
- Recycler::SetExternalRootMarker(ExternalRootMarker fn, void * context)
- {
- externalRootMarker = fn;
- externalRootMarkerContext = context;
- }
// TODO: (leish) remove following function? seems not make sense to re-allocate in recycler
// Adds a host-owned ("guest") arena to the list scanned as GC roots. Returns
// the list slot (used later to unregister), or null on allocation failure.
ArenaData **
Recycler::RegisterExternalGuestArena(ArenaData* guestArena)
{
    return externalGuestArenaList.PrependNode(&NoThrowHeapAllocator::Instance, guestArena);
}
// Removes a guest arena from the root list by value.
void
Recycler::UnregisterExternalGuestArena(ArenaData* guestArena)
{
    externalGuestArenaList.Remove(&NoThrowHeapAllocator::Instance, guestArena);
}
// Removes a guest arena from the root list by the slot pointer returned from
// RegisterExternalGuestArena.
void
Recycler::UnregisterExternalGuestArena(ArenaData** guestArena)
{
    externalGuestArenaList.RemoveElement(&NoThrowHeapAllocator::Instance, guestArena);
}
// Installs the host's collection wrapper (callbacks around GC phases) and
// seeds the cookie used for large-heap-block encoding from the wrapper's RNG.
void
Recycler::SetCollectionWrapper(RecyclerCollectionWrapper * wrapper)
{
    this->collectionWrapper = wrapper;
#if LARGEHEAPBLOCK_ENCODING
    this->Cookie = wrapper->GetRandomNumber();
#else
    this->Cookie = 0;
#endif
}
// Recycler-style realloc: allocates a new buffer of requestedBytes, copies the
// old contents, and explicitly frees the old buffer. Returns the new buffer
// (which may be null if Alloc can fail on this configuration).
// - If the aligned sizes match, the original buffer is returned unchanged.
// - truncate==true permits shrinking (only requestedBytes are copied).
// NOTE(review): when existingBytes > requestedBytes and truncate is false,
// js_memcpy_s is asked to copy more than the destination holds — presumably
// callers must pass truncate when shrinking; confirm the contract.
// NOTE(review): the old buffer is freed even when the replacement allocation
// returned null — callers must not reuse 'buffer' after a failed Realloc.
char *
Recycler::Realloc(void* buffer, DECLSPEC_GUARD_OVERFLOW size_t existingBytes, DECLSPEC_GUARD_OVERFLOW size_t requestedBytes, bool truncate)
{
    Assert(requestedBytes > 0);
    if (existingBytes == 0)
    {
        // Nothing to copy or free; behaves like plain Alloc.
        Assert(buffer == nullptr);
        return Alloc(requestedBytes);
    }

    Assert(buffer != nullptr);

    size_t nbytes = AllocSizeMath::Align(requestedBytes, HeapConstants::ObjectGranularity);

    // Since we successfully allocated, we shouldn't have integer overflow here
    size_t nbytesExisting = AllocSizeMath::Align(existingBytes, HeapConstants::ObjectGranularity);
    Assert(nbytesExisting >= existingBytes);

    if (nbytes == nbytesExisting)
    {
        // Same rounded size: the existing allocation already fits.
        return (char *)buffer;
    }

    char* replacementBuf = this->Alloc(requestedBytes);
    if (replacementBuf != nullptr)
    {
        // Truncate
        if (existingBytes > requestedBytes && truncate)
        {
            js_memcpy_s(replacementBuf, requestedBytes, buffer, requestedBytes);
        }
        else
        {
            js_memcpy_s(replacementBuf, requestedBytes, buffer, existingBytes);
        }
    }

    if (nbytesExisting > 0)
    {
        this->Free(buffer, nbytesExisting);
    }

    return replacementBuf;
}
// Returns true when objects must be swept individually (rather than whole
// blocks being reclaimed wholesale) so that per-object bookkeeping stays
// accurate: recycler test hooks, alloc tracing, stats collection, or (debug)
// implicit-root verification.
bool
Recycler::ForceSweepObject()
{
#ifdef RECYCLER_TEST_SUPPORT
    if (BinaryFeatureControl::RecyclerTest())
    {
        if (checkFn != nullptr)
        {
            return true;
        }
    }
#endif

#ifdef PROFILE_RECYCLER_ALLOC
    if (trackerDictionary != nullptr)
    {
        // Need to sweep object if we are tracing recycler allocs
        return true;
    }
#endif

#ifdef RECYCLER_STATS
    if (CUSTOM_PHASE_STATS1(this->GetRecyclerFlagsTable(), Js::RecyclerPhase))
    {
        return true;
    }
#endif

#if DBG
    // Force sweeping the object so we can assert that we are not sweeping objects that are still implicit roots
    if (this->enableScanImplicitRoots)
    {
        return true;
    }
#endif
    return false;
}
// Called when script exits to decide whether to schedule an idle-time GC.
// May instead kick off a concurrent collect immediately (returning false) if
// enough memory has accumulated and the next scheduled collection is near.
// Returns true only when an idle collect should be scheduled.
bool
Recycler::ShouldIdleCollectOnExit()
{
    // Always reset partial heuristics even if we are not doing idle collecting
    // So we don't carry the heuristics to the next script activation
    this->ResetPartialHeuristicCounters();

    if (this->CollectionInProgress())
    {
#ifdef RECYCLER_TRACE
        CUSTOM_PHASE_PRINT_VERBOSE_TRACE1(GetRecyclerFlagsTable(), Js::IdleCollectPhase, _u("%04X> Skipping scheduling Idle Collect. Reason: Collection in progress\n"), ::GetCurrentThreadId());
#endif
        // Don't schedule an idle collect if there is a collection going on already
        // IDLE-GC-TODO: Fix ResetHeuristics in the GC so we can detect memory allocation during
        // the concurrent collect and still schedule an idle collect
        return false;
    }

    if (CUSTOM_PHASE_FORCE1(GetRecyclerFlagsTable(), Js::IdleCollectPhase))
    {
        return true;
    }

    uint32 nextTime = tickCountNextCollection - tickDiffToNextCollect;

    // We will try to start a concurrent collect if we are within .9 ms to next scheduled collection, AND,
    // the size of allocation is larger than 32M. This is similar to CollectionAllocation logic, just
    // earlier in both time heuristic and size heuristic, so we can do some concurrent GC while we are
    // not in script.
    if (autoHeap.uncollectedAllocBytes >= RecyclerHeuristic::Instance.MaxUncollectedAllocBytesOnExit
        && GetTickCount() > nextTime)
    {
#ifdef RECYCLER_TRACE
        if (CUSTOM_PHASE_TRACE1(GetRecyclerFlagsTable(), Js::IdleCollectPhase))
        {
            // NOTE(review): this inner condition is always true here (the
            // enclosing 'if' already requires it via &&), so the time-based
            // else arm below appears unreachable — confirm whether the outer
            // condition was meant to be '||'.
            if (autoHeap.uncollectedAllocBytes >= RecyclerHeuristic::Instance.MaxUncollectedAllocBytesOnExit)
            {
                Output::Print(_u("%04X> Idle collect on exit: alloc %d\n"), ::GetCurrentThreadId(), autoHeap.uncollectedAllocBytes);
            }
            else
            {
                Output::Print(_u("%04X> Idle collect on exit: time %d\n"), ::GetCurrentThreadId(), tickCountNextCollection - GetTickCount());
            }
            Output::Flush();
        }
#endif
        this->CollectNow<CollectNowConcurrent>();
        return false;
    }

    Assert(!this->CollectionInProgress());

    // Idle GC use the size heuristic. Only need to schedule on if we passed it.
    return (autoHeap.uncollectedAllocBytes >= RecyclerHeuristic::IdleUncollectedAllocBytesCollection);
}
#if ENABLE_CONCURRENT_GC
// Kicks off parallel work, either via the host thread service (callback mode)
// or on this object's dedicated thread (created lazily). Returns false if the
// required event/thread could not be created or the service refused the work.
bool
RecyclerParallelThread::StartConcurrent()
{
    if (this->recycler->threadService->HasCallback())
    {
        // This may be the first time. If so, initialize by creating the doneEvent.
        if (this->concurrentWorkDoneEvent == NULL)
        {
            this->concurrentWorkDoneEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
            if (this->concurrentWorkDoneEvent == nullptr)
            {
                return false;
            }
        }

        // Callback mode never owns a thread or a ready event.
        Assert(concurrentThread == NULL);
        Assert(concurrentWorkReadyEvent == NULL);

        // Invoke thread service to process work
        if (!this->recycler->threadService->Invoke(RecyclerParallelThread::StaticBackgroundWorkCallback, this))
        {
            return false;
        }
    }
    else
    {
        // This may be the first time. If so, initialize and create thread.
        if (this->concurrentWorkDoneEvent == NULL)
        {
            // First call: create events + thread; false means the thread
            // starts working immediately without waiting for a ready signal.
            return this->EnableConcurrent(false);
        }
        else
        {
            Assert(this->concurrentThread != NULL);
            Assert(this->concurrentWorkReadyEvent != NULL);

            // signal that thread has been initialized
            SetEvent(this->concurrentWorkReadyEvent);
        }
    }

    return true;
}
// Creates the done/ready events and the parallel worker thread. If
// waitForThread is set, blocks until the thread signals initialization (or
// dies). On any failure every handle created so far is closed and the members
// are reset to NULL, so the object stays in its pristine state. Returns
// whether the thread is up and usable.
bool
RecyclerParallelThread::EnableConcurrent(bool waitForThread)
{
    this->synchronizeOnStartup = waitForThread;

    Assert(this->concurrentWorkDoneEvent == NULL);
    Assert(this->concurrentWorkReadyEvent == NULL);
    Assert(this->concurrentThread == NULL);

    this->concurrentWorkDoneEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
    if (this->concurrentWorkDoneEvent == nullptr)
    {
        return false;
    }

    this->concurrentWorkReadyEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
    if (this->concurrentWorkReadyEvent == nullptr)
    {
        CloseHandle(this->concurrentWorkDoneEvent);
        this->concurrentWorkDoneEvent = NULL;
        return false;
    }

    this->concurrentThread = (HANDLE)PlatformAgnostic::Thread::Create(Recycler::ConcurrentThreadStackSize, &RecyclerParallelThread::StaticThreadProc, this, PlatformAgnostic::Thread::ThreadInitStackSizeParamIsAReservation);
    if (this->concurrentThread != nullptr && waitForThread)
    {
        // Wait for thread to initialize. Also waiting on the thread handle so
        // we notice if the thread exits before signaling readiness.
        HANDLE handle[2] = { this->concurrentWorkDoneEvent, this->concurrentThread };
        DWORD ret = WaitForMultipleObjectsEx(2, handle, FALSE, INFINITE, FALSE);
        if (ret == WAIT_OBJECT_0)
        {
            return true;
        }

        // Thread died before initializing; fall through to cleanup below.
        CloseHandle(concurrentThread);
        concurrentThread = nullptr;
    }

    if (this->concurrentThread == nullptr)
    {
        CloseHandle(this->concurrentWorkDoneEvent);
        this->concurrentWorkDoneEvent = NULL;
        CloseHandle(this->concurrentWorkReadyEvent);
        this->concurrentWorkReadyEvent = NULL;
        return false;
    }

    return true;
}
// Work function for the two auxiliary parallel-mark threads. parallelId (0 or
// 1) selects the dedicated mark context for that worker; the current
// collection state decides whether this is foreground or background parallel
// marking.
template <uint parallelId>
void
Recycler::ParallelWorkFunc()
{
    Assert(parallelId == 0 || parallelId == 1);
    MarkContext * markContext = (parallelId == 0 ? &this->parallelMarkContext2 : &this->parallelMarkContext3);

    switch (this->collectionState)
    {
    case CollectionStateParallelMark:
        this->ProcessParallelMark(false, markContext);
        break;

    case CollectionStateBackgroundParallelMark:
        this->ProcessParallelMark(true, markContext);
        break;

    default:
        Assert(false);
    }
}
// Blocks until the parallel worker signals that its current work item is done.
void
RecyclerParallelThread::WaitForConcurrent()
{
    Assert(this->concurrentThread != NULL || this->recycler->threadService->HasCallback());
    Assert(this->concurrentWorkDoneEvent != NULL);
    DWORD ret = WaitForSingleObject(concurrentWorkDoneEvent, INFINITE);
    Assert(ret == WAIT_OBJECT_0);
}
// Tears down the parallel worker. Requires the recycler to be in
// CollectionStateExit so the worker thread's loop will break on wakeup.
// Closes all handles and leaves the object fully reset.
void
RecyclerParallelThread::Shutdown()
{
    Assert(this->recycler->collectionState == CollectionStateExit);

    if (this->recycler->threadService->HasCallback())
    {
        // Callback mode: only the done event exists.
        if (this->concurrentWorkDoneEvent != NULL)
        {
            CloseHandle(this->concurrentWorkDoneEvent);
            this->concurrentWorkDoneEvent = NULL;
        }
    }
    else
    {
        if (this->concurrentThread != NULL)
        {
            HANDLE handles[2] = { concurrentWorkDoneEvent, concurrentThread };

            // Wake the worker so it observes CollectionStateExit.
            SetEvent(concurrentWorkReadyEvent);

            // During process shutdown, OS might kill this (recycler parallel i.e. concurrent) thread and it will not get chance to signal concurrentWorkDoneEvent.
            // When we are performing shutdown of main (recycler) thread here, if we wait on concurrentWorkDoneEvent, WaitForObject() will never return.
            // Hence wait for concurrentWorkDoneEvent + concurrentThread so if concurrentThread got killed, WaitForObject() will return and we will
            // proceed further.
            DWORD fRet = WaitForMultipleObjectsEx(2, handles, FALSE, INFINITE, FALSE);
            AssertMsg(fRet != WAIT_FAILED, "Check handles passed to WaitForMultipleObjectsEx.");

            CloseHandle(this->concurrentWorkDoneEvent);
            this->concurrentWorkDoneEvent = NULL;
            CloseHandle(this->concurrentWorkReadyEvent);
            this->concurrentWorkReadyEvent = NULL;
            CloseHandle(this->concurrentThread);
            this->concurrentThread = NULL;
        }
    }

    Assert(this->concurrentThread == NULL);
    Assert(this->concurrentWorkReadyEvent == NULL);
    Assert(this->concurrentWorkDoneEvent == NULL);
}
// static
// Thread entry point for a parallel GC worker. Loops: (optionally) signal
// done + wait for ready, check for exit, run the configured workFunc. SEH-
// wrapped like the main GC thread; pins the module to avoid the loader-lock
// shutdown deadlock (see Recycler::ThreadProc for the scenario).
unsigned int
RecyclerParallelThread::StaticThreadProc(LPVOID lpParameter)
{
    DWORD ret = (DWORD)-1;

#if !DISABLE_SEH
    __try
    {
#endif
        RecyclerParallelThread * parallelThread = (RecyclerParallelThread *)lpParameter;
        Recycler * recycler = parallelThread->recycler;
        RecyclerParallelThread::WorkFunc workFunc = parallelThread->workFunc;

        Assert(recycler->IsConcurrentEnabled());

#if !defined(_UCRT)
        HMODULE dllHandle = NULL;
        if (!GetModuleHandleEx(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS, (LPCTSTR)&RecyclerParallelThread::StaticThreadProc, &dllHandle))
        {
            dllHandle = NULL;
        }
#endif

#ifdef ENABLE_JS_ETW
        // Create an ETW ActivityId for this thread, to help tools correlate ETW events we generate
        GUID activityId = { 0 };
        auto eventActivityIdControlResult = EventActivityIdControl(EVENT_ACTIVITY_CTRL_CREATE_SET_ID, &activityId);
        Assert(eventActivityIdControlResult == ERROR_SUCCESS);
#endif

        // If this thread is created on demand we already have work to process and do not need to wait
        bool mustWait = parallelThread->synchronizeOnStartup;

        do
        {
            if (mustWait)
            {
                // Signal completion and wait for next work
                SetEvent(parallelThread->concurrentWorkDoneEvent);
                DWORD result = WaitForSingleObject(parallelThread->concurrentWorkReadyEvent, INFINITE);
                Assert(result == WAIT_OBJECT_0);
            }

            if (recycler->collectionState == CollectionStateExit)
            {
                // Exit thread
                break;
            }

            // Invoke the workFunc to do real work
            (recycler->*workFunc)();

            // We always wait after the first time
            mustWait = true;
        }
        while (true);

        // Signal to main thread that we have stopped processing and will shut down.
        // Note that after this point, we cannot access anything on the Recycler instance
        // because the main thread may have torn it down already.
        SetEvent(parallelThread->concurrentWorkDoneEvent);

#if !defined(_UCRT)
        if (dllHandle)
        {
            // Releases the module reference and ends the thread; does not return.
            FreeLibraryAndExitThread(dllHandle, 0);
        }
#endif
        ret = 0;
#if !DISABLE_SEH
    }
    __except(Recycler::ExceptFilter(GetExceptionInformation()))
    {
        Assert(false);
    }
#endif

    return ret;
}
// static
// Thread-service callback variant of the worker: runs workFunc once on the
// service's thread and signals completion.
void
RecyclerParallelThread::StaticBackgroundWorkCallback(void * callbackData)
{
    RecyclerParallelThread * parallelThread = (RecyclerParallelThread *)callbackData;
    Recycler * recycler = parallelThread->recycler;
    RecyclerParallelThread::WorkFunc workFunc = parallelThread->workFunc;
    (recycler->*workFunc)();
    SetEvent(parallelThread->concurrentWorkDoneEvent);
}
- #endif
- #ifdef RECYCLER_TRACE
// Snapshots the parameters of the collection being started into
// collectionParam so later PrintCollectTrace calls can report the heuristics
// that triggered it (RECYCLER_TRACE builds only).
void
Recycler::CaptureCollectionParam(CollectionFlags flags, bool repeat)
{
    collectionParam.priorityBoostConcurrentSweepOverride = false;
    collectionParam.repeat = repeat;
    collectionParam.finishOnly = false;
    collectionParam.flags = flags;
    collectionParam.uncollectedAllocBytes = autoHeap.uncollectedAllocBytes;
#if ENABLE_PARTIAL_GC
    collectionParam.uncollectedNewPageCountPartialCollect = this->uncollectedNewPageCountPartialCollect;
    collectionParam.inPartialCollectMode = inPartialCollectMode;
    collectionParam.uncollectedNewPageCount = autoHeap.uncollectedNewPageCount;
    collectionParam.unusedPartialCollectFreeBytes = autoHeap.unusedPartialCollectFreeBytes;
#endif
}
// Emit a one-line trace describing why this collection started (which
// heuristics fired) or how it is finishing. Gated on the Recycler trace flag
// or the per-phase trace flag. Uses the parameters previously captured by
// CaptureCollectionParam.
void
Recycler::PrintCollectTrace(Js::Phase phase, bool finish, bool noConcurrentWork)
{
    if (GetRecyclerFlagsTable().Trace.IsEnabled(Js::RecyclerPhase) ||
        GetRecyclerFlagsTable().Trace.IsEnabled(phase))
    {
        // Decode the captured collection flags into individual booleans.
        const BOOL allocSize = collectionParam.flags & CollectHeuristic_AllocSize;
        const BOOL timedIfScriptActive = collectionParam.flags & CollectHeuristic_TimeIfScriptActive;
        const BOOL timedIfInScript = collectionParam.flags & CollectHeuristic_TimeIfInScript;
        // The time heuristic applies if requested unconditionally, or if it is
        // conditional on script state and that state currently holds.
        const BOOL timed = (timedIfScriptActive && isScriptActive) || (timedIfInScript && isInScript) || (collectionParam.flags & CollectHeuristic_Time);
        const BOOL concurrent = collectionParam.flags & CollectMode_Concurrent;
        const BOOL finishConcurrent = collectionParam.flags & CollectOverride_FinishConcurrent;
        const BOOL exhaustive = collectionParam.flags & CollectMode_Exhaustive;
        const BOOL forceInThread = collectionParam.flags & CollectOverride_ForceInThread;
        const BOOL forceFinish = collectionParam.flags & CollectOverride_ForceFinish;
#if ENABLE_PARTIAL_GC
        BOOL partial = collectionParam.flags & CollectMode_Partial ;
#endif
        // Header: thread id, recycler instance, modifier tags, phase name.
        Output::Print(_u("%04X> RC(%p): %s%s%s%s%s%s%s:"), this->mainThreadId, this,
            collectionParam.domCollect? _u("[DOM] ") : _u(""),
            collectionParam.repeat? _u("[Repeat] "): _u(""),
            this->inDispose? _u("[Nested]") : _u(""),
            forceInThread? _u("Force In thread ") : _u(""),
            finish? _u("Finish ") : _u(""),
            exhaustive? _u("Exhaustive ") : _u(""),
            Js::PhaseNames[phase]);
        if (noConcurrentWork)
        {
            Assert(finish);
            Output::Print(_u(" No concurrent work"));
        }
        else if (collectionParam.finishOnly)
        {
            // Finishing a previously-started concurrent collection: report
            // what stage is being finished based on the current state.
            Assert(!collectionParam.repeat);
            Assert(finish);
#if ENABLE_CONCURRENT_GC
            if (collectionState == CollectionStateRescanWait)
            {
                if (forceFinish)
                {
                    Output::Print(_u(" Force finish mark and sweep"));
                }
                else if (concurrent && this->enableConcurrentSweep)
                {
                    if (!collectionParam.priorityBoostConcurrentSweepOverride)
                    {
                        Output::Print(_u(" Finish mark and start concurrent sweep"));
                    }
                    else
                    {
                        Output::Print(_u(" Finish mark and sweep (priority boost overridden concurrent sweep)"));
                    }
                }
                else
                {
                    Output::Print(_u(" Finish mark and sweep"));
                }
            }
            else
            {
                Assert(collectionState == CollectionStateTransferSweptWait);
                if (forceFinish)
                {
                    Output::Print(_u(" Force finish sweep"));
                }
                else
                {
                    Output::Print(_u(" Finish sweep"));
                }
            }
#endif // ENABLE_CONCURRENT_GC
        }
        else
        {
            if (finish && !concurrent)
            {
                Output::Print(_u(" Not concurrent collect"));
            }
            if ((finish && finishConcurrent))
            {
                Output::Print(_u(" No heuristic"));
            }
#if ENABLE_CONCURRENT_GC
            else if (finish && priorityBoost)
            {
                Output::Print(_u(" Priority boost no heuristic"));
            }
#endif
            else
            {
                // Heuristic-triggered collection: print the heuristic values.
                // Marker convention (see the ternaries below): '*' = the
                // heuristic that actually fired, ' ' = requested but not the
                // trigger, '~' = not requested at all.
                Output::SkipToColumn(50);
                bool byteCountUsed = false;
                bool timeUsed = false;
#if ENABLE_PARTIAL_GC
                bool newPageUsed = false;
                if (phase == Js::PartialCollectPhase || phase == Js::ConcurrentPartialCollectPhase)
                {
                    Assert(collectionParam.flags & CollectMode_Partial);
                    newPageUsed = !!allocSize;
                }
                else if (partial && collectionParam.inPartialCollectMode && collectionParam.uncollectedNewPageCount > collectionParam.uncollectedNewPageCountPartialCollect)
                {
                    newPageUsed = true;
                }
                else
#endif // ENABLE_PARTIAL_GC
                {
                    byteCountUsed = !!allocSize;
                    timeUsed = !!timed;
                }
                Output::Print(byteCountUsed? _u("*") : (allocSize? _u(" ") : _u("~")));
                Output::Print(_u("B:%8d "), collectionParam.uncollectedAllocBytes);
                Output::Print(timeUsed? _u("*") : (timed? _u(" ") : _u("~")));
                Output::Print(_u("T:%4d "), -collectionParam.timeDiff);
#if ENABLE_PARTIAL_GC
                if (collectionParam.inPartialCollectMode)
                {
                    Output::Print(_u("L:%5d "), collectionParam.uncollectedNewPageCountPartialCollect);
                }
                else
                {
                    Output::Print(_u("L:----- "));
                }
                Output::Print(newPageUsed? _u("*") : (partial? _u(" ") : _u("~")));
                Output::Print(_u("P:%5d(%9d) "), collectionParam.uncollectedNewPageCount, collectionParam.uncollectedNewPageCount * AutoSystemInfo::PageSize);
                Output::Print(_u("U:%8d"), collectionParam.unusedPartialCollectFreeBytes);
#endif // ENABLE_PARTIAL_GC
            }
        }
        Output::Print(_u("\n"));
        Output::Flush();
    }
}
- #endif
- #ifdef RECYCLER_STATS
// Print one row of the swept-blocks table for a block type: live / free /
// total block counts with the free percentage, plus (for small/medium block
// types only) swept and concurrently-swept block counts with percentages.
// NOTE(review): the divisions below can be 0/0 when no blocks of this type
// exist; with IEEE doubles that prints nan/inf rather than trapping -
// confirm that is acceptable for this debug-only output.
void
Recycler::PrintHeapBlockStats(char16 const * name, HeapBlock::HeapBlockType type)
{
    size_t liveCount = collectionStats.heapBlockCount[type] - collectionStats.heapBlockFreeCount[type];
    Output::Print(_u(" %6s : %5d %5d %5d %5.1f"), name,
        liveCount, collectionStats.heapBlockFreeCount[type], collectionStats.heapBlockCount[type],
        (double)collectionStats.heapBlockFreeCount[type] / (double)collectionStats.heapBlockCount[type] * 100);
    // Sweep counters are only tracked for block types whose enum value is
    // below SmallBlockTypeCount.
    if (type < HeapBlock::SmallBlockTypeCount)
    {
        Output::Print(_u(" : %5d %6.1f : %5d %6.1f"),
            collectionStats.heapBlockSweptCount[type],
            (double)collectionStats.heapBlockSweptCount[type] / (double)liveCount * 100,
            collectionStats.heapBlockConcurrentSweptCount[type],
            (double)collectionStats.heapBlockConcurrentSweptCount[type] / (double)collectionStats.heapBlockSweptCount[type] * 100);
    }
}
// Print one row of per-block-type memory statistics: live bytes, allocable
// free bytes, (optionally) partial-collect unused bytes, total bytes, and
// percentages. Called from PrintMemoryStats once per block type.
void
Recycler::PrintHeapBlockMemoryStats(char16 const * name, HeapBlock::HeapBlockType type)
{
    size_t allocableFreeByteCount = collectionStats.heapBlockFreeByteCount[type];
#if ENABLE_PARTIAL_GC
    // In partial-collect mode some free bytes are deliberately left unused;
    // carve those out of the allocable count and report them separately.
    size_t partialUnusedBytes = 0;
    if (this->enablePartialCollect)
    {
        partialUnusedBytes = allocableFreeByteCount
            - collectionStats.smallNonLeafHeapBlockPartialReuseBytes[type];
        allocableFreeByteCount -= partialUnusedBytes;
    }
#endif
    // Total committed bytes for this type: non-free blocks times page size.
    size_t totalByteCount = (collectionStats.heapBlockCount[type] - collectionStats.heapBlockFreeCount[type]) * AutoSystemInfo::PageSize;
    size_t liveByteCount = totalByteCount - collectionStats.heapBlockFreeByteCount[type];
    Output::Print(_u(" %6s: %10d %10d"), name, liveByteCount, allocableFreeByteCount);
#if ENABLE_PARTIAL_GC
    // Only non-leaf small/medium block types participate in partial reuse,
    // so only those rows get the "unused" column.
    if (this->enablePartialCollect &&
        (type == HeapBlock::HeapBlockType::SmallNormalBlockType
        || type == HeapBlock::HeapBlockType::SmallFinalizableBlockType
#ifdef RECYCLER_WRITE_BARRIER
        || type == HeapBlock::HeapBlockType::SmallNormalBlockWithBarrierType
        || type == HeapBlock::HeapBlockType::SmallFinalizableBlockWithBarrierType
#endif
        || type == HeapBlock::HeapBlockType::MediumNormalBlockType
        || type == HeapBlock::HeapBlockType::MediumFinalizableBlockType
#ifdef RECYCLER_WRITE_BARRIER
        || type == HeapBlock::HeapBlockType::MediumNormalBlockWithBarrierType
        || type == HeapBlock::HeapBlockType::MediumFinalizableBlockWithBarrierType
#endif
        ))
    {
        Output::Print(_u(" %10d"), partialUnusedBytes);
    }
    else
#endif
    {
        Output::Print(_u(" "));
    }
    Output::Print(_u(" %10d %6.1f"), totalByteCount,
        (double)allocableFreeByteCount / (double)totalByteCount * 100);
#if ENABLE_PARTIAL_GC
    // NOTE(review): this type-list condition duplicates the one above; a
    // shared helper predicate would keep the two in sync.
    if (this->enablePartialCollect &&
        (type == HeapBlock::HeapBlockType::SmallNormalBlockType
        || type == HeapBlock::HeapBlockType::SmallFinalizableBlockType
#ifdef RECYCLER_WRITE_BARRIER
        || type == HeapBlock::HeapBlockType::SmallNormalBlockWithBarrierType
        || type == HeapBlock::HeapBlockType::SmallFinalizableBlockWithBarrierType
#endif
        || type == HeapBlock::HeapBlockType::MediumNormalBlockType
        || type == HeapBlock::HeapBlockType::MediumFinalizableBlockType
#ifdef RECYCLER_WRITE_BARRIER
        || type == HeapBlock::HeapBlockType::MediumNormalBlockWithBarrierType
        || type == HeapBlock::HeapBlockType::MediumFinalizableBlockWithBarrierType
#endif
        ))
    {
        Output::Print(_u(" %6.1f"), (double)partialUnusedBytes / (double)totalByteCount * 100);
    }
#endif
}
// Print the GC-trigger heuristics table: allocated bytes / new pages at the
// start, continue, and finish points of the collection, plus (when partial
// collect is enabled) the partial-collect cost/efficacy columns.
void
Recycler::PrintHeuristicCollectionStats()
{
    Output::Print(_u("---------------------------------------------------------------------------------------------------------------\n"));
    Output::Print(_u("GC Trigger : %10s %10s %10s"), _u("Start"), _u("Continue"), _u("Finish"));
#if ENABLE_PARTIAL_GC
    if (this->enablePartialCollect)
    {
        Output::Print(_u(" | Heuristics : %10s %10s %5s"), _u(""), _u(""), _u("%"));
    }
#endif
    Output::Print(_u("\n"));
    Output::Print(_u("---------------------------------------------------------------------------------------------------------------\n"));
    Output::Print(_u(" Alloc bytes : %10d %10d %10d"), collectionStats.startCollectAllocBytes, collectionStats.continueCollectAllocBytes, this->autoHeap.uncollectedAllocBytes);
#if ENABLE_PARTIAL_GC
    if (this->enablePartialCollect)
    {
        // Cost row: rescan root bytes, estimated reusable bytes, cost ratio.
        Output::Print(_u(" | Cost : %10d %10d %5.1f"), collectionStats.rescanRootBytes, collectionStats.estimatedPartialReuseBytes, collectionStats.collectCost * 100);
    }
#endif
    Output::Print(_u("\n"));
#if ENABLE_PARTIAL_GC
    if (this->enablePartialCollect)
    {
        Output::Print(_u(" | Efficacy : %10s %10s %5.1f\n"), _u(""), _u(""), collectionStats.collectEfficacy * 100);
    }
#endif
#if ENABLE_PARTIAL_GC
    if (this->enablePartialCollect)
    {
        Output::Print(_u(" New page : %10d %10s %10d"), collectionStats.startCollectNewPageCount, _u(""), autoHeap.uncollectedNewPageCount);
        Output::Print(_u(" | Partial Uncollect New Page : %10d %10d"), collectionStats.uncollectedNewPageCountPartialCollect * AutoSystemInfo::PageSize, this->uncollectedNewPageCountPartialCollect * AutoSystemInfo::PageSize);
        Output::Print(_u("\n"));
    }
#endif
    Output::Print(_u(" Finish try : %10d %10s %10s"), collectionStats.finishCollectTryCount, _u(""), _u(""));
#if ENABLE_PARTIAL_GC
    if (this->enablePartialCollect)
    {
        Output::Print(_u(" | Partial Reuse Min Free Bytes : %10d"), collectionStats.partialCollectSmallHeapBlockReuseMinFreeBytes * AutoSystemInfo::PageSize);
    }
#endif
    Output::Print(_u("\n"));
}
// Print the mark-phase statistics table. Derived quantities:
//   nonMark      - pointer candidates examined (plain + interior) that did
//                  not result in a mark (remarks and actual marks excluded),
//   invalidCount - nonMark entries not accounted for by null, unaligned, or
//                  non-recycler-memory pointers,
//   leafCount    - marked objects that were not scanned for children.
void
Recycler::PrintMarkCollectionStats()
{
    size_t nonMark = collectionStats.tryMarkCount + collectionStats.tryMarkInteriorCount - collectionStats.remarkCount - collectionStats.markData.markCount;
    size_t invalidCount = nonMark - collectionStats.tryMarkNullCount - collectionStats.tryMarkUnalignedCount
        - collectionStats.tryMarkNonRecyclerMemoryCount
        - collectionStats.tryMarkInteriorNonRecyclerMemoryCount
        - collectionStats.tryMarkInteriorNullCount;
    size_t leafCount = collectionStats.markData.markCount - collectionStats.scanCount;
    Output::Print(_u("---------------------------------------------------------------------------------------------------------------\n"));
    Output::Print(_u("Try Mark :%9s %5s %10s | Non-Mark : %9s %5s | Mark :%9s %5s \n"), _u("Count"), _u("%"), _u("Bytes"), _u("Count"), _u("%"), _u("Count"), _u("%"));
    Output::Print(_u("---------------------------------------------------------------------------------------------------------------\n"));
    // Byte columns report candidate pointers times pointer size.
    Output::Print(_u(" TryMark :%9d %10d | Null : %9d %5.1f | Scan :%9d %5.1f\n"),
        collectionStats.tryMarkCount, collectionStats.tryMarkCount * sizeof(void *),
        collectionStats.tryMarkNullCount, (double)collectionStats.tryMarkNullCount / (double)nonMark * 100,
        collectionStats.scanCount, (double)collectionStats.scanCount / (double)collectionStats.markData.markCount * 100);
    Output::Print(_u(" Non-Mark :%9d %5.1f | Unaligned : %9d %5.1f | Leaf :%9d %5.1f\n"),
        nonMark, (double)nonMark / (double)collectionStats.tryMarkCount * 100,
        collectionStats.tryMarkUnalignedCount, (double)collectionStats.tryMarkUnalignedCount / (double)nonMark * 100,
        leafCount, (double)leafCount / (double)collectionStats.markData.markCount * 100);
    Output::Print(_u(" Mark :%9d %5.1f %10d | Non GC : %9d %5.1f | Track :%9d\n"),
        collectionStats.markData.markCount, (double)collectionStats.markData.markCount / (double)collectionStats.tryMarkCount * 100, collectionStats.markData.markBytes,
        collectionStats.tryMarkNonRecyclerMemoryCount, (double)collectionStats.tryMarkNonRecyclerMemoryCount / (double)nonMark * 100,
        collectionStats.trackCount);
    Output::Print(_u(" Remark :%9d %5.1f | Invalid : %9d %5.1f \n"),
        collectionStats.remarkCount, (double)collectionStats.remarkCount / (double)collectionStats.tryMarkCount * 100,
        invalidCount, (double)invalidCount / (double)nonMark * 100);
    Output::Print(_u(" TryMark Int:%9d %10d | Null Int : %9d %5.1f | Root :%9d | New :%9d\n"),
        collectionStats.tryMarkInteriorCount, collectionStats.tryMarkInteriorCount * sizeof(void *),
        collectionStats.tryMarkInteriorNullCount, (double)collectionStats.tryMarkInteriorNullCount / (double)nonMark * 100,
        collectionStats.rootCount, collectionStats.markThruNewObjCount);
    Output::Print(_u(" | Non GC Int: %9d %5.1f | Stack :%9d | NewFalse:%9d\n"),
        collectionStats.tryMarkInteriorNonRecyclerMemoryCount, (double)collectionStats.tryMarkInteriorNonRecyclerMemoryCount / (double)nonMark * 100,
        collectionStats.stackCount, collectionStats.markThruFalseNewObjCount);
}
// Print one background-mark pass row: small/large rescan page, object, and
// byte counts, the background mark count, and its share of the total marks.
void
Recycler::PrintBackgroundCollectionStat(RecyclerCollectionStats::MarkData const& markData)
{
    Output::Print(_u("BgSmall : %5d %6d %10d | BgLarge : %5d %6d %10d | BgMark :%9d "),
        markData.rescanPageCount,
        markData.rescanObjectCount,
        markData.rescanObjectByteCount,
        markData.rescanLargePageCount,
        markData.rescanLargeObjectCount,
        markData.rescanLargeByteCount,
        markData.markCount);
    double markRatio = (double)markData.markCount / (double)collectionStats.markData.markCount * 100;
    // Special-case exactly 100% so the column stays 4 characters wide
    // ("%4.1f" would print "100.0", which is 5).
    if (markRatio == 100.0)
    {
        Output::Print(_u(" 100"));
    }
    else
    {
        Output::Print(_u("%4.1f"), markRatio);
    }
    Output::Print(_u("\n"));
}
// Print the background (concurrent) mark table: one row per background
// repeat-mark pass, stopping at the first pass that did no marking.
void
Recycler::PrintBackgroundCollectionStats()
{
#if ENABLE_CONCURRENT_GC
    Output::Print(_u("---------------------------------------------------------------------------------------------------------------\n"));
    Output::Print(_u("BgSmall : %5s %6s %10s | BgLarge : %5s %6s %10s | BgMark :%9s %4s %s\n"),
        _u("Pages"), _u("Count"), _u("Bytes"), _u("Pages"), _u("Count"), _u("Bytes"), _u("Count"), _u("%"), _u("NonLeafBytes %"));
    Output::Print(_u("---------------------------------------------------------------------------------------------------------------\n"));
    this->PrintBackgroundCollectionStat(collectionStats.backgroundMarkData[0]);
    for (uint repeatCount = 1; repeatCount < RecyclerHeuristic::MaxBackgroundRepeatMarkCount; repeatCount++)
    {
        if (collectionStats.backgroundMarkData[repeatCount].markCount == 0)
        {
            break;
        }
        // The rescan counters are cumulative across passes; subtract the
        // previous pass so each row shows only that pass's work. Note this
        // mutates collectionStats in place, so running this report twice
        // would double-subtract.
        collectionStats.backgroundMarkData[repeatCount].rescanPageCount -= collectionStats.backgroundMarkData[repeatCount - 1].rescanPageCount;
        collectionStats.backgroundMarkData[repeatCount].rescanObjectCount -= collectionStats.backgroundMarkData[repeatCount - 1].rescanObjectCount;
        collectionStats.backgroundMarkData[repeatCount].rescanObjectByteCount -= collectionStats.backgroundMarkData[repeatCount - 1].rescanObjectByteCount;
        collectionStats.backgroundMarkData[repeatCount].rescanLargePageCount -= collectionStats.backgroundMarkData[repeatCount - 1].rescanLargePageCount;
        collectionStats.backgroundMarkData[repeatCount].rescanLargeObjectCount -= collectionStats.backgroundMarkData[repeatCount - 1].rescanLargeObjectCount;
        collectionStats.backgroundMarkData[repeatCount].rescanLargeByteCount -= collectionStats.backgroundMarkData[repeatCount - 1].rescanLargeByteCount;
        this->PrintBackgroundCollectionStat(collectionStats.backgroundMarkData[repeatCount]);
    }
#endif
}
// Print the memory report: one PrintHeapBlockMemoryStats row per block
// type, a hand-computed row for large blocks, then the small-block zeroing
// statistics accumulated since the last GC.
void
Recycler::PrintMemoryStats()
{
    Output::Print(_u("----------------------------------------------------------------------------------------------------------------\n"));
    Output::Print(_u("Memory (Bytes) %4s %10s %10s %10s %6s %6s\n"), _u("Live"), _u("Free"), _u("Unused"), _u("Total"), _u("Free%"), _u("Unused%"));
    Output::Print(_u("----------------------------------------------------------------------------------------------------------------\n"));
    PrintHeapBlockMemoryStats(_u("Small"), HeapBlock::SmallNormalBlockType);
    Output::Print(_u("\n"));
    PrintHeapBlockMemoryStats(_u("SmFin"), HeapBlock::SmallFinalizableBlockType);
    Output::Print(_u("\n"));
#ifdef RECYCLER_WRITE_BARRIER
    PrintHeapBlockMemoryStats(_u("SmSWB"), HeapBlock::SmallNormalBlockWithBarrierType);
    Output::Print(_u("\n"));
    PrintHeapBlockMemoryStats(_u("SmFinSWB"), HeapBlock::SmallFinalizableBlockWithBarrierType);
    Output::Print(_u("\n"));
#endif
    PrintHeapBlockMemoryStats(_u("SmLeaf"), HeapBlock::SmallLeafBlockType);
    Output::Print(_u("\n"));
    PrintHeapBlockMemoryStats(_u("Medium"), HeapBlock::MediumNormalBlockType);
    Output::Print(_u("\n"));
    PrintHeapBlockMemoryStats(_u("MdFin"), HeapBlock::MediumFinalizableBlockType);
    Output::Print(_u("\n"));
#ifdef RECYCLER_WRITE_BARRIER
    PrintHeapBlockMemoryStats(_u("MdSWB"), HeapBlock::MediumNormalBlockWithBarrierType);
    Output::Print(_u("\n"));
    PrintHeapBlockMemoryStats(_u("MdFinSWB"), HeapBlock::MediumFinalizableBlockWithBarrierType);
    Output::Print(_u("\n"));
#endif
    PrintHeapBlockMemoryStats(_u("MdLeaf"), HeapBlock::MediumLeafBlockType);
    Output::Print(_u("\n"));
    // Large blocks are tracked with dedicated counters rather than the
    // per-type arrays: unused = total - used - free.
    size_t largeHeapBlockUnusedByteCount = collectionStats.largeHeapBlockTotalByteCount - collectionStats.largeHeapBlockUsedByteCount
        - collectionStats.heapBlockFreeByteCount[HeapBlock::LargeBlockType];
    Output::Print(_u(" Large: %10d %10d %10d %10d %6.1f %6.1f\n"),
        collectionStats.largeHeapBlockUsedByteCount,
        collectionStats.heapBlockFreeByteCount[HeapBlock::LargeBlockType],
        largeHeapBlockUnusedByteCount,
        collectionStats.largeHeapBlockTotalByteCount,
        (double)collectionStats.heapBlockFreeByteCount[HeapBlock::LargeBlockType] / (double)collectionStats.largeHeapBlockTotalByteCount * 100,
        (double)largeHeapBlockUnusedByteCount / (double)collectionStats.largeHeapBlockTotalByteCount * 100);
    Output::Print(_u("\nSmall heap block zeroing stats since last GC\n"));
    // Barrier-type counters fold into their non-barrier buckets, and medium
    // normal blocks fold into the "normal" bucket below.
    Output::Print(_u("Number of blocks with sweep state empty: normal=%d finalizable=%d leaf=%d\nNumber of blocks zeroed: %d\n"),
        collectionStats.numEmptySmallBlocks[HeapBlock::SmallNormalBlockType]
#ifdef RECYCLER_WRITE_BARRIER
        + collectionStats.numEmptySmallBlocks[HeapBlock::SmallNormalBlockWithBarrierType]
#endif
        , collectionStats.numEmptySmallBlocks[HeapBlock::SmallFinalizableBlockType]
#ifdef RECYCLER_WRITE_BARRIER
        + collectionStats.numEmptySmallBlocks[HeapBlock::SmallFinalizableBlockWithBarrierType]
#endif
        + collectionStats.numEmptySmallBlocks[HeapBlock::MediumNormalBlockType]
#ifdef RECYCLER_WRITE_BARRIER
        + collectionStats.numEmptySmallBlocks[HeapBlock::MediumNormalBlockWithBarrierType]
#endif
        , collectionStats.numEmptySmallBlocks[HeapBlock::MediumFinalizableBlockType]
#ifdef RECYCLER_WRITE_BARRIER
        + collectionStats.numEmptySmallBlocks[HeapBlock::MediumFinalizableBlockWithBarrierType]
#endif
        , collectionStats.numEmptySmallBlocks[HeapBlock::SmallLeafBlockType]
        + collectionStats.numEmptySmallBlocks[HeapBlock::MediumLeafBlockType],
        collectionStats.numZeroedOutSmallBlocks);
}
- void
- Recycler::PrintCollectStats()
- {
- Output::Print(_u("Collection Stats:\n"));
- PrintHeuristicCollectionStats();
- PrintMarkCollectionStats();
- PrintBackgroundCollectionStats();
- size_t freeCount = collectionStats.objectSweptCount - collectionStats.objectSweptFreeListCount;
- size_t freeBytes = collectionStats.objectSweptBytes - collectionStats.objectSweptFreeListBytes;
- Output::Print(_u("---------------------------------------------------------------------------------------------------------------\n"));
- #if ENABLE_PARTIAL_GC || ENABLE_CONCURRENT_GC
- Output::Print(_u("Rescan : %5s %6s %10s | Track : %5s | "), _u("Pages"), _u("Count"), _u("Bytes"), _u("Count"));
- #endif
- Output::Print(_u("Sweep : %7s | SweptObj : %5s %5s %10s\n"), _u("Count"), _u("Count"), _u("%%"), _u("Bytes"));
- Output::Print(_u("---------------------------------------------------------------------------------------------------------------\n"));
- Output::Print(_u(" Small : "));
- #if ENABLE_PARTIAL_GC || ENABLE_CONCURRENT_GC
- Output::Print(_u("%5d %6d %10d | "), collectionStats.markData.rescanPageCount, collectionStats.markData.rescanObjectCount, collectionStats.markData.rescanObjectByteCount);
- #endif
- #if ENABLE_CONCURRENT_GC
- Output::Print(_u("Process : %5d | "), collectionStats.trackedObjectCount);
- #else
- Output::Print(_u(" | "));
- #endif
- Output::Print(_u(" Scan : %7d | Free : %6d %5.1f %10d\n"),
- collectionStats.objectSweepScanCount,
- freeCount, (double)freeCount / (double) collectionStats.objectSweptCount * 100, freeBytes);
- Output::Print(_u(" Large : "));
- #if ENABLE_PARTIAL_GC || ENABLE_CONCURRENT_GC
- Output::Print(_u("%5d %6d %10d | "),
- collectionStats.markData.rescanLargePageCount, collectionStats.markData.rescanLargeObjectCount, collectionStats.markData.rescanLargeByteCount);
- #endif
- #if ENABLE_PARTIAL_GC
- Output::Print(_u("Client : %5d | "), collectionStats.clientTrackedObjectCount);
- #else
- Output::Print(_u(" | "));
- #endif
- Output::Print(_u(" Finalize : %7d | Free List: %6d %5.1f %10d\n"),
- collectionStats.finalizeSweepCount,
- collectionStats.objectSweptFreeListCount, (double)collectionStats.objectSweptFreeListCount / (double) collectionStats.objectSweptCount * 100, collectionStats.objectSweptFreeListBytes);
- Output::Print(_u("---------------------------------------------------------------------------------------------------------------\n"));
- Output::Print(_u("SweptBlk: Live Free Total Free%% : Swept Swept%% : CSwpt CSwpt%%"));
- #if ENABLE_PARTIAL_GC
- if (this->enablePartialCollect)
- {
- Output::Print(_u(" | Partial : Count Bytes Existing"));
- }
- #endif
- Output::Print(_u("\n"));
- Output::Print(_u("---------------------------------------------------------------------------------------------------------------\n"));
- PrintHeapBlockStats(_u("Small"), HeapBlock::SmallNormalBlockType);
- #if ENABLE_PARTIAL_GC
- if (this->enablePartialCollect)
- {
- Output::Print(_u(" | Reuse : %5d %10d %10d"),
- collectionStats.smallNonLeafHeapBlockPartialReuseCount[HeapBlock::SmallNormalBlockType],
- collectionStats.smallNonLeafHeapBlockPartialReuseBytes[HeapBlock::MediumNormalBlockType],
- collectionStats.smallNonLeafHeapBlockPartialReuseCount[HeapBlock::SmallNormalBlockType] * AutoSystemInfo::PageSize
- - collectionStats.smallNonLeafHeapBlockPartialReuseBytes[HeapBlock::SmallNormalBlockType]);
- }
- #endif
- Output::Print(_u("\n"));
- PrintHeapBlockStats(_u("SmFin"), HeapBlock::SmallFinalizableBlockType);
- #if ENABLE_PARTIAL_GC
- if (this->enablePartialCollect)
- {
- Output::Print(_u(" | Unused : %5d %10d %10d"),
- collectionStats.smallNonLeafHeapBlockPartialUnusedCount[HeapBlock::SmallFinalizableBlockType],
- collectionStats.smallNonLeafHeapBlockPartialUnusedBytes[HeapBlock::SmallFinalizableBlockType],
- collectionStats.smallNonLeafHeapBlockPartialUnusedCount[HeapBlock::SmallFinalizableBlockType] * AutoSystemInfo::PageSize
- - collectionStats.smallNonLeafHeapBlockPartialUnusedBytes[HeapBlock::SmallFinalizableBlockType]);
- }
- #endif
- Output::Print(_u("\n"));
- #ifdef RECYCLER_WRITE_BARRIER
- PrintHeapBlockStats(_u("SmSWB"), HeapBlock::SmallNormalBlockWithBarrierType);
- #if ENABLE_PARTIAL_GC
- if (this->enablePartialCollect)
- {
- Output::Print(_u(" | Unused : %5d %10d %10d"),
- collectionStats.smallNonLeafHeapBlockPartialUnusedCount[HeapBlock::SmallNormalBlockWithBarrierType],
- collectionStats.smallNonLeafHeapBlockPartialUnusedBytes[HeapBlock::SmallNormalBlockWithBarrierType],
- collectionStats.smallNonLeafHeapBlockPartialUnusedCount[HeapBlock::SmallNormalBlockWithBarrierType] * AutoSystemInfo::PageSize
- - collectionStats.smallNonLeafHeapBlockPartialUnusedBytes[HeapBlock::SmallNormalBlockWithBarrierType]);
- }
- #endif
- Output::Print(_u("\n"));
- PrintHeapBlockStats(_u("SmFin"), HeapBlock::SmallFinalizableBlockWithBarrierType);
- #if ENABLE_PARTIAL_GC
- if (this->enablePartialCollect)
- {
- Output::Print(_u(" | Unused : %5d %10d %10d"),
- collectionStats.smallNonLeafHeapBlockPartialUnusedCount[HeapBlock::SmallFinalizableBlockWithBarrierType],
- collectionStats.smallNonLeafHeapBlockPartialUnusedBytes[HeapBlock::SmallFinalizableBlockWithBarrierType],
- collectionStats.smallNonLeafHeapBlockPartialUnusedCount[HeapBlock::SmallFinalizableBlockWithBarrierType] * AutoSystemInfo::PageSize
- - collectionStats.smallNonLeafHeapBlockPartialUnusedBytes[HeapBlock::SmallFinalizableBlockWithBarrierType]);
- }
- #endif
- Output::Print(_u("\n"));
- #endif
- // TODO: This seems suspicious- why are we looking at smallNonLeaf while print out leaf...
- PrintHeapBlockStats(_u("SmLeaf"), HeapBlock::SmallLeafBlockType);
- #if ENABLE_PARTIAL_GC
- if (this->enablePartialCollect)
- {
- Output::Print(_u(" | ReuseFin : %5d %10d %10d"),
- collectionStats.smallNonLeafHeapBlockPartialReuseCount[HeapBlock::SmallFinalizableBlockType],
- collectionStats.smallNonLeafHeapBlockPartialReuseBytes[HeapBlock::SmallFinalizableBlockType],
- collectionStats.smallNonLeafHeapBlockPartialReuseCount[HeapBlock::SmallFinalizableBlockType] * AutoSystemInfo::PageSize
- - collectionStats.smallNonLeafHeapBlockPartialReuseBytes[HeapBlock::SmallFinalizableBlockType]);
- }
- #endif
- Output::Print(_u("\n"));
- PrintHeapBlockStats(_u("Medium"), HeapBlock::MediumNormalBlockType);
- #if ENABLE_PARTIAL_GC
- if (this->enablePartialCollect)
- {
- Output::Print(_u(" | Reuse : %5d %10d %10d"),
- collectionStats.smallNonLeafHeapBlockPartialReuseCount[HeapBlock::MediumNormalBlockType],
- collectionStats.smallNonLeafHeapBlockPartialReuseBytes[HeapBlock::MediumNormalBlockType],
- collectionStats.smallNonLeafHeapBlockPartialReuseCount[HeapBlock::MediumNormalBlockType] * AutoSystemInfo::PageSize
- - collectionStats.smallNonLeafHeapBlockPartialReuseBytes[HeapBlock::MediumNormalBlockType]);
- }
- #endif
- Output::Print(_u("\n"));
- PrintHeapBlockStats(_u("MdFin"), HeapBlock::MediumFinalizableBlockType);
- #if ENABLE_PARTIAL_GC
- if (this->enablePartialCollect)
- {
- Output::Print(_u(" | Unused : %5d %10d %10d"),
- collectionStats.smallNonLeafHeapBlockPartialUnusedCount[HeapBlock::MediumFinalizableBlockType],
- collectionStats.smallNonLeafHeapBlockPartialUnusedBytes[HeapBlock::MediumFinalizableBlockType],
- collectionStats.smallNonLeafHeapBlockPartialUnusedCount[HeapBlock::MediumFinalizableBlockType] * AutoSystemInfo::PageSize
- - collectionStats.smallNonLeafHeapBlockPartialUnusedBytes[HeapBlock::MediumFinalizableBlockType]);
- }
- #endif
- Output::Print(_u("\n"));
- #ifdef RECYCLER_WRITE_BARRIER
- PrintHeapBlockStats(_u("MdSWB"), HeapBlock::MediumNormalBlockWithBarrierType);
- #if ENABLE_PARTIAL_GC
- if (this->enablePartialCollect)
- {
- Output::Print(_u(" | Unused : %5d %10d %10d"),
- collectionStats.smallNonLeafHeapBlockPartialUnusedCount[HeapBlock::MediumNormalBlockWithBarrierType],
- collectionStats.smallNonLeafHeapBlockPartialUnusedBytes[HeapBlock::MediumNormalBlockWithBarrierType],
- collectionStats.smallNonLeafHeapBlockPartialUnusedCount[HeapBlock::MediumNormalBlockWithBarrierType] * AutoSystemInfo::PageSize
- - collectionStats.smallNonLeafHeapBlockPartialUnusedBytes[HeapBlock::MediumNormalBlockWithBarrierType]);
- }
- #endif
- Output::Print(_u("\n"));
- PrintHeapBlockStats(_u("MdFin"), HeapBlock::MediumFinalizableBlockWithBarrierType);
- #if ENABLE_PARTIAL_GC
- if (this->enablePartialCollect)
- {
- Output::Print(_u(" | Unused : %5d %10d %10d"),
- collectionStats.smallNonLeafHeapBlockPartialUnusedCount[HeapBlock::MediumFinalizableBlockWithBarrierType],
- collectionStats.smallNonLeafHeapBlockPartialUnusedBytes[HeapBlock::MediumFinalizableBlockWithBarrierType],
- collectionStats.smallNonLeafHeapBlockPartialUnusedCount[HeapBlock::MediumFinalizableBlockWithBarrierType] * AutoSystemInfo::PageSize
- - collectionStats.smallNonLeafHeapBlockPartialUnusedBytes[HeapBlock::MediumFinalizableBlockWithBarrierType]);
- }
- #endif
- Output::Print(_u("\n"));
- #endif
- // TODO: This seems suspicious- why are we looking at smallNonLeaf while print out leaf...
- PrintHeapBlockStats(_u("MdLeaf"), HeapBlock::MediumNormalBlockType);
- #if ENABLE_PARTIAL_GC
- if (this->enablePartialCollect)
- {
- Output::Print(_u(" | ReuseFin : %5d %10d %10d"),
- collectionStats.smallNonLeafHeapBlockPartialReuseCount[HeapBlock::MediumFinalizableBlockType],
- collectionStats.smallNonLeafHeapBlockPartialReuseBytes[HeapBlock::MediumFinalizableBlockType],
- collectionStats.smallNonLeafHeapBlockPartialReuseCount[HeapBlock::MediumFinalizableBlockType] * AutoSystemInfo::PageSize
- - collectionStats.smallNonLeafHeapBlockPartialReuseBytes[HeapBlock::MediumFinalizableBlockType]);
- }
- #endif
- Output::Print(_u("\n"));
- // TODO: This can't possibly be correct...check on this later
- PrintHeapBlockStats(_u("Large"), HeapBlock::LargeBlockType);
- #if ENABLE_PARTIAL_GC
- if (this->enablePartialCollect)
- {
- Output::Print(_u(" | UnusedFin : %5d %10d %10d"),
- collectionStats.smallNonLeafHeapBlockPartialUnusedCount[HeapBlock::SmallFinalizableBlockType],
- collectionStats.smallNonLeafHeapBlockPartialUnusedBytes[HeapBlock::SmallFinalizableBlockType],
- collectionStats.smallNonLeafHeapBlockPartialUnusedCount[HeapBlock::SmallFinalizableBlockType] * AutoSystemInfo::PageSize
- - collectionStats.smallNonLeafHeapBlockPartialUnusedBytes[HeapBlock::SmallFinalizableBlockType]);
- }
- #endif
- Output::Print(_u("\n"));
- PrintMemoryStats();
- Output::Flush();
- }
- #endif
- #ifdef RECYCLER_ZERO_MEM_CHECK
- void
- Recycler::VerifyZeroFill(void * address, size_t size)
- {
- byte expectedFill = 0;
- #ifdef RECYCLER_MEMORY_VERIFY
- if (this->VerifyEnabled())
- {
- expectedFill = Recycler::VerifyMemFill;
- }
- #endif
- for (uint i = 0; i < size; i++)
- {
- Assert(((byte *)address)[i] == expectedFill);
- }
- }
- #endif
- #ifdef RECYCLER_MEMORY_VERIFY
// Verify the fill pattern of an allocation's unused region, then (re)write
// the pad bookkeeping. If the object is already initialized, only the
// trailing region beyond 'size' is checked; otherwise the whole aligned
// allocation (minus the trailing pad-size word) must still carry the
// verification fill pattern. No-op unless verification is enabled.
void
Recycler::FillCheckPad(void * address, size_t size, size_t alignedAllocSize, bool objectAlreadyInitialized)
{
    if (this->VerifyEnabled())
    {
        void* addressToVerify = address;
        size_t sizeToVerify = alignedAllocSize;
        if (objectAlreadyInitialized)
        {
            // Skip the live object body; only the pad region is checked.
            addressToVerify = ((char*) address + size);
            sizeToVerify = (alignedAllocSize - size);
        }
        // Check (not fill) the region, excluding the trailing size_t where
        // FillPadNoCheck stores the pad size.
        VerifyCheckFill(addressToVerify, sizeToVerify - sizeof(size_t));
        FillPadNoCheck(address, size, alignedAllocSize, objectAlreadyInitialized);
    }
}
- void
- Recycler::FillPadNoCheck(void * address, size_t size, size_t alignedAllocSize, bool objectAlreadyInitialized)
- {
- // Ignore the first word
- if (!objectAlreadyInitialized && size > sizeof(FreeObject))
- {
- memset((char *)address + sizeof(FreeObject), 0, size - sizeof(FreeObject));
- }
- // write the pad size at the end;
- *(size_t *)((char *)address + alignedAllocSize - sizeof(size_t)) = alignedAllocSize - size;
- }
- void Recycler::Verify(Js::Phase phase)
- {
- if (verifyEnabled && (!this->CollectionInProgress()))
- {
- if (GetRecyclerFlagsTable().RecyclerVerify.IsEnabled(phase))
- {
- autoHeap.Verify();
- }
- }
- }
- void Recycler::VerifyCheck(BOOL cond, char16 const * msg, void * address, void * corruptedAddress)
- {
- if (!(cond))
- {
- fwprintf(stderr, _u("RECYCLER CORRUPTION: StartAddress=%p CorruptedAddress=%p: %s"), address, corruptedAddress, msg);
- Js::Throw::FatalInternalError();
- }
- }
- void Recycler::VerifyCheckFill(void * address, size_t size)
- {
- for (byte * i = (byte *)address; i < (byte *)address + size; i++)
- {
- Recycler::VerifyCheck(*i == Recycler::VerifyMemFill, _u("memory written after freed"), address, i);
- }
- }
- void Recycler::VerifyCheckPadExplicitFreeList(void * address, size_t size)
- {
- size_t * paddingAddress = (size_t *)((byte *)address + size - sizeof(size_t));
- size_t padding = *paddingAddress;
- #pragma warning(suppress:4310)
- Assert(padding != (size_t)0xCACACACACACACACA); // Explicit free objects have to have been initialized at some point before they were freed
- Recycler::VerifyCheck(padding >= verifyPad + sizeof(size_t) && padding < size, _u("Invalid padding size"), address, paddingAddress);
- for (byte * i = (byte *)address + size - padding; i < (byte *)paddingAddress; i++)
- {
- Recycler::VerifyCheck(*i == Recycler::VerifyMemFill, _u("buffer overflow"), address, i);
- }
- }
// Validate the verify padding of an allocated object. Objects in nascent
// blocks may never have been initialized with a pad size; those are expected
// to contain the full free-fill pattern instead.
void Recycler::VerifyCheckPad(void * address, size_t size)
{
    size_t * paddingAddress = (size_t *)((byte *)address + size - sizeof(size_t));
    size_t padding = *paddingAddress;
#pragma warning(suppress:4310)
    if (padding == (size_t)0xCACACACACACACACA)
    {
        // Nascent block have objects that are not initialized with pad size
        Recycler::VerifyCheckFill(address, size);
        return;
    }
    // Pad must cover at least verifyPad plus the pad-size slot itself, and be smaller than the object.
    Recycler::VerifyCheck(padding >= verifyPad + sizeof(size_t) && padding < size, _u("Invalid padding size"), address, paddingAddress);
    for (byte * i = (byte *)address + size - padding; i < (byte *)paddingAddress; i++)
    {
        Recycler::VerifyCheck(*i == Recycler::VerifyMemFill, _u("buffer overflow"), address, i);
    }
}
- #endif
- Recycler::AutoSetupRecyclerForNonCollectingMark::AutoSetupRecyclerForNonCollectingMark(Recycler& recycler, bool setupForHeapEnumeration)
- : m_recycler(recycler), m_setupDone(false)
- {
- if (! setupForHeapEnumeration)
- {
- DoCommonSetup();
- }
- }
// Common setup for a non-collecting mark: snapshot the collection state (and
// stats, when built with RECYCLER_STATS) so the destructor can restore them.
void Recycler::AutoSetupRecyclerForNonCollectingMark::DoCommonSetup()
{
    Assert(m_recycler.collectionState == CollectionStateNotCollecting || m_recycler.collectionState == CollectionStateExit);
#if ENABLE_CONCURRENT_GC
    Assert(!m_recycler.DoQueueTrackedObject());
#endif
#if ENABLE_PARTIAL_GC
    // We need to get out of partial collect before we do the mark because we
    // will mess with the free bit vector state
    // GC-CONSIDER: don't mess with the free bit vector?
    if (m_recycler.inPartialCollectMode)
    {
        m_recycler.FinishPartialCollect();
    }
#endif
    m_previousCollectionState = m_recycler.collectionState;
#ifdef RECYCLER_STATS
    // Zero the live stats so the upcoming mark produces a clean snapshot; the
    // previous stats are restored in the destructor.
    m_previousCollectionStats = m_recycler.collectionStats;
    memset(&m_recycler.collectionStats, 0, sizeof(RecyclerCollectionStats));
#endif
    m_setupDone = true;
}
// Prepare the recycler for heap enumeration: make sure no collection is in
// progress, run the common setup, clear mark bits, and disable collection
// while enumeration is underway.
void Recycler::AutoSetupRecyclerForNonCollectingMark::SetupForHeapEnumeration()
{
    Assert(!m_recycler.isHeapEnumInProgress);
    Assert(!m_recycler.allowAllocationDuringHeapEnum);
    m_recycler.EnsureNotCollecting();
    DoCommonSetup();
    m_recycler.ResetMarks(ResetMarkFlags_HeapEnumeration);
    // Force the not-collecting state after ResetMarks before flagging enumeration.
    m_recycler.collectionState = CollectionStateNotCollecting;
    m_recycler.isHeapEnumInProgress = true;
    m_recycler.isCollectionDisabled = true;
}
// Restore the recycler state captured in DoCommonSetup and re-enable
// collection/heap-enumeration-sensitive paths.
Recycler::AutoSetupRecyclerForNonCollectingMark::~AutoSetupRecyclerForNonCollectingMark()
{
    Assert(m_setupDone);
    Assert(!m_recycler.allowAllocationDuringHeapEnum);
#ifdef RECYCLER_STATS
    // Put back the stats snapshot taken before the non-collecting mark.
    m_recycler.collectionStats = m_previousCollectionStats;
#endif
    m_recycler.collectionState = m_previousCollectionState;
    m_recycler.isHeapEnumInProgress = false;
    m_recycler.isCollectionDisabled = false;
}
- #ifdef RECYCLER_DUMP_OBJECT_GRAPH
// Dump the reachable object graph by running a non-collecting mark with a
// dumper attached. Returns true on success, false if dumping ran out of
// memory or a collection was in progress. Only valid when not collecting.
bool Recycler::DumpObjectGraph(RecyclerObjectGraphDumper::Param * param)
{
    bool succeeded = false;
    // Temporarily leave the Exit state so the non-collecting mark setup's
    // state checks pass; restored at the end.
    bool isExited = (this->collectionState == CollectionStateExit);
    if (isExited)
    {
        this->collectionState = CollectionStateNotCollecting;
    }
    if (this->collectionState != CollectionStateNotCollecting)
    {
        Output::Print(_u("Can't dump object graph when collecting\n"));
        Output::Flush();
        return succeeded;
    }
    BEGIN_NO_EXCEPTION
    {
        RecyclerObjectGraphDumper objectGraphDumper(this, param);
        Recycler::AutoSetupRecyclerForNonCollectingMark AutoSetupRecyclerForNonCollectingMark(*this);
        // Skip stack scanning if either the recycler or the caller requests it.
        AutoRestoreValue<bool> skipStackToggle(&this->skipStack, this->skipStack || (param && param->skipStack));
        this->Mark();
        // Detach the dumper before it goes out of scope; presumably the dumper
        // registered itself on construction — confirm against its constructor.
        this->objectGraphDumper = nullptr;
#ifdef RECYCLER_STATS
        if (param)
        {
            param->stats = this->collectionStats;
        }
#endif
        succeeded = !objectGraphDumper.isOutOfMemory;
    }
    END_NO_EXCEPTION
    if (isExited)
    {
        this->collectionState = CollectionStateExit;
    }
    if (!succeeded)
    {
        Output::Print(_u("Out of memory dumping object graph\n"));
    }
    Output::Flush();
    return succeeded;
}
// Print a description of a recycler object. With PROFILE_RECYCLER_ALLOC the
// tracked type info (and array-ness) is used; otherwise just the address.
void
Recycler::DumpObjectDescription(void *objectAddress)
{
#ifdef PROFILE_RECYCLER_ALLOC
    type_info const * typeinfo = nullptr;
    bool isArray = false;
    if (this->trackerDictionary)
    {
        TrackerData * trackerData = GetTrackerData(objectAddress);
        if (trackerData != nullptr)
        {
            typeinfo = trackerData->typeinfo;
            isArray = trackerData->isArray;
        }
        else
        {
            // Every live object should have tracker data when tracking is on.
            Assert(false);
        }
    }
    RecyclerObjectDumper::DumpObject(typeinfo, isArray, objectAddress);
#else
    Output::Print(_u("Address %p"), objectAddress);
#endif
}
- #endif
- #ifdef RECYCLER_STRESS
- // All stress-mode collects are implicitly instantiated here
// Trigger a stress-mode collection if any stress flag is set. Checked in
// priority order: full stress, background stress, concurrent (partial or
// not), then partial. Returns true if a collection was started.
bool
Recycler::StressCollectNow()
{
    if (this->recyclerStress)
    {
        this->CollectNow<CollectStress>();
        return true;
    }
#if ENABLE_CONCURRENT_GC
    else if (this->recyclerBackgroundStress)
    {
        this->CollectNow<CollectBackgroundStress>();
        return true;
    }
    // Concurrent stress only applies when concurrent mark or sweep is enabled.
    else if ((this->enableConcurrentMark || this->enableConcurrentSweep)
        && (this->recyclerConcurrentStress
            || this->recyclerConcurrentRepeatStress))
    {
#if ENABLE_PARTIAL_GC
        if (this->recyclerPartialStress)
        {
            this->CollectNow<CollectConcurrentPartialStress>();
            return true;
        }
        else
#endif // ENABLE_PARTIAL_GC
        {
            this->CollectNow<CollectConcurrentStress>();
            return true;
        }
    }
#endif // ENABLE_CONCURRENT_GC
#if ENABLE_PARTIAL_GC
    else if (this->recyclerPartialStress)
    {
        this->CollectNow<CollectPartialStress>();
        return true;
    }
#endif // ENABLE_PARTIAL_GC
    return false;
}
- #endif // RECYCLER_STRESS
- #ifdef TRACK_ALLOC
// Stash allocation-site info for the next recycler allocation; it is consumed
// by ClearTrackAllocInfo. Returns this so the call can be chained at the
// allocation site by the TrackAlloc macros.
Recycler *
Recycler::TrackAllocInfo(TrackAllocData const& data)
{
#ifdef PROFILE_RECYCLER_ALLOC
    if (this->trackerDictionary != nullptr)
    {
        // A previous allocation must have consumed its info already.
        Assert(nextAllocData.IsEmpty());
        nextAllocData = data;
    }
#endif
    return this;
}
// Consume the allocation-site info recorded by TrackAllocInfo, optionally
// copying it out to the caller before clearing it.
void
Recycler::ClearTrackAllocInfo(TrackAllocData* data/* = NULL*/)
{
#ifdef PROFILE_RECYCLER_ALLOC
    if (this->trackerDictionary != nullptr)
    {
        AssertMsg(!nextAllocData.IsEmpty(), "Missing tracking information for this allocation, are you not using the macros?");
        if (data)
        {
            *data = nextAllocData;
        }
        nextAllocData.Clear();
    }
#endif
}
- #ifdef PROFILE_RECYCLER_ALLOC
- bool
- Recycler::DoProfileAllocTracker()
- {
- bool doTracker = false;
- #ifdef RECYCLER_DUMP_OBJECT_GRAPH
- doTracker = Js::Configuration::Global.flags.DumpObjectGraphOnExit
- || Js::Configuration::Global.flags.DumpObjectGraphOnCollect
- || Js::Configuration::Global.flags.DumpObjectGraphOnEnum;
- #endif
- #ifdef LEAK_REPORT
- if (Js::Configuration::Global.flags.IsEnabled(Js::LeakReportFlag))
- {
- doTracker = true;
- }
- #endif
- #ifdef CHECK_MEMORY_LEAK
- if (Js::Configuration::Global.flags.CheckMemoryLeak)
- {
- doTracker = true;
- }
- #endif
- return doTracker || MemoryProfiler::DoTrackRecyclerAllocation();
- }
// Lazily create the type-info -> tracker-item dictionary and its lock when
// any allocation-tracking diagnostic is enabled, and reset the pending
// allocation-site info.
void
Recycler::InitializeProfileAllocTracker()
{
    if (DoProfileAllocTracker())
    {
        trackerDictionary = NoCheckHeapNew(TypeInfotoTrackerItemMap, &NoCheckHeapAllocator::Instance, 163);
#pragma prefast(suppress:6031, "InitializeCriticalSectionAndSpinCount always succeed since Vista. No need to check return value");
        InitializeCriticalSectionAndSpinCount(&trackerCriticalSection, 1000);
    }
    nextAllocData.Clear();
}
// Record one allocation in the per-type tracker: find (or create) the
// TrackerItem for the allocation's type, bump its instance or array counters,
// and tag the object with a pointer to the matching TrackerData.
// Caller must hold trackerCriticalSection.
void
Recycler::TrackAllocCore(void * object, size_t size, const TrackAllocData& trackAllocData, bool traceLifetime)
{
    auto&& typeInfo = trackAllocData.GetTypeInfo();
    if (CONFIG_FLAG(KeepRecyclerTrackData))
    {
        // Re-allocation over kept track data: release the old record first.
        TrackFree((char*)object, size);
    }
    Assert(GetTrackerData(object) == nullptr || GetTrackerData(object) == &TrackerData::ExplicitFreeListObjectData);
    Assert(typeInfo != nullptr);
    TrackerItem * item;
    size_t allocCount = trackAllocData.GetCount();
    // Per-item size excludes the variable "plus" tail of the allocation.
    size_t itemSize = (size - trackAllocData.GetPlusSize());
    bool isArray;
    if (allocCount != (size_t)-1)
    {
        // Count of -1 marks a single instance; anything else is an array.
        isArray = true;
        itemSize = itemSize / allocCount;
    }
    else
    {
        isArray = false;
        allocCount = 1;
    }

    if (!trackerDictionary->TryGetValue(typeInfo, &item))
    {
        if (CONFIG_FLAG(KeepRecyclerTrackData) && isArray) // type info is not useful record stack instead
        {
            size_t stackTraceSize = 16 * sizeof(void*);
            item = NoCheckHeapNewPlus(stackTraceSize, TrackerItem, typeInfo);
            // Stack trace is stored in the "plus" space right after the item.
            StackBackTrace::Capture((char*)&item[1], stackTraceSize, 0);
        }
        else
        {
            item = NoCheckHeapNew(TrackerItem, typeInfo);
        }
        item->instanceData.ItemSize = itemSize;
        item->arrayData.ItemSize = itemSize;
        trackerDictionary->Item(typeInfo, item);
    }
    else
    {
        Assert(item->instanceData.typeinfo == typeInfo);
        Assert(item->instanceData.ItemSize == itemSize);
        Assert(item->arrayData.ItemSize == itemSize);
    }
    // Arrays and single instances are accounted separately per type.
    TrackerData& data = (isArray)? item->arrayData : item->instanceData;
    data.ItemCount += allocCount;
    data.AllocCount++;
    data.ReqSize += size;
    data.AllocSize += HeapInfo::GetAlignedSizeNoCheck(size);
#ifdef TRACE_OBJECT_LIFETIME
    data.TraceLifetime = traceLifetime;
    if (traceLifetime)
    {
        Output::Print(data.isArray ? _u("Allocated %S[] %p\n") : _u("Allocated %S %p\n"), data.typeinfo->name(), object);
    }
#endif
#ifdef PERF_COUNTERS
    ++data.counter;
    data.sizeCounter += HeapInfo::GetAlignedSizeNoCheck(size);
#endif
    SetTrackerData(object, &data);
}
- void* Recycler::TrackAlloc(void* object, size_t size, const TrackAllocData& trackAllocData, bool traceLifetime)
- {
- if (this->trackerDictionary != nullptr)
- {
- Assert(nextAllocData.IsEmpty()); // should have been cleared
- EnterCriticalSection(&trackerCriticalSection);
- TrackAllocCore(object, size, trackAllocData);
- LeaveCriticalSection(&trackerCriticalSection);
- }
- return object;
- }
- void
- Recycler::TrackIntegrate(__in_ecount(blockSize) char * blockAddress, size_t blockSize, size_t allocSize, size_t objectSize, const TrackAllocData& trackAllocData)
- {
- if (this->trackerDictionary != nullptr)
- {
- Assert(nextAllocData.IsEmpty()); // should have been cleared
- EnterCriticalSection(&trackerCriticalSection);
- char * address = blockAddress;
- char * blockEnd = blockAddress + blockSize;
- while (address + allocSize <= blockEnd)
- {
- TrackAllocCore(address, objectSize, trackAllocData);
- address += allocSize;
- }
- LeaveCriticalSection(&trackerCriticalSection);
- }
- }
// Record the free of a tracked object: update the per-type free counters,
// release weak-ref accounting when applicable, and clear the object's tracker
// tag. Always returns true.
BOOL Recycler::TrackFree(const char* address, size_t size)
{
    if (this->trackerDictionary != nullptr)
    {
        EnterCriticalSection(&trackerCriticalSection);
        TrackerData * data = GetTrackerData((char *)address);
        if (data != nullptr)
        {
            // EmptyData marks never-allocated slots; those carry no counters.
            if (data != &TrackerData::EmptyData)
            {
#ifdef PERF_COUNTERS
                --data->counter;
                data->sizeCounter -= size;
#endif
                if (data->typeinfo == &typeid(RecyclerWeakReferenceBase))
                {
                    TrackFreeWeakRef((RecyclerWeakReferenceBase *)address);
                }
                data->FreeSize += size;
                data->FreeCount++;
#ifdef TRACE_OBJECT_LIFETIME
                if (data->TraceLifetime)
                {
                    Output::Print(data->isArray ? _u("Freed %S[] %p\n") : _u("Freed %S %p\n"), data->typeinfo->name(), address);
                }
#endif
            }
            SetTrackerData((char *)address, nullptr);
        }
        else
        {
            // Without KeepRecyclerTrackData, a free of an untracked object is a bug.
            if (!CONFIG_FLAG(KeepRecyclerTrackData))
            {
                Assert(false);
            }
        }
        LeaveCriticalSection(&trackerCriticalSection);
    }
    return true;
}
- Recycler::TrackerData *
- Recycler::GetTrackerData(void * address)
- {
- HeapBlock * heapBlock = this->FindHeapBlock(address);
- Assert(heapBlock != nullptr);
- return (Recycler::TrackerData *)heapBlock->GetTrackerData(address);
- }
- void
- Recycler::SetTrackerData(void * address, TrackerData * data)
- {
- HeapBlock * heapBlock = this->FindHeapBlock(address);
- Assert(heapBlock != nullptr);
- heapBlock->SetTrackerData(address, data);
- }
- void
- Recycler::TrackUnallocated(__in char* address, __in char *endAddress, size_t sizeCat)
- {
- if (!CONFIG_FLAG(KeepRecyclerTrackData))
- {
- if (this->trackerDictionary != nullptr)
- {
- EnterCriticalSection(&trackerCriticalSection);
- while (address + sizeCat <= endAddress)
- {
- Assert(GetTrackerData(address) == nullptr);
- SetTrackerData(address, &TrackerData::EmptyData);
- address += sizeCat;
- }
- LeaveCriticalSection(&trackerCriticalSection);
- }
- }
- }
// Debug/perf-counter accounting for a newly allocated weak reference: point
// the weak ref at a per-type counter (tracked item's counter when the type is
// already in the dictionary, otherwise a dedicated weak-ref perf counter) and
// increment it.
void
Recycler::TrackAllocWeakRef(RecyclerWeakReferenceBase * weakRef)
{
    Assert(weakRef->typeInfo != nullptr);
#if DBG && defined(PERF_COUNTERS)
    if (this->trackerDictionary != nullptr)
    {
        TrackerItem * item;
        if (trackerDictionary->TryGetValue(weakRef->typeInfo, &item))
        {
            weakRef->counter = &item->weakRefCounter;
        }
        else
        {
            weakRef->counter = &PerfCounter::RecyclerTrackerCounterSet::GetWeakRefPerfCounter(weakRef->typeInfo);
        }
        ++(*weakRef->counter);
    }
#endif
}
- void
- Recycler::TrackFreeWeakRef(RecyclerWeakReferenceBase * weakRef)
- {
- #if DBG && defined(PERF_COUNTERS)
- if (weakRef->counter != nullptr)
- {
- --(*weakRef->counter);
- }
- #endif
- }
- void
- Recycler::PrintAllocStats()
- {
- if (this->trackerDictionary == nullptr)
- {
- return;
- }
- size_t itemCount = 0;
- int allocCount = 0;
- int64 reqSize = 0;
- int64 allocSize = 0;
- int freeCount = 0;
- int64 freeSize = 0;
- Output::Print(_u("=================================================================================================================\n"));
- Output::Print(_u("Recycler Allocations\n"));
- Output::Print(_u("=================================================================================================================\n"));
- Output::Print(_u("ItemSize ItemCount AllocCount RequestSize AllocSize FreeCount FreeSize DiffCount DiffSize \n"));
- Output::Print(_u("-------- ---------- ---------- --------------- --------------- ---------- --------------- ---------- ---------------\n"));
- for (int i = 0; i < trackerDictionary->Count(); i++)
- {
- TrackerItem * item = trackerDictionary->GetValueAt(i);
- type_info const * typeinfo = trackerDictionary->GetKeyAt(i);
- if (item->instanceData.AllocCount != 0)
- {
- Output::Print(_u("%8d %10d %10d %15I64d %15I64d %10d %15I64d %10d %15I64d %S\n"),
- item->instanceData.ItemSize, item->instanceData.ItemCount, item->instanceData.AllocCount, item->instanceData.ReqSize,
- item->instanceData.AllocSize, item->instanceData.FreeCount, item->instanceData.FreeSize,
- item->instanceData.AllocCount - item->instanceData.FreeCount, item->instanceData.AllocSize - item->instanceData.FreeSize, typeinfo->name());
- itemCount += item->instanceData.ItemCount;
- allocCount += item->instanceData.AllocCount;
- reqSize += item->instanceData.ReqSize;
- allocSize += item->instanceData.AllocSize;
- freeCount += item->instanceData.FreeCount;
- freeSize += item->instanceData.FreeSize;
- }
- if (item->arrayData.AllocCount != 0)
- {
- Output::Print(_u("%8d %10d %10d %15I64d %15I64d %10d %15I64d %10d %15I64d %S[]\n"),
- item->arrayData.ItemSize, item->arrayData.ItemCount, item->arrayData.AllocCount, item->arrayData.ReqSize,
- item->arrayData.AllocSize, item->arrayData.FreeCount, item->arrayData.FreeSize,
- item->instanceData.AllocCount - item->instanceData.FreeCount, item->arrayData.AllocSize - item->arrayData.FreeSize, typeinfo->name());
- itemCount += item->arrayData.ItemCount;
- allocCount += item->arrayData.AllocCount;
- reqSize += item->arrayData.ReqSize;
- allocSize += item->arrayData.AllocSize;
- freeCount += item->arrayData.FreeCount;
- freeSize += item->arrayData.FreeSize;
- }
- }
- Output::Print(_u("-------- ---------- ---------- --------------- --------------- ---------- --------------- ---------- ---------------\n"));
- Output::Print(_u(" %8d %10d %15I64d %15I64d %10d %15I64d %10d %15I64d **Total**\n"),
- itemCount, allocCount, reqSize, allocSize, freeCount, freeSize, allocCount - freeCount, allocSize - freeSize);
- #ifdef EXCEL_FRIENDLY_DUMP
- Output::Print(_u("\nExcel friendly version\nItemSize\tItemCount\tAllocCount\tRequestSize\tAllocSize\tFreeCount\tFreeSize\tDiffCount\tDiffSize\tType\n"));
- for (int i = 0; i < trackerDictionary->Count(); i++)
- {
- TrackerItem * item = trackerDictionary->GetValueAt(i);
- type_info const * typeinfo = trackerDictionary->GetKeyAt(i);
- if (item->instanceData.AllocCount != 0)
- {
- Output::Print(_u("%d\t%d\t%d\t%I64d\t%I64d\t%d\t%I64d\t%d\t%I64d\t%S\n"),
- item->instanceData.ItemSize, item->instanceData.ItemCount, item->instanceData.AllocCount, item->instanceData.ReqSize,
- item->instanceData.AllocSize, item->instanceData.FreeCount, item->instanceData.FreeSize,
- item->instanceData.AllocCount - item->instanceData.FreeCount, item->instanceData.AllocSize - item->instanceData.FreeSize, typeinfo->name());
- }
- if (item->arrayData.AllocCount != 0)
- {
- Output::Print(_u("%d\t%d\t%d\t%I64d\t%I64d\t%d\t%I64d\t%d\t%I64d\t%S[]\n"),
- item->arrayData.ItemSize, item->arrayData.ItemCount, item->arrayData.AllocCount, item->arrayData.ReqSize,
- item->arrayData.AllocSize, item->arrayData.FreeCount, item->arrayData.FreeSize,
- item->instanceData.AllocCount - item->instanceData.FreeCount, item->arrayData.AllocSize - item->arrayData.FreeSize, typeinfo->name());
- }
- }
- #endif // EXCEL_FRIENDLY_DUMP
- Output::Flush();
- }
- #endif // PROFILE_RECYCLER_ALLOC
- #endif // TRACK_ALLOC
- #ifdef RECYCLER_VERIFY_MARK
// Verify that everything reachable from the roots is marked.
void
Recycler::VerifyMark()
{
    VerifyMarkRoots();
    // Can't really verify stack since the recycler code between ScanStack to now may have introduce false references.
    // VerifyMarkStack();
    autoHeap.VerifyMark();
}
// Verify marks starting from the root set: the transient pinned object, the
// pinned-object map, and guest/external guest arenas. External roots cannot
// be verified from here.
void
Recycler::VerifyMarkRoots()
{
    {
        this->VerifyMark(transientPinnedObject);
        pinnedObjectMap.Map([this](void * obj, PinRecord const &refCount)
        {
            if (refCount == 0)
            {
                // Fully unpinned entries are only legal while an unpin is pending.
                Assert(this->hasPendingUnpinnedObject);
            }
            else
            {
                // Use the pinrecord as the source reference
                this->VerifyMark(obj);
            }
        });
    }
    DList<GuestArenaAllocator, HeapAllocator>::Iterator guestArenaIter(&guestArenaList);
    while (guestArenaIter.Next())
    {
        if (guestArenaIter.Data().pendingDelete)
        {
            // Arenas queued for deletion are not roots anymore.
            Assert(this->hasPendingDeleteGuestArena);
        }
        else
        {
            VerifyMarkArena(&guestArenaIter.Data());
        }
    }
    DList<ArenaData *, HeapAllocator>::Iterator externalGuestArenaIter(&externalGuestArenaList);
    while (externalGuestArenaIter.Next())
    {
        VerifyMarkArena(externalGuestArenaIter.Data());
    }
    // We can't check external roots here
}
// Verify marks for every root slot held in an arena: its current big blocks,
// full big blocks, and raw memory blocks.
void
Recycler::VerifyMarkArena(ArenaData * alloc)
{
    VerifyMarkBigBlockList(alloc->GetBigBlocks(false));
    VerifyMarkBigBlockList(alloc->GetFullBlocks());
    VerifyMarkArenaMemoryBlockList(alloc->GetMemoryBlocks());
}
- void
- Recycler::VerifyMarkBigBlockList(BigBlock * memoryBlocks)
- {
- size_t scanRootBytes = 0;
- BigBlock *blockp = memoryBlocks;
- while (blockp != NULL)
- {
- void** base=(void**)blockp->GetBytes();
- size_t slotCount = blockp->currentByte / sizeof(void*);
- scanRootBytes += blockp->currentByte;
- for (size_t i=0; i < slotCount; i++)
- {
- VerifyMark(base[i]);
- }
- blockp = blockp->nextBigBlock;
- }
- }
- void
- Recycler::VerifyMarkArenaMemoryBlockList(ArenaMemoryBlock * memoryBlocks)
- {
- size_t scanRootBytes = 0;
- ArenaMemoryBlock *blockp = memoryBlocks;
- while (blockp != NULL)
- {
- void** base=(void**)blockp->GetBytes();
- size_t slotCount = blockp->nbytes / sizeof(void*);
- scanRootBytes += blockp->nbytes;
- for (size_t i=0; i< slotCount; i++)
- {
- VerifyMark(base[i]);
- }
- blockp = blockp->next;
- }
- }
// Verify marks for every pointer-sized value on the current thread's stack
// and in the saved register state. (Currently unused from VerifyMark — stack
// values may contain stale references by the time this runs.)
void
Recycler::VerifyMarkStack()
{
    SAVE_THREAD_CONTEXT();
    void ** stackTop = (void**) this->savedThreadContext.GetStackTop();
    void * stackStart = GetStackBase();
    // Stack grows down: the base is the highest address.
    Assert(stackStart > stackTop);
    for (;stackTop < stackStart; stackTop++)
    {
        void* candidate = *stackTop;
        VerifyMark(candidate);
    }
    void** registers = this->savedThreadContext.GetRegisters();
    for (int i = 0; i < SavedRegisterState::NumRegistersToSave; i++)
    {
        VerifyMark(registers[i]);
    }
}
- bool
- Recycler::VerifyMark(void * candidate)
- {
- void * realAddress;
- HeapBlock * heapBlock;
- if (this->enableScanInteriorPointers)
- {
- heapBlock = heapBlockMap.GetHeapBlock(candidate);
- if (heapBlock == nullptr)
- {
- return false;
- }
- realAddress = heapBlock->GetRealAddressFromInterior(candidate);
- if (realAddress == nullptr)
- {
- return false;
- }
- }
- else
- {
- heapBlock = this->FindHeapBlock(candidate);
- if (heapBlock == nullptr)
- {
- return false;
- }
- realAddress = candidate;
- }
- return heapBlock->VerifyMark(realAddress);
- }
- #endif
// Create a guest arena whose contents are scanned as GC roots; it is owned by
// (and prepended to) guestArenaList.
ArenaAllocator *
Recycler::CreateGuestArena(char16 const * name, void (*outOfMemoryFunc)())
{
    // Note, guest arenas use the large block allocator.
    return guestArenaList.PrependNode(&HeapAllocator::Instance, name, &recyclerLargeBlockPageAllocator, outOfMemoryFunc);
}
// Remove a guest arena created by CreateGuestArena. If a concurrent find-root
// pass is reading the list, the arena is only flagged pendingDelete and the
// actual removal happens later on the main thread.
void
Recycler::DeleteGuestArena(ArenaAllocator * arenaAllocator)
{
    GuestArenaAllocator * guestArenaAllocator = static_cast<GuestArenaAllocator *>(arenaAllocator);
#if ENABLE_CONCURRENT_GC
    if (this->hasPendingConcurrentFindRoot)
    {
        // We are doing concurrent find root, don't modify the list and mark the arena to be delete
        // later when we do find root in thread.
        Assert(guestArenaList.HasElement(guestArenaAllocator));
        this->hasPendingDeleteGuestArena = true;
        guestArenaAllocator->pendingDelete = true;
    }
    else
#endif
    {
        guestArenaList.RemoveElement(&HeapAllocator::Instance, guestArenaAllocator);
    }
}
- #ifdef LEAK_REPORT
- void
- Recycler::ReportLeaks()
- {
- if (GetRecyclerFlagsTable().IsEnabled(Js::LeakReportFlag))
- {
- if (GetRecyclerFlagsTable().ForceMemoryLeak)
- {
- AUTO_HANDLED_EXCEPTION_TYPE(ExceptionType_DisableCheck);
- struct FakeMemory { Field(int) f; };
- FakeMemory * f = RecyclerNewStruct(this, FakeMemory);
- this->RootAddRef(f);
- }
- LeakReport::StartSection(_u("Object Graph"));
- LeakReport::StartRedirectOutput();
- RecyclerObjectGraphDumper::Param param = { 0 };
- param.skipStack = true;
- if (!this->DumpObjectGraph(¶m))
- {
- LeakReport::Print(_u("--------------------------------------------------------------------------------\n"));
- LeakReport::Print(_u("ERROR: Out of memory generating leak report\n"));
- param.stats.markData.markCount = 0;
- }
- LeakReport::EndRedirectOutput();
- if (param.stats.markData.markCount != 0)
- {
- LeakReport::Print(_u("--------------------------------------------------------------------------------\n"));
- LeakReport::Print(_u("Recycler Leaked Object: %d bytes (%d objects)\n"),
- param.stats.markData.markBytes, param.stats.markData.markCount);
- #ifdef STACK_BACK_TRACE
- if (GetRecyclerFlagsTable().LeakStackTrace)
- {
- LeakReport::StartSection(_u("Pinned object stack traces"));
- LeakReport::StartRedirectOutput();
- this->PrintPinnedObjectStackTraces();
- LeakReport::EndRedirectOutput();
- LeakReport::EndSection();
- }
- #endif
- }
- LeakReport::EndSection();
- }
- }
// Emit the leak report from DLL process-detach, redirecting output into its
// own leak-report section. ReportOnProcessDetach handles the thread/GC-state
// fixups needed to run on the detach thread.
void
Recycler::ReportLeaksOnProcessDetach()
{
    if (GetRecyclerFlagsTable().IsEnabled(Js::LeakReportFlag))
    {
        AUTO_LEAK_REPORT_SECTION(this->GetRecyclerFlagsTable(), _u("Recycler (%p): Process Termination"), this);
        LeakReport::StartRedirectOutput();
        ReportOnProcessDetach([=]() { this->ReportLeaks(); });
        LeakReport::EndRedirectOutput();
    }
}
- #endif
- #ifdef CHECK_MEMORY_LEAK
- void
- Recycler::CheckLeaks(char16 const * header)
- {
- if (GetRecyclerFlagsTable().CheckMemoryLeak && this->isPrimaryMarkContextInitialized)
- {
- if (GetRecyclerFlagsTable().ForceMemoryLeak)
- {
- AUTO_HANDLED_EXCEPTION_TYPE(ExceptionType_DisableCheck);
- struct FakeMemory { Field(int) f; };
- FakeMemory * f = RecyclerNewStruct(this, FakeMemory);
- this->RootAddRef(f);
- }
- Output::CaptureStart();
- Output::Print(_u("-------------------------------------------------------------------------------------\n"));
- Output::Print(_u("Recycler (%p): %s Leaked Roots\n"), this, header);
- Output::Print(_u("-------------------------------------------------------------------------------------\n"));
- RecyclerObjectGraphDumper::Param param = { 0 };
- param.dumpRootOnly = true;
- param.skipStack = true;
- if (!this->DumpObjectGraph(¶m))
- {
- free(Output::CaptureEnd());
- Output::Print(_u("ERROR: Out of memory generating leak report\n"));
- return;
- }
- if (param.stats.markData.markCount != 0)
- {
- #ifdef STACK_BACK_TRACE
- if (GetRecyclerFlagsTable().LeakStackTrace)
- {
- Output::Print(_u("-------------------------------------------------------------------------------------\n"));
- Output::Print(_u("Pinned object stack traces"));
- Output::Print(_u("-------------------------------------------------------------------------------------\n"));
- this->PrintPinnedObjectStackTraces();
- }
- #endif
- Output::Print(_u("-------------------------------------------------------------------------------------\n"));
- Output::Print(_u("Recycler Leaked Object: %d bytes (%d objects)\n"),
- param.stats.markData.markBytes, param.stats.markData.markCount);
- char16 * buffer = Output::CaptureEnd();
- MemoryLeakCheck::AddLeakDump(buffer, param.stats.markData.markBytes, param.stats.markData.markCount);
- #ifdef GENERATE_DUMP
- if (GetRecyclerFlagsTable().IsEnabled(Js::DumpOnLeakFlag))
- {
- Js::Throw::GenerateDump(GetRecyclerFlagsTable().DumpOnLeak);
- }
- #endif
- }
- else
- {
- free(Output::CaptureEnd());
- }
- }
- }
- void
- Recycler::CheckLeaksOnProcessDetach(char16 const * header)
- {
- if (GetRecyclerFlagsTable().CheckMemoryLeak)
- {
- ReportOnProcessDetach([=]() { this->CheckLeaks(header); });
- }
- }
- #endif
- #if defined(LEAK_REPORT) || defined(CHECK_MEMORY_LEAK)
// Run a leak-report callback during process detach, which may execute on an
// arbitrary thread: relax thread checks, abort/refuse in-flight collections,
// and detach external root marking before invoking fn.
template <class Fn>
void
Recycler::ReportOnProcessDetach(Fn fn)
{
#if DBG
    // Process detach can be done on any thread, just disable the thread check
    this->markContext.GetPageAllocator()->SetDisableThreadAccessCheck();
#endif
#if ENABLE_CONCURRENT_GC
    if (this->IsConcurrentState())
    {
        this->AbortConcurrent(true);
    }
    if (this->CollectionInProgress())
    {
        Output::Print(_u("WARNING: Thread terminated during GC. Can't dump object graph\n"));
        return;
    }
#else
    Assert(!this->CollectionInProgress());
#endif
    // Don't mark external roots on another thread
    this->SetExternalRootMarker(NULL, NULL);
#if DBG
    this->ResetThreadId();
#endif
    fn();
}
- #ifdef STACK_BACK_TRACE
// Print, for every pinned object, its description followed by the stack
// traces captured when it was pinned.
void
Recycler::PrintPinnedObjectStackTraces()
{
    pinnedObjectMap.Map([this](void * object, PinRecord const& pinRecord)
    {
        this->DumpObjectDescription(object);
        Output::Print(_u("\n"));
        StackBackTraceNode::PrintAll(pinRecord.stackBackTraces);
    }
    );
}
- #endif
- #endif
- #if defined(RECYCLER_DUMP_OBJECT_GRAPH) || defined(LEAK_REPORT) || defined(CHECK_MEMORY_LEAK)
// Note that we are inside DllCanUnloadNow and stop marking external roots
// (their owner may already be gone) for subsequent graph dumps/leak reports.
void
Recycler::SetInDllCanUnloadNow()
{
    inDllCanUnloadNow = true;
    // Just clear out the root marker for the dump graph and report leaks
    SetExternalRootMarker(NULL, NULL);
}
// Note that we are inside process detach and stop marking external roots
// (their owner may already be gone) for subsequent graph dumps/leak reports.
void
Recycler::SetInDetachProcess()
{
    inDetachProcess = true;
    // Just clear out the root marker for the dump graph and report leaks
    SetExternalRootMarker(NULL, NULL);
}
- #endif
- #ifdef ENABLE_JS_ETW
- ULONG Recycler::EventWriteFreeMemoryBlock(HeapBlock* heapBlock)
- {
- if (EventEnabledJSCRIPT_RECYCLER_FREE_MEMORY_BLOCK())
- {
- char* memoryAddress = NULL;
- ULONG objectSize = 0;
- ULONG blockSize = 0;
- switch (heapBlock->GetHeapBlockType())
- {
- case HeapBlock::HeapBlockType::SmallFinalizableBlockType:
- case HeapBlock::HeapBlockType::SmallNormalBlockType:
- #ifdef RECYCLER_WRITE_BARRIER
- case HeapBlock::HeapBlockType::SmallFinalizableBlockWithBarrierType:
- case HeapBlock::HeapBlockType::SmallNormalBlockWithBarrierType:
- #endif
- case HeapBlock::HeapBlockType::SmallLeafBlockType:
- {
- SmallHeapBlock* smallHeapBlock = static_cast<SmallHeapBlock*>(heapBlock);
- memoryAddress = smallHeapBlock->GetAddress();
- blockSize = (ULONG)(smallHeapBlock->GetEndAddress() - memoryAddress);
- objectSize = smallHeapBlock->GetObjectSize();
- }
- break;
- case HeapBlock::HeapBlockType::MediumFinalizableBlockType:
- case HeapBlock::HeapBlockType::MediumNormalBlockType:
- #ifdef RECYCLER_WRITE_BARRIER
- case HeapBlock::HeapBlockType::MediumFinalizableBlockWithBarrierType:
- case HeapBlock::HeapBlockType::MediumNormalBlockWithBarrierType:
- #endif
- case HeapBlock::HeapBlockType::MediumLeafBlockType:
- {
- MediumHeapBlock* mediumHeapBlock = static_cast<MediumHeapBlock*>(heapBlock);
- memoryAddress = mediumHeapBlock->GetAddress();
- blockSize = (ULONG)(mediumHeapBlock->GetEndAddress() - memoryAddress);
- objectSize = mediumHeapBlock->GetObjectSize();
- }
- case HeapBlock::HeapBlockType::LargeBlockType:
- {
- LargeHeapBlock* largeHeapBlock = static_cast<LargeHeapBlock*>(heapBlock);
- memoryAddress = largeHeapBlock->GetBeginAddress();
- blockSize = (ULONG)(largeHeapBlock->GetEndAddress() - memoryAddress);
- objectSize = blockSize;
- }
- break;
- default:
- AssertMsg(FALSE, "invalid heapblock type");
- }
- EventWriteJSCRIPT_RECYCLER_FREE_MEMORY_BLOCK(memoryAddress, blockSize, objectSize);
- }
- return S_OK;
- }
// Flush the buffered free-memory records as one bulk ETW event and reset the
// buffer cursor.
void Recycler::FlushFreeRecord()
{
    Assert(bulkFreeMemoryWrittenCount <= Recycler::BulkFreeMemoryCount);
    JS_ETW(EventWriteJSCRIPT_RECYCLER_FREE_MEMORY(bulkFreeMemoryWrittenCount, sizeof(Recycler::ETWFreeRecord), etwFreeRecords));
    bulkFreeMemoryWrittenCount = 0;
}
- void Recycler::AppendFreeMemoryETWRecord(__in char *address, size_t size)
- {
- Assert(bulkFreeMemoryWrittenCount < Recycler::BulkFreeMemoryCount);
- __analysis_assume(bulkFreeMemoryWrittenCount < Recycler::BulkFreeMemoryCount);
- etwFreeRecords[bulkFreeMemoryWrittenCount].memoryAddress = address;
- // TODO: change to size_t or uint64?
- etwFreeRecords[bulkFreeMemoryWrittenCount].objectSize = (uint)size;
- bulkFreeMemoryWrittenCount++;
- if (bulkFreeMemoryWrittenCount == Recycler::BulkFreeMemoryCount)
- {
- FlushFreeRecord();
- Assert(bulkFreeMemoryWrittenCount == 0);
- }
- }
- #endif
- #ifdef PROFILE_EXEC
// Allocate an arena for background GC profiling, owned by (and prepended to)
// backgroundProfilerArena.
ArenaAllocator *
Recycler::AddBackgroundProfilerArena()
{
    return this->backgroundProfilerArena.PrependNode(&HeapAllocator::Instance,
        _u("BgGCProfiler"), &this->backgroundProfilerPageAllocator, Js::Throw::OutOfMemory);
}
// Release an arena previously returned by AddBackgroundProfilerArena.
void
Recycler::ReleaseBackgroundProfilerArena(ArenaAllocator * arena)
{
    this->backgroundProfilerArena.RemoveElement(&HeapAllocator::Instance, arena);
}
// Install the foreground and background execution profilers.
void
Recycler::SetProfiler(Js::Profiler * profiler, Js::Profiler * backgroundProfiler)
{
    this->profiler = profiler;
    this->backgroundProfiler = backgroundProfiler;
}
- #endif
// Register (or, with a null callback, replace) the before-collect callback
// for an object. At most one callback per object. Registering from inside a
// before-collect callback revives the object by re-marking it.
void Recycler::SetObjectBeforeCollectCallback(void* object,
    ObjectBeforeCollectCallback callback,
    void* callbackState,
    ObjectBeforeCollectCallbackWrapper callbackWrapper,
    void* threadContext)
{
    if (objectBeforeCollectCallbackState == ObjectBeforeCollectCallback_Shutdown)
    {
        return; // NOP at shutdown
    }
    if (objectBeforeCollectCallbackMap == nullptr)
    {
        // Nothing to clear and no map yet: registering a null callback is a no-op.
        if (callback == nullptr) return;
        objectBeforeCollectCallbackMap = HeapNew(ObjectBeforeCollectCallbackMap, &HeapAllocator::Instance);
    }
    // only allow 1 callback per object
    objectBeforeCollectCallbackMap->Item(object, ObjectBeforeCollectCallbackData(callbackWrapper, callback, callbackState, threadContext));
    if (callback != nullptr && this->IsInObjectBeforeCollectCallback()) // revive
    {
        this->ScanMemory<false>(&object, sizeof(object));
        this->ProcessMark(/*background*/false);
    }
}
// Invoke before-collect callbacks for objects that are about to be collected
// (or for all objects at shutdown). Returns false when there were no
// registered callbacks, true otherwise.
bool Recycler::ProcessObjectBeforeCollectCallbacks(bool atShutdown/*= false*/)
{
    if (this->objectBeforeCollectCallbackMap == nullptr)
    {
        return false; // no callbacks
    }
    Assert(atShutdown || this->IsMarkState());
    Assert(!this->IsInObjectBeforeCollectCallback());
    // Flag that we are inside callback processing for the duration; restored on exit.
    AutoRestoreValue<ObjectBeforeCollectCallbackState> autoInObjectBeforeCollectCallback(&objectBeforeCollectCallbackState,
        atShutdown ? ObjectBeforeCollectCallback_Shutdown: ObjectBeforeCollectCallback_Normal);
    // The callbacks may register/unregister callbacks while we are enumerating the current map. To avoid
    // conflicting usage of the callback map, we swap it out. New registration will go to a new map.
    AutoAllocatorObjectPtr<ObjectBeforeCollectCallbackMap, HeapAllocator> oldCallbackMap(
        this->objectBeforeCollectCallbackMap, &HeapAllocator::Instance);
    this->objectBeforeCollectCallbackMap = nullptr;
    bool hasRemainingCallbacks = false;
    oldCallbackMap->MapAndRemoveIf([&](const ObjectBeforeCollectCallbackMap::EntryType& entry)
    {
        const ObjectBeforeCollectCallbackData& data = entry.Value();
        if (data.callback != nullptr)
        {
            void* object = entry.Key();
            // Fire only for objects that are not surviving this collection
            // (unmarked), or unconditionally at shutdown.
            if (atShutdown || !this->IsObjectMarked(object))
            {
                if (data.callbackWrapper != nullptr)
                {
                    data.callbackWrapper(data.callback, object, data.callbackState, data.threadContext);
                }
                else
                {
                    data.callback(object, data.callbackState);
                }
            }
            else
            {
                hasRemainingCallbacks = true;
                return false; // Do not remove this entry, remaining callback for future
            }
        }
        return true; // Remove this entry
    });
    // Merge back remaining callbacks if any
    if (hasRemainingCallbacks)
    {
        if (this->objectBeforeCollectCallbackMap == nullptr)
        {
            // No new registrations happened; just reinstate the old map.
            this->objectBeforeCollectCallbackMap = oldCallbackMap.Detach();
        }
        else
        {
            if (oldCallbackMap->Count() > this->objectBeforeCollectCallbackMap->Count())
            {
                // Swap so that oldCallbackMap is the smaller one
                ObjectBeforeCollectCallbackMap* tmp = oldCallbackMap.Detach();
                *&oldCallbackMap = this->objectBeforeCollectCallbackMap;
                this->objectBeforeCollectCallbackMap = tmp;
            }
            oldCallbackMap->Map([&](void* object, const ObjectBeforeCollectCallbackData& data)
            {
                this->objectBeforeCollectCallbackMap->Item(object, data);
            });
        }
    }
    return true; // maybe called callbacks
}
- void Recycler::ClearObjectBeforeCollectCallbacks()
- {
- // This is called at shutting down. All objects will be gone. Invoke each registered callback if any.
- ProcessObjectBeforeCollectCallbacks(/*atShutdown*/true);
- Assert(objectBeforeCollectCallbackMap == nullptr);
- }
#ifdef RECYCLER_TEST_SUPPORT
// Test-only: installs a hook that NotifyFree calls for every swept object.
// Legal only when the RecyclerTest binary feature is enabled, and only while
// no collection is in progress.
void Recycler::SetCheckFn(BOOL(*checkFn)(char* addr, size_t size))
{
    Assert(BinaryFeatureControl::RecyclerTest());
    this->EnsureNotCollecting();

    this->checkFn = checkFn;
}
#endif
- void
- Recycler::NotifyFree(__in char *address, size_t size)
- {
- RecyclerVerboseTrace(GetRecyclerFlagsTable(), _u("Sweeping object %p\n"), address);
- #ifdef RECYCLER_TEST_SUPPORT
- if (BinaryFeatureControl::RecyclerTest())
- {
- if (checkFn != NULL)
- checkFn(address, size);
- }
- #endif
- #ifdef ENABLE_JS_ETW
- if (EventEnabledJSCRIPT_RECYCLER_FREE_MEMORY())
- {
- AppendFreeMemoryETWRecord(address, (UINT)size);
- }
- #endif
- RecyclerMemoryTracking::ReportFree(this, address, size);
- RECYCLER_PERF_COUNTER_DEC(LiveObject);
- RECYCLER_PERF_COUNTER_SUB(LiveObjectSize, size);
- RECYCLER_PERF_COUNTER_ADD(FreeObjectSize, size);
- if (HeapInfo::IsSmallBlockAllocation(HeapInfo::GetAlignedSizeNoCheck(size)))
- {
- RECYCLER_PERF_COUNTER_DEC(SmallHeapBlockLiveObject);
- RECYCLER_PERF_COUNTER_SUB(SmallHeapBlockLiveObjectSize, size);
- RECYCLER_PERF_COUNTER_ADD(SmallHeapBlockFreeObjectSize, size);
- }
- else
- {
- RECYCLER_PERF_COUNTER_DEC(LargeHeapBlockLiveObject);
- RECYCLER_PERF_COUNTER_SUB(LargeHeapBlockLiveObjectSize, size);
- RECYCLER_PERF_COUNTER_ADD(LargeHeapBlockFreeObjectSize, size);
- }
- #ifdef RECYCLER_MEMORY_VERIFY
- if (this->VerifyEnabled())
- {
- VerifyCheckPad(address, size);
- }
- #endif
- #ifdef PROFILE_RECYCLER_ALLOC
- if (!CONFIG_FLAG(KeepRecyclerTrackData))
- {
- TrackFree(address, size);
- }
- #endif
- #ifdef RECYCLER_STATS
- collectionStats.objectSweptCount++;
- collectionStats.objectSweptBytes += size;
- if (!isForceSweeping)
- {
- collectionStats.objectSweptFreeListCount++;
- collectionStats.objectSweptFreeListBytes += size;
- }
- #endif
- }
- #if DBG
- void
- Recycler::WBSetBit(char* addr)
- {
- Recycler* recycler = Recycler::recyclerList;
- while (recycler)
- {
- auto heapBlock = recycler->FindHeapBlock((void*)((UINT_PTR)addr&~HeapInfo::ObjectAlignmentMask));
- if (heapBlock)
- {
- heapBlock->WBSetBit(addr);
- break;
- }
- recycler = recycler->next;
- }
- }
- void
- Recycler::WBSetBits(char* addr, uint length)
- {
- Recycler* recycler = Recycler::recyclerList;
- while (recycler)
- {
- auto heapBlock = recycler->FindHeapBlock((void*)((UINT_PTR)addr&~HeapInfo::ObjectAlignmentMask));
- if (heapBlock)
- {
- heapBlock->WBSetBits(addr, length);
- }
- recycler = recycler->next;
- }
- }
- #endif
// Returns the allocated size of the object this info describes, derived from
// its owning heap block. NOTE: the if/else below deliberately straddles the
// #if/#else/#endif — exactly one "large block" branch is compiled in, and the
// small-block `else` pairs with whichever one was selected.
size_t
RecyclerHeapObjectInfo::GetSize() const
{
    Assert(m_heapBlock);

    size_t size;
#if LARGEHEAPBLOCK_ENCODING
    // With header encoding, the large-object size is read off the cached
    // (already decoded) large heap block header.
    if (isUsingLargeHeapBlock)
    {
        size = m_largeHeapBlockHeader->objectSize;
    }
#else
    // Without encoding, ask the large heap block for this address's size.
    if (m_heapBlock->IsLargeHeapBlock())
    {
        size = ((LargeHeapBlock*)m_heapBlock)->GetObjectSize(m_address);
    }
#endif
    else
    {
        // All small heap block types have the same layout for the object size field.
        size = ((SmallHeapBlock*)m_heapBlock)->GetObjectSize();
    }

#ifdef RECYCLER_MEMORY_VERIFY
    if (m_recycler->VerifyEnabled())
    {
        // In verify mode the tail of the allocation stores a size_t pad size;
        // subtract it so callers see the logical object size.
        size -= *(size_t *)(((char *)m_address) + size - sizeof(size_t));
    }
#endif
    return size;
}
// Explicit instantiation of the inlined attribute-allocating path for
// ObjectInfoBits value 32 (presumably LeafBit — confirm against the
// ObjectInfoBits enum) with the non-nothrow (false) variant.
template char* Recycler::AllocWithAttributesInlined<(Memory::ObjectInfoBits)32, false>(size_t);
|