pytorch-metadata.json

[
  {
    "name": "__torch__.torch.classes.rnn.CellParamsBase",
    "inputs": [
      { "name": "type", "type": "string" },
      { "name": "tensors", "type": "Tensor[]" },
      { "name": "doubles", "type": "float64[]" },
      { "name": "longs", "type": "int64[]" },
      { "name": "packed_params", "type": "__torch__.torch.classes.quantized.LinearPackedParamsBase[]" }
    ]
  },
  {
    "name": "__torch__.torch.classes.xnnpack.Conv2dOpContext",
    "inputs": [
      { "name": "weight", "type": "Tensor" },
      { "name": "bias", "type": "Tensor", "optional": true },
      { "name": "stride", "type": "int64[]" },
      { "name": "padding", "type": "int64[]" },
      { "name": "dilation", "type": "int64[]" },
      { "name": "groups", "type": "int64" },
      { "name": "output_min", "type": "int64[]", "optional": true },
      { "name": "output_max", "type": "int64[]", "optional": true }
    ]
  },
  {
    "name": "__torch__.torch.classes.xnnpack.LinearOpContext",
    "inputs": [
      { "name": "weight", "type": "Tensor" },
      { "name": "bias", "type": "Tensor", "optional": true },
      { "name": "output_min", "type": "int64[]", "optional": true },
      { "name": "output_max", "type": "int64[]", "optional": true }
    ]
  },
  {
    "name": "torch.nn.modules.activation.ELU",
    "category": "Activation"
  },
  {
    "name": "torch.nn.modules.activation.GELU",
    "category": "Activation"
  },
  {
    "name": "torch.nn.modules.activation.GLU",
    "category": "Activation"
  },
  {
    "name": "torch.nn.modules.activation.Hardsigmoid",
    "category": "Activation"
  },
  {
    "name": "torch.nn.modules.activation.Hardswish",
    "category": "Activation"
  },
  {
    "name": "torch.nn.modules.activation.Hardtanh",
    "category": "Activation"
  },
  {
    "name": "torch.nn.modules.activation.LeakyReLU",
    "category": "Activation"
  },
  {
    "name": "torch.nn.modules.activation.LogSoftmax",
    "category": "Activation"
  },
  {
    "name": "torch.nn.modules.activation.PReLU",
    "category": "Activation"
  },
  {
    "name": "torch.nn.modules.activation.ReLU",
    "category": "Activation",
    "inputs": [
      { "name": "inplace", "default": false, "visible": false },
      { "name": "threshold", "default": 0 },
      { "name": "value", "default": 0 }
    ]
  },
  {
    "name": "torch.nn.modules.activation.ReLU6",
    "category": "Activation"
  },
  {
    "name": "torch.nn.modules.activation.SiLU",
    "category": "Activation"
  },
  {
    "name": "torch.nn.modules.activation.Sigmoid",
    "category": "Activation"
  },
  {
    "name": "torch.nn.modules.activation.Softmax",
    "category": "Activation"
  },
  {
    "name": "torch.nn.modules.activation.Softmax2d",
    "category": "Activation"
  },
  {
    "name": "torch.nn.modules.activation.Softplus",
    "category": "Activation"
  },
  {
    "name": "torch.nn.modules.activation.Tanh",
    "category": "Activation"
  },
  {
    "name": "torch.nn.modules.batchnorm.BatchNorm1d",
    "category": "Normalization"
  },
  {
    "name": "torch.nn.modules.batchnorm.BatchNorm2d",
    "category": "Normalization",
    "inputs": [
      { "name": "input" },
      { "name": "weight" },
      { "name": "bias" },
      { "name": "running_mean" },
      { "name": "running_var" },
      { "name": "num_batches_tracked", "visible": false },
      { "name": "eps", "default": 1e-05 },
      { "name": "momentum", "default": 0.1 },
      { "name": "affine", "default": true },
      { "name": "track_running_stats", "default": true }
    ]
  },
  {
    "name": "torch.nn.modules.conv.Conv1d",
    "category": "Layer",
    "inputs": [
      { "name": "input" },
      { "name": "weight" },
      { "name": "bias" },
      { "name": "output_padding", "visible": false },
      { "name": "in_channels", "visible": false },
      { "name": "out_channels", "visible": false },
      { "name": "groups", "default": 1 },
      { "name": "transposed", "default": false },
      { "name": "padding", "default": [ 0 ] },
      { "name": "dilation", "default": [ 1 ] },
      { "name": "stride", "default": [ 1 ] }
    ]
  },
  {
    "name": "torch.nn.modules.conv.Conv2d",
    "category": "Layer",
    "inputs": [
      { "name": "input" },
      { "name": "weight" },
      { "name": "bias" },
      { "name": "output_padding", "visible": false },
      { "name": "in_channels", "visible": false },
      { "name": "out_channels", "visible": false },
      { "name": "groups", "default": 1 },
      { "name": "transposed", "default": false },
      { "name": "padding", "default": [ 0, 0 ] },
      { "name": "dilation", "default": [ 1, 1 ] },
      { "name": "stride", "default": [ 1, 1 ] }
    ]
  },
  {
    "name": "torch.nn.modules.conv.Conv3d",
    "category": "Layer"
  },
  {
    "name": "torch.nn.modules.conv.ConvTranspose1d",
    "category": "Layer",
    "inputs": [
      { "name": "input" },
      { "name": "weight" },
      { "name": "bias" },
      { "name": "output_padding", "visible": false },
      { "name": "in_channels", "visible": false },
      { "name": "out_channels", "visible": false },
      { "name": "groups", "default": 1 },
      { "name": "transposed", "default": true },
      { "name": "padding", "default": [ 0 ] },
      { "name": "dilation", "default": [ 1 ] },
      { "name": "stride", "default": [ 1 ] }
    ]
  },
  {
    "name": "torch.nn.modules.conv.ConvTranspose2d",
    "category": "Layer",
    "inputs": [
      { "name": "input" },
      { "name": "weight" },
      { "name": "bias" },
      { "name": "output_padding", "visible": false },
      { "name": "in_channels", "visible": false },
      { "name": "out_channels", "visible": false },
      { "name": "groups", "default": 1 },
      { "name": "transposed", "default": true },
      { "name": "padding", "default": [ 0, 0 ] },
      { "name": "dilation", "default": [ 1, 1 ] },
      { "name": "stride", "default": [ 1, 1 ] }
    ]
  },
  {
    "name": "torch.nn.modules.conv.ConvTranspose3d",
    "category": "Layer"
  },
  {
    "name": "torch.nn.modules.dropout.Dropout",
    "category": "Dropout",
    "inputs": [
      { "name": "inplace", "default": false, "visible": false },
      { "name": "p", "default": 0.5 }
    ]
  },
  {
    "name": "torch.nn.modules.dropout.Dropout2d",
    "category": "Dropout",
    "inputs": [
      { "name": "inplace", "default": false, "visible": false },
      { "name": "p", "default": 0.5 }
    ]
  },
  {
    "name": "torch.nn.modules.instancenorm.InstanceNorm1d"
  },
  {
    "name": "torch.nn.modules.instancenorm.InstanceNorm2d"
  },
  {
    "name": "torch.nn.modules.instancenorm.InstanceNorm3d"
  },
  {
    "name": "torch.nn.modules.linear.Linear",
    "category": "Layer"
  },
  {
    "name": "torch.nn.modules.normalization.CrossMapLRN2d",
    "category": "Normalization",
    "inputs": [
      { "name": "alpha", "default": 0.0001 },
      { "name": "beta", "default": 0.75 },
      { "name": "k", "default": 1 }
    ]
  },
  {
    "name": "torch.nn.modules.normalization.GroupNorm",
    "category": "Normalization"
  },
  {
    "name": "torch.nn.modules.normalization.LayerNorm",
    "category": "Normalization"
  },
  {
    "name": "torch.nn.modules.padding.ConstantPad1d",
    "category": "Tensor"
  },
  {
    "name": "torch.nn.modules.padding.ConstantPad2d",
    "category": "Tensor"
  },
  {
    "name": "torch.nn.modules.padding.ConstantPad3d",
    "category": "Tensor"
  },
  {
    "name": "torch.nn.modules.padding.ReflectionPad1d",
    "category": "Tensor"
  },
  {
    "name": "torch.nn.modules.padding.ReflectionPad2d",
    "category": "Tensor"
  },
  {
    "name": "torch.nn.modules.padding.ReplicationPad1d",
    "category": "Tensor"
  },
  {
    "name": "torch.nn.modules.padding.ReplicationPad2d",
    "category": "Tensor"
  },
  {
    "name": "torch.nn.modules.padding.ReplicationPad3d",
    "category": "Tensor"
  },
  {
    "name": "torch.nn.modules.padding.ZeroPad2d",
    "category": "Tensor"
  },
  {
    "name": "torch.nn.modules.pixelshuffle.PixelShuffle"
  },
  {
    "name": "torch.nn.modules.pooling.AdaptiveAvgPool1d",
    "category": "Pool"
  },
  {
    "name": "torch.nn.modules.pooling.AdaptiveAvgPool2d",
    "category": "Pool"
  },
  {
    "name": "torch.nn.modules.pooling.AdaptiveAvgPool3d",
    "category": "Pool"
  },
  {
    "name": "torch.nn.modules.pooling.AdaptiveMaxPool1d",
    "category": "Pool"
  },
  {
    "name": "torch.nn.modules.pooling.AdaptiveMaxPool2d",
    "category": "Pool"
  },
  {
    "name": "torch.nn.modules.pooling.AdaptiveMaxPool3d",
    "category": "Pool"
  },
  {
    "name": "torch.nn.modules.pooling.AvgPool2d",
    "category": "Pool",
    "inputs": [
      { "name": "padding", "default": 0 },
      { "name": "count_include_pad", "default": true },
      { "name": "ceil_mode", "visible": false }
    ]
  },
  {
    "name": "torch.nn.modules.pooling.AvgPool3d",
    "category": "Pool"
  },
  {
    "name": "torch.nn.modules.pooling.MaxPool1d",
    "category": "Pool"
  },
  {
    "name": "torch.nn.modules.pooling.MaxPool2d",
    "category": "Pool",
    "inputs": [
      { "name": "input" },
      { "name": "padding", "default": 0 },
      { "name": "dilation", "default": 1 },
      { "name": "return_indices", "default": false },
      { "name": "ceil_mode", "visible": false }
    ]
  },
  {
    "name": "torch.nn.modules.pooling.MaxPool3d",
    "category": "Pool"
  },
  {
    "name": "torch.nn.modules.pooling.MaxUnpool1d",
    "category": "Pool"
  },
  {
    "name": "torch.nn.modules.pooling.MaxUnpool2d",
    "category": "Pool"
  },
  {
    "name": "torch.nn.modules.pooling.MaxUnpool3d",
    "category": "Pool"
  },
  {
    "name": "torch.nn.modules.rnn.GRU",
    "category": "Layer"
  },
  {
    "name": "torch.nn.modules.rnn.GRUCell",
    "category": "Layer"
  },
  {
    "name": "torch.nn.modules.rnn.LSTM",
    "category": "Layer",
    "inputs": [
      { "name": "input" },
      { "name": "weight_ih_l0", "visible": false },
      { "name": "weight_hh_l0", "visible": false },
      { "name": "bias_ih_l0", "visible": false },
      { "name": "bias_hh_l0", "visible": false },
      { "name": "weight_ih_l1", "visible": false },
      { "name": "weight_hh_l1", "visible": false },
      { "name": "bias_ih_l1", "visible": false },
      { "name": "bias_hh_l1", "visible": false },
      { "name": "dropout", "default": 0 },
      { "name": "dropout_state", "default": {} },
      { "name": "num_layers", "default": 1 },
      { "name": "batch_first", "visible": false },
      { "name": "bidirectional", "visible": false },
      { "name": "bias", "visible": false }
    ]
  },
  {
    "name": "torch.nn.modules.rnn.LSTMCell",
    "category": "Layer"
  },
  {
    "name": "torch.nn.modules.rnn.RNN",
    "category": "Layer"
  },
  {
    "name": "torch.nn.modules.sparse.Embedding",
    "category": "Transform",
    "inputs": [
      { "name": "norm_type", "default": 2 },
      { "name": "scale_grad_by_freq", "default": false },
      { "name": "sparse", "default": false },
      { "name": "max_norm", "default": null },
      { "name": "padding_idx", "default": null }
    ]
  },
  {
    "name": "torch.nn.modules.upsampling.Upsample",
    "category": "Data"
  },
  {
    "name": "_caffe2::BBoxTransform(Tensor rois, Tensor deltas, Tensor im_info, float[] weights, bool apply_scale, bool rotated, bool angle_bound_on, int angle_bound_lo, int angle_bound_hi, float clip_angle_thresh, bool legacy_plus_one, Tensor[]? _caffe2_preallocated_outputs=None) -> (Tensor output_0, Tensor output_1)"
  },
  {
    "name": "_caffe2::BatchPermutation(Tensor X, Tensor indices, Tensor[]? _caffe2_preallocated_outputs=None) -> Tensor"
  },
  {
    "name": "_caffe2::BoxWithNMSLimit(Tensor scores, Tensor boxes, Tensor batch_splits, float score_thresh, float nms, int detections_per_im, bool soft_nms_enabled, str soft_nms_method, float soft_nms_sigma, float soft_nms_min_score_thres, bool rotated, bool cls_agnostic_bbox_reg, bool input_boxes_include_bg_cls, bool output_classes_include_bg_cls, bool legacy_plus_one, Tensor[]? _caffe2_preallocated_outputs=None) -> (Tensor scores, Tensor boxes, Tensor classes, Tensor batch_splits, Tensor keeps, Tensor keeps_size)"
  },
  {
    "name": "_caffe2::CollectAndDistributeFpnRpnProposals(Tensor[] input_list, int roi_canonical_scale, int roi_canonical_level, int roi_max_level, int roi_min_level, int rpn_max_level, int rpn_min_level, int rpn_post_nms_topN, bool legacy_plus_one, Tensor[]? _caffe2_preallocated_outputs=None) -> (Tensor rois, Tensor rois_fpn2, Tensor rois_fpn3, Tensor rois_fpn4, Tensor rois_fpn5, Tensor rois_idx_restore_int32)"
  },
  {
    "name": "_caffe2::CollectRpnProposals(Tensor[] input_list, int rpn_max_level, int rpn_min_level, int rpn_post_nms_topN, Tensor[]? _caffe2_preallocated_outputs=None) -> (Tensor rois)"
  },
  {
    "name": "_caffe2::CopyCPUToGPU(Tensor input, Tensor[]? _caffe2_preallocated_outputs=None) -> Tensor"
  },
  {
    "name": "_caffe2::CopyGPUToCPU(Tensor input, Tensor[]? _caffe2_preallocated_outputs=None) -> Tensor"
  },
  {
    "name": "_caffe2::DistributeFpnProposals(Tensor rois, int roi_canonical_scale, int roi_canonical_level, int roi_max_level, int roi_min_level, bool legacy_plus_one, Tensor[]? _caffe2_preallocated_outputs=None) -> (Tensor rois_fpn2, Tensor rois_fpn3, Tensor rois_fpn4, Tensor rois_fpn5, Tensor rois_idx_restore_int32)"
  },
  {
    "name": "_caffe2::GenerateProposals(Tensor scores, Tensor bbox_deltas, Tensor im_info, Tensor anchors, float spatial_scale, int pre_nms_topN, int post_nms_topN, float nms_thresh, float min_size, bool angle_bound_on, int angle_bound_lo, int angle_bound_hi, float clip_angle_thresh, bool legacy_plus_one, Tensor[]? _caffe2_preallocated_outputs=None) -> (Tensor output_0, Tensor output_1)"
  },
  {
    "name": "_caffe2::RoIAlign(Tensor features, Tensor rois, str order, float spatial_scale, int pooled_h, int pooled_w, int sampling_ratio, bool aligned, Tensor[]? _caffe2_preallocated_outputs=None) -> Tensor"
  },
  {
    "name": "aqlm::code2x8_lut_matmat.out(Tensor input, Tensor codes, Tensor codebooks, Tensor scales, Tensor? bias, Tensor(a!) out) -> Tensor(a!)"
  },
  {
    "name": "aten::Bool.Tensor(Tensor a) -> bool"
  },
  {
    "name": "aten::Bool.int(int a) -> bool"
  },
  {
    "name": "aten::Bool.float(float a) -> bool"
  },
  {
    "name": "aten::Complex.Scalar(Scalar a) -> complex"
  },
  {
    "name": "aten::Complex.Tensor_Tensor(Tensor a, Tensor b) -> complex"
  },
  {
    "name": "aten::Complex.int_bool(int x, bool y) -> complex"
  },
  {
    "name": "aten::Complex.bool_int(bool x, int y) -> complex"
  },
  {
    "name": "aten::Complex.float_bool(float x, bool y) -> complex"
  },
  {
    "name": "aten::Complex.bool_float(bool x, float y) -> complex"
  },
  {
    "name": "aten::Complex.float_int(float x, int y) -> complex"
  },
  {
    "name": "aten::Complex.int_float(int x, float y) -> complex"
  },
  {
    "name": "aten::Complex.int_int(int x, int y) -> complex"
  },
  {
    "name": "aten::Complex.bool_bool(bool x, bool y) -> complex"
  },
  {
    "name": "aten::Complex.float_float(float x, float y) -> complex"
  },
  {
    "name": "aten::Complex.Tensor_float(Tensor x, float y) -> complex"
  },
  {
    "name": "aten::Complex.float_Tensor(float x, Tensor y) -> complex"
  },
  {
    "name": "aten::Complex.Tensor_int(Tensor x, int y) -> complex"
  },
  {
    "name": "aten::Complex.int_Tensor(int x, Tensor y) -> complex"
  },
  {
    "name": "aten::Complex.Tensor_bool(Tensor x, bool y) -> complex"
  },
  {
    "name": "aten::Complex.bool_Tensor(bool x, Tensor y) -> complex"
  },
  {
    "name": "aten::ComplexImplicit(Tensor a) -> complex"
  },
  {
    "name": "aten::Delete.t(t[](a!) self, int idx) -> ()"
  },
  {
    "name": "aten::Delete.Dict_str(Dict(str, t)(a!) self, str key) -> ()"
  },
  {
    "name": "aten::Delete.Dict_int(Dict(int, t)(a!) self, int key) -> ()"
  },
  {
    "name": "aten::Delete.Dict_bool(Dict(bool, t)(a!) self, bool key) -> ()"
  },
  {
    "name": "aten::Delete.Dict_float(Dict(float, t)(a!) self, float key) -> ()"
  },
  {
    "name": "aten::Delete.Dict_complex(Dict(complex, t)(a!) self, complex key) -> ()"
  },
  {
    "name": "aten::Delete.Dict_Tensor(Dict(Tensor, t)(a!) self, Tensor key) -> ()"
  },
  {
    "name": "aten::Float.Tensor(Tensor a) -> float"
  },
  {
    "name": "aten::Float.Scalar(Scalar a) -> float"
  },
  {
    "name": "aten::Float.int(int a) -> float"
  },
  {
    "name": "aten::Float.bool(bool a) -> float"
  },
  {
    "name": "aten::Float.str(str a) -> float"
  },
  {
    "name": "aten::FloatImplicit(Tensor a) -> float"
  },
  {
    "name": "aten::Int.Tensor(Tensor a) -> int"
  },
  {
    "name": "aten::Int.bool(bool a) -> int"
  },
  {
    "name": "aten::Int.float(float a) -> int"
  },
  {
    "name": "aten::Int.Scalar(Scalar a) -> int"
  },
  {
    "name": "aten::Int.str(str a) -> int"
  },
  {
    "name": "aten::IntImplicit(Tensor a) -> int"
  },
  {
    "name": "aten::ScalarImplicit(Tensor a) -> Scalar"
  },
  {
    "name": "aten::Size(int[] sizes) -> int[]"
  },
  {
    "name": "aten::__and__.Scalar(Tensor self, Scalar other) -> Tensor"
  },
  {
    "name": "aten::__and__.Tensor(Tensor self, Tensor other) -> Tensor"
  },
  {
    "name": "aten::__and__.bool(bool a, bool b) -> bool"
  },
  {
    "name": "aten::__and__.int(int a, int b) -> int"
  },
  {
    "name": "aten::__contains__.int_list(int[] l, int item) -> bool"
  },
  {
    "name": "aten::__contains__.str_list(str[] l, str item) -> bool"
  },
  {
    "name": "aten::__contains__.str(Dict(str, t) dict, str key) -> bool"
  },
  {
    "name": "aten::__contains__.int(Dict(int, t) dict, int key) -> bool"
  },
  {
    "name": "aten::__contains__.bool(Dict(bool, t) dict, bool key) -> bool"
  },
  {
    "name": "aten::__contains__.float(Dict(float, t) dict, float key) -> bool"
  },
  {
    "name": "aten::__contains__.complex(Dict(complex, t) dict, complex key) -> bool"
  },
  {
    "name": "aten::__contains__.Tensor(Dict(Tensor, t) dict, Tensor key) -> bool"
  },
  {
    "name": "aten::__contains__.float_list(float[] l, float item) -> bool"
  },
  {
    "name": "aten::__derive_index(int index, int start, int step) -> int"
  },
  {
    "name": "aten::__getitem__.t(t[](a) list, int idx) -> t(*)"
  },
  {
    "name": "aten::__getitem__.str(str s, int index) -> str"
  },
  {
    "name": "aten::__getitem__.Dict_str(Dict(str, t) self, str key) -> t(*)"
  },
  {
    "name": "aten::__getitem__.Dict_int(Dict(int, t) self, int key) -> t(*)"
  },
  {
    "name": "aten::__getitem__.Dict_bool(Dict(bool, t) self, bool key) -> t(*)"
  },
  {
    "name": "aten::__getitem__.Dict_float(Dict(float, t) self, float key) -> t(*)"
  },
  {
    "name": "aten::__getitem__.Dict_complex(Dict(complex, t) self, complex key) -> t(*)"
  },
  {
    "name": "aten::__getitem__.Dict_Tensor(Dict(Tensor, t) self, Tensor key) -> t(*)"
  },
  {
    "name": "aten::__iand__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)"
  },
  {
    "name": "aten::__iand__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)"
  },
  {
    "name": "aten::__ilshift__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)"
  },
  {
    "name": "aten::__ilshift__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)"
  },
  {
    "name": "aten::__interpolate.scale_list(Tensor input, int? size=None, float[]? scale_factor=None, str mode=\"nearest\", bool? align_corners=None, bool? recompute_scale_factor=None, bool antialias=False) -> Tensor"
  },
  {
    "name": "aten::__interpolate.size_list_scale_list(Tensor input, int[]? size=None, float[]? scale_factor=None, str mode=\"nearest\", bool? align_corners=None, bool? recompute_scale_factor=None, bool antialias=False) -> Tensor"
  },
  {
    "name": "aten::__interpolate(Tensor input, int? size=None, float? scale_factor=None, str mode=\"nearest\", bool? align_corners=None, bool? recompute_scale_factor=None, bool antialias=False) -> Tensor"
  },
  {
    "name": "aten::__interpolate.size_list(Tensor input, int[]? size=None, float? scale_factor=None, str mode=\"nearest\", bool? align_corners=None, bool? recompute_scale_factor=None, bool antialias=False) -> Tensor"
  },
  {
    "name": "aten::__ior__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)"
  },
  {
    "name": "aten::__ior__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)"
  },
  {
    "name": "aten::__irshift__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)"
  },
  {
    "name": "aten::__irshift__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)"
  },
  {
    "name": "aten::__is__(t1 self, t2 obj) -> bool"
  },
  {
    "name": "aten::__isnot__(t1 self, t2 obj) -> bool"
  },
  {
    "name": "aten::__ixor__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)"
  },
  {
    "name": "aten::__ixor__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)"
  },
  {
    "name": "aten::__lshift__.Tensor(Tensor self, Tensor other) -> Tensor"
  },
  {
    "name": "aten::__lshift__.Scalar(Tensor self, Scalar other) -> Tensor"
  },
  {
    "name": "aten::__lshift__.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)"
  },
  {
    "name": "aten::__lshift__.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  },
  {
    "name": "aten::__lshift__.int(int a, int b) -> int"
  },
  {
    "name": "aten::__not__(bool self) -> bool"
  },
  {
    "name": "aten::__or__.Tensor(Tensor self, Tensor other) -> Tensor"
  },
  {
    "name": "aten::__or__.Scalar(Tensor self, Scalar other) -> Tensor"
  },
  {
    "name": "aten::__or__.bool(bool a, bool b) -> bool"
  },
  {
    "name": "aten::__or__.int(int a, int b) -> int"
  },
  {
    "name": "aten::__range_length(int lo, int hi, int step) -> int"
  },
  {
    "name": "aten::__rshift__.Tensor(Tensor self, Tensor other) -> Tensor"
  },
  {
    "name": "aten::__rshift__.Scalar(Tensor self, Scalar other) -> Tensor"
  },
  {
    "name": "aten::__rshift__.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)"
  },
  {
    "name": "aten::__rshift__.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  },
  {
    "name": "aten::__rshift__.int(int a, int b) -> int"
  },
  {
    "name": "aten::__upsample(Tensor input, int? size=None, int? scale_factor=None, str mode=\"nearest\", bool? align_corners=None) -> Tensor",
    "category": "Layer"
  },
  {
    "name": "aten::__upsample.size_list(Tensor input, int[]? size=None, int? scale_factor=None, str mode=\"nearest\", bool? align_corners=None) -> Tensor",
    "category": "Layer"
  },
  {
    "name": "aten::__upsample_bilinear(Tensor input, int? size=None, int? scale_factor=None) -> Tensor"
  },
  {
    "name": "aten::__upsample_bilinear.size_list(Tensor input, int[]? size=None, int? scale_factor=None) -> Tensor"
  },
  {
    "name": "aten::__upsample_bilinear.scale_list(Tensor input, int? size=None, int[]? scale_factor=None) -> Tensor"
  },
  {
    "name": "aten::__upsample_bilinear.size_list_scale_list(Tensor input, int[]? size=None, int[]? scale_factor=None) -> Tensor"
  },
  {
    "name": "aten::__xor__.Tensor(Tensor self, Tensor other) -> Tensor"
  },
  {
    "name": "aten::__xor__.Scalar(Tensor self, Scalar other) -> Tensor"
  },
  {
    "name": "aten::__xor__.bool(bool a, bool b) -> bool"
  },
  {
    "name": "aten::__xor__.int(int a, int b) -> int"
  },
  {
    "name": "aten::_adaptive_avg_pool2d(Tensor self, SymInt[2] output_size) -> Tensor"
  },
  {
    "name": "aten::_adaptive_avg_pool2d.out(Tensor self, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!)"
  },
  {
    "name": "aten::_add_relu.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor"
  },
  770. {
  771. "name": "aten::_add_relu.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)"
  772. },
  773. {
  774. "name": "aten::_add_relu.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor"
  775. },
  776. {
  777. "name": "aten::_add_relu.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!)"
  778. },
  779. {
  780. "name": "aten::_add_relu_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)"
  781. },
  782. {
  783. "name": "aten::_add_relu_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)"
  784. },
  785. {
  786. "name": "aten::_aminmax(Tensor self) -> (Tensor, Tensor)"
  787. },
  788. {
  789. "name": "aten::_aminmax.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor, Tensor)"
  790. },
  791. {
  792. "name": "aten::_aminmax.out(Tensor self, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))"
  793. },
  794. {
  795. "name": "aten::_aminmax.dim_out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))"
  796. },
  797. {
  798. "name": "aten::_assert_scalar(Scalar self, str assert_msg) -> ()"
  799. },
  800. {
  801. "name": "aten::_assert_tensor_metadata(Tensor a, SymInt[]? size=None, SymInt[]? stride=None, ScalarType? dtype=None, *, Device? device=None, Layout? layout=None) -> ()"
  802. },
  803. {
  804. "name": "aten::_autocast_to_full_precision(Tensor(a) self, bool cuda_enabled, bool cpu_enabled) -> Tensor(a)"
  805. },
  806. {
  807. "name": "aten::_autocast_to_reduced_precision(Tensor(a) self, bool cuda_enabled, bool cpu_enabled, ScalarType cuda_dtype, ScalarType cpu_dtype) -> Tensor(a)"
  808. },
  809. {
  810. "name": "aten::_cast_Byte(Tensor self, bool non_blocking=False) -> Tensor"
  811. },
  812. {
  813. "name": "aten::_cast_Char(Tensor self, bool non_blocking=False) -> Tensor"
  814. },
  815. {
  816. "name": "aten::_cast_Double(Tensor self, bool non_blocking=False) -> Tensor"
  817. },
  818. {
  819. "name": "aten::_cast_Float(Tensor self, bool non_blocking=False) -> Tensor"
  820. },
  821. {
  822. "name": "aten::_cast_Half(Tensor self, bool non_blocking=False) -> Tensor"
  823. },
  824. {
  825. "name": "aten::_cast_Int(Tensor self, bool non_blocking=False) -> Tensor"
  826. },
  827. {
  828. "name": "aten::_cast_Long(Tensor self, bool non_blocking=False) -> Tensor"
  829. },
  830. {
  831. "name": "aten::_cast_Short(Tensor self, bool non_blocking=False) -> Tensor"
  832. },
  833. {
  834. "name": "aten::_cat(Tensor[] tensors, int dim=0) -> Tensor"
  835. },
  836. {
  837. "name": "aten::_cat.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)"
  838. },
  839. {
  840. "name": "aten::_cdist_forward(Tensor x1, Tensor x2, float p, int? compute_mode) -> Tensor"
  841. },
  842. {
  843. "name": "aten::_cdist_forward.out(Tensor x1, Tensor x2, float p, int? compute_mode, *, Tensor(a!) out) -> Tensor(a!)"
  844. },
  845. {
  846. "name": "aten::_coalesce.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  847. },
  848. {
  849. "name": "aten::_coalesce(Tensor self) -> Tensor"
  850. },
  851. {
  852. "name": "aten::_coalesced_(Tensor(a!) self, bool coalesced) -> Tensor(a!)"
  853. },
  854. {
  855. "name": "aten::_conj(Tensor(a) self) -> Tensor(a)"
  856. },
  857. {
  858. "name": "aten::_convolution.deprecated(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, int[] output_padding, SymInt groups, bool benchmark, bool deterministic, bool cudnn_enabled) -> Tensor",
  859. "category": "Layer"
  860. },
  861. {
  862. "name": "aten::_convolution(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32) -> Tensor",
  863. "category": "Layer"
  864. },
  865. {
  866. "name": "aten::_convolution.out(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!)"
  867. },
  868. {
  869. "name": "aten::_convolution_mode(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, str padding, SymInt[] dilation, SymInt groups) -> Tensor"
  870. },
  871. {
  872. "name": "aten::_ctc_loss(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank=0, bool zero_infinity=False) -> (Tensor, Tensor)"
  873. },
  874. {
  875. "name": "aten::_ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank=0, bool zero_infinity=False) -> (Tensor, Tensor)"
  876. },
  877. {
  878. "name": "aten::_ctc_loss.out(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank=0, bool zero_infinity=False, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))"
  879. },
  880. {
  881. "name": "aten::_ctc_loss.Tensor_out(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank=0, bool zero_infinity=False, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))"
  882. },
  883. {
  884. "name": "aten::_dim_arange(Tensor like, int dim) -> Tensor"
  885. },
  886. {
  887. "name": "aten::_embedding_bag(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False, int padding_idx=-1) -> (Tensor, Tensor, Tensor, Tensor)"
  888. },
  889. {
  890. "name": "aten::_embedding_bag.out(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False, int padding_idx=-1, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))"
  891. },
  892. {
  893. "name": "aten::_fake_quantize_learnable_per_tensor_affine(Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max, float grad_factor=1.) -> Tensor",
  894. "category": "Quantization"
  895. },
  896. {
  897. "name": "aten::_fake_quantize_learnable_per_tensor_affine.out(Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max, float grad_factor=1., *, Tensor(a!) out) -> Tensor(a!)",
  898. "category": "Quantization"
  899. },
  900. {
  901. "name": "aten::_fake_quantize_learnable_per_tensor_affine_backward(Tensor grad, Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max, float grad_factor=1.) -> (Tensor, Tensor, Tensor)",
  902. "category": "Quantization"
  903. },
  904. {
  905. "name": "aten::_fft_r2c(Tensor self, int[] dim, int normalization, bool onesided) -> Tensor"
  906. },
  907. {
  908. "name": "aten::_fft_r2c.out(Tensor self, int[] dim, int normalization, bool onesided, *, Tensor(a!) out) -> Tensor(a!)"
  909. },
  910. {
  911. "name": "aten::_has_compatible_shallow_copy_type(Tensor self, Tensor from) -> bool"
  912. },
  913. {
  914. "name": "aten::_indices(Tensor(a) self) -> Tensor(a)"
  915. },
  916. {
  917. "name": "aten::_infer_size(int[] a, int[] b) -> int[]"
  918. },
  919. {
  920. "name": "aten::_local_scalar_dense(Tensor self) -> Scalar"
  921. },
  922. {
  923. "name": "aten::_make_per_tensor_quantized_tensor(Tensor self, float scale, int zero_point) -> Tensor"
  924. },
  925. {
  926. "name": "aten::_make_per_tensor_quantized_tensor.out(Tensor self, float scale, int zero_point, *, Tensor(a!) out) -> Tensor(a!)"
  927. },
  928. {
  929. "name": "aten::_native_batch_norm_legit(Tensor input, Tensor? weight, Tensor? bias, Tensor(a!) running_mean, Tensor(b!) running_var, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor)"
  930. },
  931. {
  932. "name": "aten::_native_batch_norm_legit.no_stats(Tensor input, Tensor? weight, Tensor? bias, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor)"
  933. },
  934. {
  935. "name": "aten::_native_batch_norm_legit.out(Tensor input, Tensor? weight, Tensor? bias, Tensor(a!) running_mean, Tensor(b!) running_var, bool training, float momentum, float eps, *, Tensor(d!) out, Tensor(e!) save_mean, Tensor(f!) save_invstd) -> (Tensor(d!), Tensor(e!), Tensor(f!))"
  936. },
  937. {
  938. "name": "aten::_native_batch_norm_legit.no_stats_out(Tensor input, Tensor? weight, Tensor? bias, bool training, float momentum, float eps, *, Tensor(a!) out, Tensor(b!) save_mean, Tensor(c!) save_invstd) -> (Tensor(a!), Tensor(b!), Tensor(c!))"
  939. },
  940. {
  941. "name": "aten::_native_batch_norm_legit_functional(Tensor input, Tensor? weight, Tensor? bias, Tensor running_mean, Tensor running_var, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor, Tensor running_mean_out, Tensor running_var_out)",
  942. "category": "Normalization"
  943. },
  944. {
  945. "name": "aten::_native_batch_norm_legit_no_training(Tensor input, Tensor? weight, Tensor? bias, Tensor running_mean, Tensor running_var, float momentum, float eps) -> (Tensor, Tensor, Tensor)",
  946. "category": "Normalization"
  947. },
  948. {
  949. "name": "aten::_native_batch_norm_legit_no_training.out(Tensor input, Tensor? weight, Tensor? bias, Tensor running_mean, Tensor running_var, float momentum, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))",
  950. "category": "Normalization"
  951. },
  952. {
  953. "name": "aten::_native_multi_head_attention(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None, bool need_weights=True, bool average_attn_weights=True, int? mask_type=None) -> (Tensor, Tensor)",
  954. "category": "Attention"
  955. },
  956. {
  957. "name": "aten::_native_multi_head_attention.out(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None, bool need_weights=True, bool average_attn_weights=True, int? mask_type=None, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))"
  958. },
  959. {
  960. "name": "aten::_nested_tensor_from_mask(Tensor t, Tensor mask, bool mask_check=True) -> Tensor"
  961. },
  962. {
  963. "name": "aten::_nested_tensor_from_mask.out(Tensor t, Tensor mask, bool mask_check=True, *, Tensor(a!) out) -> Tensor(a!)"
  964. },
  965. {
  966. "name": "aten::_nested_tensor_from_mask_left_aligned(Tensor t, Tensor mask) -> bool"
  967. },
  968. {
  969. "name": "aten::_pack_padded_sequence(Tensor input, Tensor lengths, bool batch_first) -> (Tensor, Tensor)"
  970. },
  971. {
  972. "name": "aten::_pack_padded_sequence.out(Tensor input, Tensor lengths, bool batch_first, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))"
  973. },
  974. {
  975. "name": "aten::_pad_packed_sequence(Tensor data, Tensor batch_sizes, bool batch_first, Scalar padding_value, int total_length) -> (Tensor, Tensor)"
  976. },
  977. {
  978. "name": "aten::_prelu_kernel(Tensor self, Tensor weight) -> Tensor"
  979. },
  980. {
  981. "name": "aten::_scaled_dot_product_efficient_attention(Tensor query, Tensor key, Tensor value, Tensor? attn_bias, bool compute_log_sumexp, float dropout_p=0., bool is_causal=False, *, float? scale=None) -> (Tensor output, Tensor log_sumexp, Tensor philox_seed, Tensor philox_offset)"
  982. },
  983. {
  984. "name": "aten::_scaled_dot_product_flash_attention(Tensor query, Tensor key, Tensor value, float dropout_p=0., bool is_causal=False, bool return_debug_mask=False, *, float? scale=None) -> (Tensor output, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, SymInt max_q, SymInt max_k, Tensor rng_state, Tensor unused, Tensor debug_attn_mask)"
  985. },
  986. {
  987. "name": "aten::_scaled_dot_product_flash_attention.quantized(Tensor query, Tensor key, Tensor value, Tensor? q_descale, Tensor? k_descale, Tensor? v_descale, float dropout_p=0., bool is_causal=False, bool return_debug_mask=False, *, float? scale=None) -> (Tensor output, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, SymInt max_q, SymInt max_k, Tensor rng_state, Tensor unused, Tensor debug_attn_mask)"
  988. },
  989. {
  990. "name": "aten::_scaled_dot_product_flash_attention_for_cpu(Tensor query, Tensor key, Tensor value, float dropout_p=0., bool is_causal=False, *, Tensor? attn_mask=None, float? scale=None) -> (Tensor output, Tensor logsumexp)"
  991. },
  992. {
  993. "name": "aten::_set_item.t(t[](a!) l, int idx, t(b -> *) el) -> t[](a!)"
  994. },
  995. {
  996. "name": "aten::_set_item.str(Dict(str, t)(a!) l, str(b -> *) idx, t(c -> *) v) -> ()"
  997. },
  998. {
  999. "name": "aten::_set_item.int(Dict(int, t)(a!) l, int(b -> *) idx, t(c -> *) v) -> ()"
  1000. },
  1001. {
  1002. "name": "aten::_set_item.bool(Dict(bool, t)(a!) l, bool(b -> *) idx, t(c -> *) v) -> ()"
  1003. },
  1004. {
  1005. "name": "aten::_set_item.float(Dict(float, t)(a!) l, float(b -> *) idx, t(c -> *) v) -> ()"
  1006. },
  1007. {
  1008. "name": "aten::_set_item.complex(Dict(complex, t)(a!) l, complex(b -> *) idx, t(c -> *) v) -> ()"
  1009. },
  1010. {
  1011. "name": "aten::_set_item.Tensor(Dict(Tensor, t)(a!) l, Tensor(b -> *) idx, t(c -> *) v) -> ()"
  1012. },
  1013. {
  1014. "name": "aten::_shape_as_tensor(Tensor self) -> Tensor"
  1015. },
  1016. {
  1017. "name": "aten::_softmax(Tensor self, int dim, bool half_to_float) -> Tensor",
  1018. "category": "Activation"
  1019. },
  1020. {
  1021. "name": "aten::_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!)"
  1022. },
  1023. {
  1024. "name": "aten::_sparse_coo_tensor_unsafe(Tensor indices, Tensor values, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool? is_coalesced=None) -> Tensor",
  1025. "category": "Tensor"
  1026. },
  1027. {
  1028. "name": "aten::_sparse_mm(Tensor sparse, Tensor dense) -> Tensor"
  1029. },
  1030. {
  1031. "name": "aten::_sparse_mm.reduce(Tensor sparse, Tensor dense, str reduce) -> Tensor"
  1032. },
  1033. {
  1034. "name": "aten::_test_serialization_subcmul(Tensor self, Tensor other, Scalar alpha=1) -> Tensor"
  1035. },
  1036. {
  1037. "name": "aten::_thnn_fused_gru_cell(Tensor input_gates, Tensor hidden_gates, Tensor hx, Tensor? input_bias=None, Tensor? hidden_bias=None) -> (Tensor, Tensor)"
  1038. },
  1039. {
  1040. "name": "aten::_thnn_fused_gru_cell.out(Tensor input_gates, Tensor hidden_gates, Tensor hx, Tensor? input_bias=None, Tensor? hidden_bias=None, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))"
  1041. },
  1042. {
  1043. "name": "aten::_thnn_fused_lstm_cell(Tensor input_gates, Tensor hidden_gates, Tensor cx, Tensor? input_bias=None, Tensor? hidden_bias=None) -> (Tensor, Tensor, Tensor)"
  1044. },
  1045. {
  1046. "name": "aten::_thnn_fused_lstm_cell.out(Tensor input_gates, Tensor hidden_gates, Tensor cx, Tensor? input_bias=None, Tensor? hidden_bias=None, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))"
  1047. },
  1048. {
  1049. "name": "aten::_to_copy(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool non_blocking=False, MemoryFormat? memory_format=None) -> Tensor"
  1050. },
  1051. {
  1052. "name": "aten::_to_copy.out(Tensor self, *, bool non_blocking=False, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)"
  1053. },
  1054. {
  1055. "name": "aten::_transformer_encoder_layer_fwd(Tensor src, int embed_dim, int num_heads, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, bool use_gelu, bool norm_first, float eps, Tensor norm_weight_1, Tensor norm_bias_1, Tensor norm_weight_2, Tensor norm_bias_2, Tensor ffn_weight_1, Tensor ffn_bias_1, Tensor ffn_weight_2, Tensor ffn_bias_2, Tensor? mask=None, int? mask_type=None) -> Tensor"
  1056. },
  1057. {
  1058. "name": "aten::_transformer_encoder_layer_fwd.out(Tensor src, int embed_dim, int num_heads, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, bool use_gelu, bool norm_first, float eps, Tensor norm_weight_1, Tensor norm_bias_1, Tensor norm_weight_2, Tensor norm_bias_2, Tensor ffn_weight_1, Tensor ffn_bias_1, Tensor ffn_weight_2, Tensor ffn_bias_2, Tensor? mask=None, int? mask_type=None, *, Tensor(a!) out) -> Tensor(a!)"
  1059. },
  1060. {
  1061. "name": "aten::_unique2(Tensor self, bool sorted=True, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor)"
  1062. },
  1063. {
  1064. "name": "aten::_unique2.out(Tensor self, bool sorted=True, bool return_inverse=False, bool return_counts=False, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))"
  1065. },
  1066. {
  1067. "name": "aten::_unsafe_index.Tensor(Tensor self, Tensor?[] indices) -> Tensor"
  1068. },
  1069. {
  1070. "name": "aten::_unsafe_index.Tensor_hacked_twin(Tensor self, Tensor[] indices) -> Tensor"
  1071. },
  1072. {
  1073. "name": "aten::_unsafe_view(Tensor self, SymInt[] size) -> Tensor"
  1074. },
  1075. {
  1076. "name": "aten::_unsafe_view.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)"
  1077. },
  1078. {
  1079. "name": "aten::_unwrap_optional(t(a)? optional) -> t(a)"
  1080. },
  1081. {
  1082. "name": "aten::_upsample_bicubic2d_aa(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor"
  1083. },
  1084. {
  1085. "name": "aten::_upsample_bicubic2d_aa.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor"
  1086. },
  1087. {
  1088. "name": "aten::_upsample_bicubic2d_aa.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)"
  1089. },
  1090. {
  1091. "name": "aten::_upsample_bilinear2d_aa(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor"
  1092. },
  1093. {
  1094. "name": "aten::_upsample_bilinear2d_aa.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor"
  1095. },
  1096. {
  1097. "name": "aten::_upsample_bilinear2d_aa.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)"
  1098. },
  1099. {
  1100. "name": "aten::_upsample_nearest_exact1d(Tensor self, SymInt[1] output_size, float? scales=None) -> Tensor"
  1101. },
  1102. {
  1103. "name": "aten::_upsample_nearest_exact1d.out(Tensor self, SymInt[1] output_size, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)"
  1104. },
  1105. {
  1106. "name": "aten::_upsample_nearest_exact1d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor"
  1107. },
  1108. {
  1109. "name": "aten::_upsample_nearest_exact2d(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None) -> Tensor"
  1110. },
  1111. {
  1112. "name": "aten::_upsample_nearest_exact2d.out(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)"
  1113. },
  1114. {
  1115. "name": "aten::_upsample_nearest_exact2d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor"
  1116. },
  1117. {
  1118. "name": "aten::_upsample_nearest_exact3d(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor"
  1119. },
  1120. {
  1121. "name": "aten::_upsample_nearest_exact3d.out(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)"
  1122. },
  1123. {
  1124. "name": "aten::_upsample_nearest_exact3d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor"
  1125. },
  1126. {
  1127. "name": "aten::_weight_norm(Tensor v, Tensor g, int dim=0) -> Tensor"
  1128. },
  1129. {
  1130. "name": "aten::_weight_norm_differentiable_backward(Tensor grad_w, Tensor saved_v, Tensor saved_g, Tensor saved_norms, int dim) -> (Tensor, Tensor)"
  1131. },
  1132. {
  1133. "name": "aten::_weight_norm_interface(Tensor v, Tensor g, int dim=0) -> (Tensor, Tensor)"
  1134. },
  1135. {
  1136. "name": "aten::_weight_norm_interface.out(Tensor v, Tensor g, int dim=0, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))"
  1137. },
  1138. {
  1139. "name": "aten::_weight_norm_interface_backward(Tensor grad_w, Tensor saved_v, Tensor saved_g, Tensor saved_norms, int dim) -> (Tensor, Tensor)"
  1140. },
  1141. {
  1142. "name": "aten::_weight_norm_interface_backward.out(Tensor grad_w, Tensor saved_v, Tensor saved_g, Tensor saved_norms, int dim, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))"
  1143. },
  1144. {
  1145. "name": "aten::abs(Tensor self) -> Tensor"
  1146. },
  1147. {
  1148. "name": "aten::abs.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  1149. },
  1150. {
  1151. "name": "aten::abs_(Tensor(a!) self) -> Tensor(a!)"
  1152. },
  1153. {
  1154. "name": "aten::acos(Tensor self) -> Tensor"
  1155. },
  1156. {
  1157. "name": "aten::acos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  1158. },
  1159. {
  1160. "name": "aten::acos.int(int a) -> float"
  1161. },
  1162. {
  1163. "name": "aten::acos.float(float a) -> float"
  1164. },
  1165. {
  1166. "name": "aten::acos.complex(complex a) -> complex"
  1167. },
  1168. {
  1169. "name": "aten::acos.Scalar(Scalar a) -> Scalar"
  1170. },
  1171. {
  1172. "name": "aten::acos_(Tensor(a!) self) -> Tensor(a!)"
  1173. },
  1174. {
  1175. "name": "aten::acosh(Tensor self) -> Tensor"
  1176. },
  1177. {
  1178. "name": "aten::acosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  1179. },
  1180. {
  1181. "name": "aten::acosh.int(int a) -> float"
  1182. },
  1183. {
  1184. "name": "aten::acosh.float(float a) -> float"
  1185. },
  1186. {
  1187. "name": "aten::acosh.complex(complex a) -> complex"
  1188. },
  1189. {
  1190. "name": "aten::acosh.Scalar(Scalar a) -> Scalar"
  1191. },
  1192. {
  1193. "name": "aten::acosh_(Tensor(a!) self) -> Tensor(a!)"
  1194. },
  1195. {
  1196. "name": "aten::adaptive_avg_pool1d(Tensor self, int[1] output_size) -> Tensor",
  1197. "category": "Pool"
  1198. },
  1199. {
  1200. "name": "aten::adaptive_avg_pool1d.out(Tensor self, int[1] output_size, *, Tensor(a!) out) -> Tensor(a!)"
  1201. },
  1202. {
  1203. "name": "aten::adaptive_avg_pool2d(Tensor self, SymInt[2] output_size) -> Tensor",
  1204. "category": "Pool"
  1205. },
  1206. {
  1207. "name": "aten::adaptive_avg_pool2d.out(Tensor self, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!)",
  1208. "category": "Pool"
  1209. },
  1210. {
  1211. "name": "aten::adaptive_avg_pool3d(Tensor self, SymInt[3] output_size) -> Tensor",
  1212. "category": "Pool"
  1213. },
  1214. {
  1215. "name": "aten::adaptive_avg_pool3d.out(Tensor self, SymInt[3] output_size, *, Tensor(a!) out) -> Tensor(a!)"
  1216. },
  1217. {
  1218. "name": "aten::adaptive_max_pool1d(Tensor self, int[1] output_size) -> (Tensor, Tensor)",
  1219. "category": "Pool"
  1220. },
  1221. {
  1222. "name": "aten::adaptive_max_pool2d(Tensor self, int[2] output_size) -> (Tensor, Tensor)",
  1223. "category": "Pool"
  1224. },
  1225. {
  1226. "name": "aten::adaptive_max_pool2d.out(Tensor self, int[2] output_size, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))"
  1227. },
  1228. {
  1229. "name": "aten::adaptive_max_pool3d(Tensor self, int[3] output_size) -> (Tensor, Tensor)",
  1230. "category": "Pool"
  1231. },
  1232. {
  1233. "name": "aten::adaptive_max_pool3d.out(Tensor self, int[3] output_size, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))"
  1234. },
  1235. {
  1236. "name": "aten::add.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor"
  1237. },
  1238. {
  1239. "name": "aten::add.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor"
  1240. },
  1241. {
  1242. "name": "aten::add.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)"
  1243. },
  1244. {
  1245. "name": "aten::add.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!)"
  1246. },
  1247. {
  1248. "name": "aten::add.t(t[] a, t[] b) -> t[]"
  1249. },
  1250. {
  1251. "name": "aten::add.str(str a, str b) -> str"
  1252. },
  1253. {
  1254. "name": "aten::add.int(int a, int b) -> int"
  1255. },
  1256. {
  1257. "name": "aten::add.complex(complex a, complex b) -> complex"
  1258. },
  1259. {
  1260. "name": "aten::add.float(float a, float b) -> float"
  1261. },
  1262. {
  1263. "name": "aten::add.int_complex(int a, complex b) -> complex"
  1264. },
  1265. {
  1266. "name": "aten::add.complex_int(complex a, int b) -> complex"
  1267. },
  1268. {
  1269. "name": "aten::add.float_complex(float a, complex b) -> complex"
  1270. },
  1271. {
  1272. "name": "aten::add.complex_float(complex a, float b) -> complex"
  1273. },
  1274. {
  1275. "name": "aten::add.int_float(int a, float b) -> float"
  1276. },
  1277. {
  1278. "name": "aten::add.float_int(float a, int b) -> float"
  1279. },
  1280. {
  1281. "name": "aten::add(Scalar a, Scalar b) -> Scalar"
  1282. },
  1283. {
  1284. "name": "aten::add_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)"
  1285. },
  1286. {
  1287. "name": "aten::add_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)"
  1288. },
  1289. {
  1290. "name": "aten::add_.t(t[](a!) self, t[] b) -> t[]"
  1291. },
  1292. {
  1293. "name": "aten::addbmm(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor"
  1294. },
  1295. {
  1296. "name": "aten::addbmm.out(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)"
  1297. },
  1298. {
  1299. "name": "aten::addcdiv(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor"
  1300. },
  1301. {
  1302. "name": "aten::addcdiv.out(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1, Tensor(a!) out) -> Tensor(a!)"
  1303. },
  1304. {
  1305. "name": "aten::addcmul(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor"
  1306. },
  1307. {
  1308. "name": "aten::addcmul.out(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1, Tensor(a!) out) -> Tensor(a!)"
  1309. },
  1310. {
  1311. "name": "aten::addcmul_(Tensor(a!) self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor(a!)"
  1312. },
  1313. {
  1314. "name": "aten::addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor",
  1315. "category": "Layer"
  1316. },
  1317. {
  1318. "name": "aten::addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)"
  1319. },
  1320. {
  1321. "name": "aten::addmm.dtype_out(Tensor self, Tensor mat1, Tensor mat2, ScalarType out_dtype, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)"
  1322. },
  1323. {
  1324. "name": "aten::addmm.dtype(Tensor self, Tensor mat1, Tensor mat2, ScalarType out_dtype, *, Scalar beta=1, Scalar alpha=1) -> Tensor"
  1325. },
  1326. {
  1327. "name": "aten::addmm_(Tensor(a!) self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)"
  1328. },
  1329. {
  1330. "name": "aten::addmv(Tensor self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1) -> Tensor"
  1331. },
  1332. {
  1333. "name": "aten::addmv.out(Tensor self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)"
  1334. },
  1335. {
  1336. "name": "aten::addmv_(Tensor(a!) self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)"
  1337. },
  1338. {
  1339. "name": "aten::affine_grid_generator(Tensor theta, SymInt[] size, bool align_corners) -> Tensor"
  1340. },
  1341. {
  1342. "name": "aten::affine_grid_generator.out(Tensor theta, SymInt[] size, bool align_corners, *, Tensor(a!) out) -> Tensor(a!)"
  1343. },
  1344. {
  1345. "name": "aten::alias(Tensor(a) self) -> Tensor(a)"
  1346. },
  1347. {
  1348. "name": "aten::alias_copy(Tensor self) -> Tensor"
  1349. },
  1350. {
  1351. "name": "aten::alias_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  1352. },
  1353. {
  1354. "name": "aten::all(Tensor self) -> Tensor"
  1355. },
  1356. {
  1357. "name": "aten::all.dim(Tensor self, int dim, bool keepdim=False) -> Tensor"
  1358. },
  1359. {
  1360. "name": "aten::all.dims(Tensor self, int[]? dim=None, bool keepdim=False) -> Tensor"
  1361. },
  1362. {
  1363. "name": "aten::all.out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)"
  1364. },
  1365. {
  1366. "name": "aten::all.dims_out(Tensor self, int[]? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)"
  1367. },
  1368. {
  1369. "name": "aten::all.all_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  1370. },
  1371. {
  1372. "name": "aten::all.dimname(Tensor self, str dim, bool keepdim=False) -> Tensor"
  1373. },
  1374. {
  1375. "name": "aten::all.dimname_out(Tensor self, str dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)"
  1376. },
  1377. {
  1378. "name": "aten::all.int(int[] self) -> bool"
  1379. },
  1380. {
  1381. "name": "aten::all.float(float[] self) -> bool"
  1382. },
  1383. {
  1384. "name": "aten::all.bool(bool[] self) -> bool"
  1385. },
  1386. {
  1387. "name": "aten::allclose(Tensor self, Tensor other, float rtol=1.0000000000000001e-05, float atol=1e-08, bool equal_nan=False) -> bool"
  1388. },
  1389. {
  1390. "name": "aten::alpha_dropout(Tensor input, float p, bool train) -> Tensor",
  1391. "category": "Dropout"
  1392. },
  1393. {
  1394. "name": "aten::alpha_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)",
  1395. "category": "Dropout"
  1396. },
  1397. {
  1398. "name": "aten::amax(Tensor self, int[1] dim=[], bool keepdim=False) -> Tensor"
  1399. },
  1400. {
  1401. "name": "aten::amax.out(Tensor self, int[1] dim=[], bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)"
  1402. },
  1403. {
  1404. "name": "aten::amin(Tensor self, int[1] dim=[], bool keepdim=False) -> Tensor"
  1405. },
  1406. {
  1407. "name": "aten::amin.out(Tensor self, int[1] dim=[], bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)"
  1408. },
  1409. {
  1410. "name": "aten::aminmax(Tensor self, *, int? dim=None, bool keepdim=False) -> (Tensor min, Tensor max)"
  1411. },
  1412. {
  1413. "name": "aten::aminmax.out(Tensor self, *, int? dim=None, bool keepdim=False, Tensor(a!) min, Tensor(b!) max) -> (Tensor(a!) min, Tensor(b!) max)"
  1414. },
  1415. {
  1416. "name": "aten::angle(Tensor self) -> Tensor"
  1417. },
  1418. {
  1419. "name": "aten::angle.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  1420. },
  1421. {
  1422. "name": "aten::angle.int(int a) -> float"
  1423. },
  1424. {
  1425. "name": "aten::angle.float(float a) -> float"
  1426. },
  1427. {
  1428. "name": "aten::angle.complex(complex a) -> float"
  1429. },
  1430. {
  1431. "name": "aten::angle.Scalar(Scalar a) -> Scalar"
  1432. },
  1433. {
  1434. "name": "aten::any(Tensor self) -> Tensor"
  1435. },
  1436. {
  1437. "name": "aten::any.dim(Tensor self, int dim, bool keepdim=False) -> Tensor"
  1438. },
  1439. {
  1440. "name": "aten::any.dims(Tensor self, int[]? dim=None, bool keepdim=False) -> Tensor"
  1441. },
  1442. {
  1443. "name": "aten::any.out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)"
  1444. },
  1445. {
  1446. "name": "aten::any.dims_out(Tensor self, int[]? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)"
  1447. },
  1448. {
  1449. "name": "aten::any.all_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  1450. },
  1451. {
  1452. "name": "aten::any.dimname(Tensor self, str dim, bool keepdim=False) -> Tensor"
  1453. },
  1454. {
  1455. "name": "aten::any.dimname_out(Tensor self, str dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)"
  1456. },
  1457. {
  1458. "name": "aten::any.str(str[] self) -> bool"
  1459. },
  1460. {
  1461. "name": "aten::any.int(int[] self) -> bool"
  1462. },
  1463. {
  1464. "name": "aten::any.float(float[] self) -> bool"
  1465. },
  1466. {
  1467. "name": "aten::any.bool(bool[] self) -> bool"
  1468. },
  1469. {
  1470. "name": "aten::append.t(t[](a!) self, t(c -> *) el) -> t[](a!)"
  1471. },
  1472. {
  1473. "name": "aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  1474. },
  1475. {
  1476. "name": "aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  1477. },
  1478. {
  1479. "name": "aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  1480. },
  1481. {
  1482. "name": "aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)"
  1483. },
  1484. {
  1485. "name": "aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)"
  1486. },
  1487. {
  1488. "name": "aten::arange.start_out_(Scalar start, Scalar end) -> Tensor"
  1489. },
  1490. {
  1491. "name": "aten::arctan(Tensor self) -> Tensor"
  1492. },
  1493. {
  1494. "name": "aten::arctan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  1495. },
  1496. {
  1497. "name": "aten::arctan2(Tensor self, Tensor other) -> Tensor"
  1498. },
  1499. {
  1500. "name": "aten::arctan2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  1501. },
  1502. {
  1503. "name": "aten::arctan_(Tensor(a!) self) -> Tensor(a!)"
  1504. },
  1505. {
  1506. "name": "aten::argmax(Tensor self, int? dim=None, bool keepdim=False) -> Tensor"
  1507. },
  1508. {
  1509. "name": "aten::argmax.out(Tensor self, int? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)"
  1510. },
  1511. {
  1512. "name": "aten::argmin(Tensor self, int? dim=None, bool keepdim=False) -> Tensor"
  1513. },
  1514. {
  1515. "name": "aten::argmin.out(Tensor self, int? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)"
  1516. },
  1517. {
  1518. "name": "aten::argsort(Tensor self, int dim=-1, bool descending=False) -> Tensor"
  1519. },
  1520. {
  1521. "name": "aten::argsort.stable(Tensor self, *, bool stable, int dim=-1, bool descending=False) -> Tensor"
  1522. },
  1523. {
  1524. "name": "aten::argsort.stable_out(Tensor self, *, bool stable, int dim=-1, bool descending=False, Tensor(a!) out) -> Tensor(a!)"
  1525. },
  1526. {
  1527. "name": "aten::argsort.dimname(Tensor self, str dim, bool descending=False) -> Tensor"
  1528. },
  1529. {
  1530. "name": "aten::argwhere(Tensor self) -> Tensor"
  1531. },
  1532. {
  1533. "name": "aten::as_strided(Tensor(a) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a)"
  1534. },
  1535. {
  1536. "name": "aten::as_strided_(Tensor(a!) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a!)"
  1537. },
  1538. {
  1539. "name": "aten::as_strided_copy(Tensor self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor"
  1540. },
  1541. {
  1542. "name": "aten::as_strided_copy.out(Tensor self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None, *, Tensor(a!) out) -> Tensor(a!)"
  1543. },
  1544. {
  1545. "name": "aten::as_strided_scatter(Tensor self, Tensor src, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor"
  1546. },
  1547. {
  1548. "name": "aten::as_strided_scatter.out(Tensor self, Tensor src, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None, *, Tensor(a!) out) -> Tensor(a!)"
  1549. },
  1550. {
  1551. "name": "aten::as_tensor.bool(bool t, *, ScalarType? dtype=None, Device? device=None) -> Tensor"
  1552. },
  1553. {
  1554. "name": "aten::as_tensor.float(float t, *, ScalarType? dtype=None, Device? device=None) -> Tensor"
  1555. },
  1556. {
  1557. "name": "aten::as_tensor.int(int t, *, ScalarType? dtype=None, Device? device=None) -> Tensor"
  1558. },
  1559. {
  1560. "name": "aten::as_tensor.complex(complex t, *, ScalarType? dtype=None, Device? device=None) -> Tensor"
  1561. },
  1562. {
  1563. "name": "aten::as_tensor(Tensor(a) data, *, ScalarType? dtype=None, Device? device=None) -> Tensor(a|b)"
  1564. },
  1565. {
  1566. "name": "aten::as_tensor.list(t[] data, *, ScalarType? dtype=None, Device? device=None) -> Tensor"
  1567. },
  1568. {
  1569. "name": "aten::asin(Tensor self) -> Tensor"
  1570. },
  1571. {
  1572. "name": "aten::asin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  1573. },
  1574. {
  1575. "name": "aten::asin.int(int a) -> float"
  1576. },
  1577. {
  1578. "name": "aten::asin.float(float a) -> float"
  1579. },
  1580. {
  1581. "name": "aten::asin.complex(complex a) -> complex"
  1582. },
  1583. {
  1584. "name": "aten::asin.Scalar(Scalar a) -> Scalar"
  1585. },
  1586. {
  1587. "name": "aten::asinh(Tensor self) -> Tensor"
  1588. },
  1589. {
  1590. "name": "aten::asinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  1591. },
  1592. {
  1593. "name": "aten::asinh.int(int a) -> float"
  1594. },
  1595. {
  1596. "name": "aten::asinh.float(float a) -> float"
  1597. },
  1598. {
  1599. "name": "aten::asinh.complex(complex a) -> complex"
  1600. },
  1601. {
  1602. "name": "aten::asinh.Scalar(Scalar a) -> Scalar"
  1603. },
  1604. {
  1605. "name": "aten::atan(Tensor self) -> Tensor"
  1606. },
  1607. {
  1608. "name": "aten::atan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  1609. },
  1610. {
  1611. "name": "aten::atan.int(int a) -> float"
  1612. },
  1613. {
  1614. "name": "aten::atan.float(float a) -> float"
  1615. },
  1616. {
  1617. "name": "aten::atan.complex(complex a) -> complex"
  1618. },
  1619. {
  1620. "name": "aten::atan.Scalar(Scalar a) -> Scalar"
  1621. },
  1622. {
  1623. "name": "aten::atan2(Tensor self, Tensor other) -> Tensor"
  1624. },
  1625. {
  1626. "name": "aten::atan2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  1627. },
  1628. {
  1629. "name": "aten::atan2.int(int a, int b) -> float"
  1630. },
  1631. {
  1632. "name": "aten::atan2.float(float a, float b) -> float"
  1633. },
  1634. {
  1635. "name": "aten::atan2.int_float(int a, float b) -> float"
  1636. },
  1637. {
  1638. "name": "aten::atan2.float_int(float a, int b) -> float"
  1639. },
  1640. {
  1641. "name": "aten::atan2.Scalar_Scalar(Scalar a, Scalar b) -> float"
  1642. },
  1643. {
  1644. "name": "aten::atan2_(Tensor(a!) self, Tensor other) -> Tensor(a!)"
  1645. },
  1646. {
  1647. "name": "aten::atan_(Tensor(a!) self) -> Tensor(a!)"
  1648. },
  1649. {
  1650. "name": "aten::atanh(Tensor self) -> Tensor"
  1651. },
  1652. {
  1653. "name": "aten::atanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  1654. },
  1655. {
  1656. "name": "aten::atanh.int(int a) -> float"
  1657. },
  1658. {
  1659. "name": "aten::atanh.float(float a) -> float"
  1660. },
  1661. {
  1662. "name": "aten::atanh.complex(complex a) -> complex"
  1663. },
  1664. {
  1665. "name": "aten::atanh.Scalar(Scalar a) -> Scalar"
  1666. },
  1667. {
  1668. "name": "aten::atanh_(Tensor(a!) self) -> Tensor(a!)"
  1669. },
  1670. {
  1671. "name": "aten::avg_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=[0], bool ceil_mode=False, bool count_include_pad=True) -> Tensor",
  1672. "category": "Pool"
  1673. },
  1674. {
  1675. "name": "aten::avg_pool1d.out(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=[0], bool ceil_mode=False, bool count_include_pad=True, *, Tensor(a!) out) -> Tensor(a!)"
  1676. },
  1677. {
  1678. "name": "aten::avg_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None) -> Tensor",
  1679. "category": "Pool"
  1680. },
  1681. {
  1682. "name": "aten::avg_pool2d.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None, *, Tensor(a!) out) -> Tensor(a!)"
  1683. },
  1684. {
  1685. "name": "aten::avg_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None) -> Tensor",
  1686. "category": "Pool"
  1687. },
  1688. {
  1689. "name": "aten::avg_pool3d.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None, *, Tensor(a!) out) -> Tensor(a!)"
  1690. },
  1691. {
  1692. "name": "aten::baddbmm(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor"
  1693. },
  1694. {
  1695. "name": "aten::baddbmm.out(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)"
  1696. },
  1697. {
  1698. "name": "aten::baddbmm.dtype_out(Tensor self, Tensor batch1, Tensor batch2, ScalarType out_dtype, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)"
  1699. },
  1700. {
  1701. "name": "aten::baddbmm.dtype(Tensor self, Tensor batch1, Tensor batch2, ScalarType out_dtype, *, Scalar beta=1, Scalar alpha=1) -> Tensor"
  1702. },
  1703. {
  1704. "name": "aten::baddbmm_(Tensor(a!) self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)"
  1705. },
  1706. {
  1707. "name": "aten::batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps, bool cudnn_enabled) -> Tensor",
  1708. "category": "Normalization"
  1709. },
  1710. {
  1711. "name": "aten::bernoulli(Tensor self, *, Generator? generator=None) -> Tensor"
  1712. },
  1713. {
  1714. "name": "aten::bernoulli.out(Tensor self, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)"
  1715. },
  1716. {
  1717. "name": "aten::bernoulli.p(Tensor self, float p, *, Generator? generator=None) -> Tensor"
  1718. },
  1719. {
  1720. "name": "aten::bernoulli.Tensor(Tensor self, Tensor p, *, Generator? generator=None) -> Tensor"
  1721. },
  1722. {
  1723. "name": "aten::bernoulli.Tensor_out(Tensor self, Tensor p, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)"
  1724. },
  1725. {
  1726. "name": "aten::bernoulli.float_out(Tensor self, float p=0.5, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)"
  1727. },
  1728. {
  1729. "name": "aten::bernoulli_.Tensor(Tensor(a!) self, Tensor p, *, Generator? generator=None) -> Tensor(a!)"
  1730. },
  1731. {
  1732. "name": "aten::bernoulli_.float(Tensor(a!) self, float p=0.5, *, Generator? generator=None) -> Tensor(a!)"
  1733. },
  1734. {
  1735. "name": "aten::bilinear(Tensor input1, Tensor input2, Tensor weight, Tensor? bias=None) -> Tensor"
  1736. },
  1737. {
  1738. "name": "aten::bin(int i) -> str"
  1739. },
  1740. {
  1741. "name": "aten::binary_cross_entropy(Tensor self, Tensor target, Tensor? weight=None, int reduction=1) -> Tensor"
  1742. },
  1743. {
  1744. "name": "aten::binary_cross_entropy.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=1, *, Tensor(a!) out) -> Tensor(a!)"
  1745. },
  1746. {
  1747. "name": "aten::binary_cross_entropy_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight=None, int reduction=1) -> Tensor"
  1748. },
  1749. {
  1750. "name": "aten::binary_cross_entropy_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight=None, int reduction=1, *, Tensor(a!) grad_input) -> Tensor(a!)"
  1751. },
  1752. {
  1753. "name": "aten::binary_cross_entropy_with_logits(Tensor self, Tensor target, Tensor? weight=None, Tensor? pos_weight=None, int reduction=1) -> Tensor"
  1754. },
  1755. {
  1756. "name": "aten::binary_cross_entropy_with_logits.out(Tensor self, Tensor target, Tensor? weight=None, Tensor? pos_weight=None, int reduction=1, *, Tensor(a!) out) -> Tensor(a!)"
  1757. },
  1758. {
  1759. "name": "aten::bincount(Tensor self, Tensor? weights=None, SymInt minlength=0) -> Tensor"
  1760. },
  1761. {
  1762. "name": "aten::bincount.out(Tensor self, Tensor? weights=None, SymInt minlength=0, *, Tensor(a!) out) -> Tensor(a!)"
  1763. },
  1764. {
  1765. "name": "aten::binomial(Tensor count, Tensor prob, Generator? generator=None) -> Tensor"
  1766. },
  1767. {
  1768. "name": "aten::binomial.out(Tensor count, Tensor prob, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!)"
  1769. },
  1770. {
  1771. "name": "aten::bitwise_and.Tensor(Tensor self, Tensor other) -> Tensor"
  1772. },
  1773. {
  1774. "name": "aten::bitwise_and.Scalar(Tensor self, Scalar other) -> Tensor"
  1775. },
  1776. {
  1777. "name": "aten::bitwise_and.Scalar_Tensor(Scalar self, Tensor other) -> Tensor"
  1778. },
  1779. {
  1780. "name": "aten::bitwise_and.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  1781. },
  1782. {
  1783. "name": "aten::bitwise_and.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)"
  1784. },
  1785. {
  1786. "name": "aten::bitwise_and.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  1787. },
  1788. {
  1789. "name": "aten::bitwise_and_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)"
  1790. },
  1791. {
  1792. "name": "aten::bitwise_and_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)"
  1793. },
  1794. {
  1795. "name": "aten::bitwise_left_shift.Tensor(Tensor self, Tensor other) -> Tensor"
  1796. },
  1797. {
  1798. "name": "aten::bitwise_left_shift.Tensor_Scalar(Tensor self, Scalar other) -> Tensor"
  1799. },
  1800. {
  1801. "name": "aten::bitwise_left_shift.Scalar_Tensor(Scalar self, Tensor other) -> Tensor"
  1802. },
  1803. {
  1804. "name": "aten::bitwise_left_shift.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  1805. },
  1806. {
  1807. "name": "aten::bitwise_left_shift.Tensor_Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)"
  1808. },
  1809. {
  1810. "name": "aten::bitwise_left_shift.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  1811. },
  1812. {
  1813. "name": "aten::bitwise_left_shift_.Tensor_Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)"
  1814. },
  1815. {
  1816. "name": "aten::bitwise_left_shift_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)"
  1817. },
  1818. {
  1819. "name": "aten::bitwise_not(Tensor self) -> Tensor"
  1820. },
  1821. {
  1822. "name": "aten::bitwise_not.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  1823. },
  1824. {
  1825. "name": "aten::bitwise_not_(Tensor(a!) self) -> Tensor(a!)"
  1826. },
  1827. {
  1828. "name": "aten::bitwise_or.Tensor(Tensor self, Tensor other) -> Tensor"
  1829. },
  1830. {
  1831. "name": "aten::bitwise_or.Scalar(Tensor self, Scalar other) -> Tensor"
  1832. },
  1833. {
  1834. "name": "aten::bitwise_or.Scalar_Tensor(Scalar self, Tensor other) -> Tensor"
  1835. },
  1836. {
  1837. "name": "aten::bitwise_or.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  1838. },
  1839. {
  1840. "name": "aten::bitwise_or.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)"
  1841. },
  1842. {
  1843. "name": "aten::bitwise_or.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  1844. },
  1845. {
  1846. "name": "aten::bitwise_or_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)"
  1847. },
  1848. {
  1849. "name": "aten::bitwise_or_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)"
  1850. },
  1851. {
  1852. "name": "aten::bitwise_right_shift.Tensor(Tensor self, Tensor other) -> Tensor"
  1853. },
  1854. {
  1855. "name": "aten::bitwise_right_shift.Tensor_Scalar(Tensor self, Scalar other) -> Tensor"
  1856. },
  1857. {
  1858. "name": "aten::bitwise_right_shift.Scalar_Tensor(Scalar self, Tensor other) -> Tensor"
  1859. },
  1860. {
  1861. "name": "aten::bitwise_right_shift.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  1862. },
  1863. {
  1864. "name": "aten::bitwise_right_shift.Tensor_Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)"
  1865. },
  1866. {
  1867. "name": "aten::bitwise_right_shift.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  1868. },
  1869. {
  1870. "name": "aten::bitwise_right_shift_.Tensor_Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)"
  1871. },
  1872. {
  1873. "name": "aten::bitwise_right_shift_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)"
  1874. },
  1875. {
  1876. "name": "aten::bitwise_xor.Tensor(Tensor self, Tensor other) -> Tensor"
  1877. },
  1878. {
  1879. "name": "aten::bitwise_xor.Scalar(Tensor self, Scalar other) -> Tensor"
  1880. },
  1881. {
  1882. "name": "aten::bitwise_xor.Scalar_Tensor(Scalar self, Tensor other) -> Tensor"
  1883. },
  1884. {
  1885. "name": "aten::bitwise_xor.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  1886. },
{
"name": "aten::bitwise_xor.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::bitwise_xor.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::bitwise_xor_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)"
},
{
"name": "aten::bitwise_xor_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)"
},
{
"name": "aten::block_diag(Tensor[] tensors) -> Tensor"
},
{
"name": "aten::block_diag.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::bmm(Tensor self, Tensor mat2) -> Tensor"
},
{
"name": "aten::bmm.out(Tensor self, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::bmm.dtype_out(Tensor self, Tensor mat2, ScalarType out_dtype, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::bmm.dtype(Tensor self, Tensor mat2, ScalarType out_dtype) -> Tensor"
},
{
"name": "aten::broadcast_tensors(Tensor[] tensors) -> Tensor[]"
},
{
"name": "aten::broadcast_to(Tensor(a) self, SymInt[] size) -> Tensor(a)"
},
{
"name": "aten::bucketize.Tensor(Tensor self, Tensor boundaries, *, bool out_int32=False, bool right=False) -> Tensor"
},
{
"name": "aten::bucketize.Scalar(Scalar self, Tensor boundaries, *, bool out_int32=False, bool right=False) -> Tensor"
},
{
"name": "aten::bucketize.Tensor_out(Tensor self, Tensor boundaries, *, bool out_int32=False, bool right=False, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::bucketize.Scalar_out(Scalar self, Tensor boundaries, *, bool out_int32=False, bool right=False, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::cartesian_prod(Tensor[] tensors) -> Tensor"
},
{
"name": "aten::cat(Tensor[] tensors, int dim=0) -> Tensor",
"category": "Tensor"
},
{
"name": "aten::cat.names(Tensor[] tensors, str dim) -> Tensor",
"category": "Tensor"
},
{
"name": "aten::cat.names_out(Tensor[] tensors, str dim, *, Tensor(a!) out) -> Tensor(a!)",
"category": "Tensor"
},
{
"name": "aten::cat.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)",
"category": "Tensor"
},
{
"name": "aten::cauchy_(Tensor(a!) self, float median=0., float sigma=1., *, Generator? generator=None) -> Tensor(a!)"
},
{
"name": "aten::cdist(Tensor x1, Tensor x2, float p=2., int? compute_mode=None) -> Tensor"
},
{
"name": "aten::ceil(Tensor self) -> Tensor"
},
{
"name": "aten::ceil.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::ceil.int(int a) -> int"
},
{
"name": "aten::ceil.float(float a) -> int"
},
{
"name": "aten::ceil.Scalar(Scalar a) -> Scalar"
},
{
"name": "aten::ceil_(Tensor(a!) self) -> Tensor(a!)"
},
{
"name": "aten::celu(Tensor self, Scalar alpha=1.) -> Tensor",
"category": "Activation"
},
{
"name": "aten::celu.out(Tensor self, Scalar alpha=1., *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::celu_(Tensor(a!) self, Scalar alpha=1.) -> Tensor(a!)"
},
{
"name": "aten::channel_shuffle(Tensor self, SymInt groups) -> Tensor"
},
{
"name": "aten::channel_shuffle.out(Tensor self, SymInt groups, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::cholesky_solve(Tensor self, Tensor input2, bool upper=False) -> Tensor"
},
{
"name": "aten::cholesky_solve.out(Tensor self, Tensor input2, bool upper=False, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::chr(int i) -> str"
},
{
"name": "aten::chunk(Tensor(a -> *) self, int chunks, int dim=0) -> Tensor(a)[]"
},
{
"name": "aten::clamp(Tensor self, Scalar? min=None, Scalar? max=None) -> Tensor"
},
{
"name": "aten::clamp.Tensor(Tensor self, Tensor? min=None, Tensor? max=None) -> Tensor"
},
{
"name": "aten::clamp.out(Tensor self, Scalar? min=None, Scalar? max=None, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::clamp.Tensor_out(Tensor self, Tensor? min=None, Tensor? max=None, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::clamp_(Tensor(a!) self, Scalar? min=None, Scalar? max=None) -> Tensor(a!)"
},
{
"name": "aten::clamp_.Tensor(Tensor(a!) self, Tensor? min=None, Tensor? max=None) -> Tensor(a!)"
},
{
"name": "aten::clamp_max(Tensor self, Scalar max) -> Tensor"
},
{
"name": "aten::clamp_max.Tensor(Tensor self, Tensor max) -> Tensor"
},
{
"name": "aten::clamp_max.out(Tensor self, Scalar max, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::clamp_max.Tensor_out(Tensor self, Tensor max, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::clamp_max_(Tensor(a!) self, Scalar max) -> Tensor(a!)"
},
{
"name": "aten::clamp_max_.Tensor(Tensor(a!) self, Tensor max) -> Tensor(a!)"
},
{
"name": "aten::clamp_min(Tensor self, Scalar min) -> Tensor"
},
{
"name": "aten::clamp_min.Tensor(Tensor self, Tensor min) -> Tensor"
},
{
"name": "aten::clamp_min.out(Tensor self, Scalar min, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::clamp_min.Tensor_out(Tensor self, Tensor min, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::clamp_min_(Tensor(a!) self, Scalar min) -> Tensor(a!)"
},
{
"name": "aten::clamp_min_.Tensor(Tensor(a!) self, Tensor min) -> Tensor(a!)"
},
{
"name": "aten::clear.t(t[](a!) self) -> ()"
},
{
"name": "aten::clear.str(Dict(str, t)(a!) self) -> ()"
},
{
"name": "aten::clear.int(Dict(int, t)(a!) self) -> ()"
},
{
"name": "aten::clear.bool(Dict(bool, t)(a!) self) -> ()"
},
{
"name": "aten::clear.float(Dict(float, t)(a!) self) -> ()"
},
{
"name": "aten::clear.complex(Dict(complex, t)(a!) self) -> ()"
},
{
"name": "aten::clear.Tensor(Dict(Tensor, t)(a!) self) -> ()"
},
{
"name": "aten::clip(Tensor self, Scalar? min=None, Scalar? max=None) -> Tensor"
},
{
"name": "aten::clip.Tensor(Tensor self, Tensor? min=None, Tensor? max=None) -> Tensor"
},
{
"name": "aten::clip.out(Tensor self, Scalar? min=None, Scalar? max=None, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::clip.Tensor_out(Tensor self, Tensor? min=None, Tensor? max=None, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::clip_(Tensor(a!) self, Scalar? min=None, Scalar? max=None) -> Tensor(a!)"
},
{
"name": "aten::clip_.Tensor(Tensor(a!) self, Tensor? min=None, Tensor? max=None) -> Tensor(a!)"
},
{
"name": "aten::clone(Tensor self, *, MemoryFormat? memory_format=None) -> Tensor"
},
{
"name": "aten::clone.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::coalesce(Tensor(a) self) -> Tensor(a)"
},
{
"name": "aten::col2im(Tensor self, SymInt[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor"
},
{
"name": "aten::col2im.out(Tensor self, SymInt[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::col_indices(Tensor(a) self) -> Tensor(a)"
},
{
"name": "aten::column_stack(Tensor[] tensors) -> Tensor"
},
{
"name": "aten::column_stack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::complex(Tensor real, Tensor imag) -> Tensor"
},
{
"name": "aten::complex.out(Tensor real, Tensor imag, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::concat(Tensor[] tensors, int dim=0) -> Tensor",
"category": "Tensor"
},
{
"name": "aten::concat.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::concat.names(Tensor[] tensors, str dim) -> Tensor",
"category": "Tensor"
},
{
"name": "aten::concat.names_out(Tensor[] tensors, str dim, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::concatenate(Tensor[] tensors, int dim=0) -> Tensor"
},
{
"name": "aten::concatenate.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::concatenate.names(Tensor[] tensors, str dim) -> Tensor"
},
{
"name": "aten::concatenate.names_out(Tensor[] tensors, str dim, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::conj(Tensor(a) self) -> Tensor(a)"
},
{
"name": "aten::constant_pad_nd(Tensor self, SymInt[] pad, Scalar value=0) -> Tensor",
"category": "Tensor"
},
{
"name": "aten::constant_pad_nd.out(Tensor self, SymInt[] pad, Scalar value=0, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::contiguous(Tensor(a) self, *, MemoryFormat memory_format=0) -> Tensor(a)"
},
{
"name": "aten::conv1d(Tensor input, Tensor weight, Tensor? bias=None, SymInt[1] stride=[1], SymInt[1] padding=[0], SymInt[1] dilation=[1], SymInt groups=1) -> Tensor",
"category": "Layer"
},
{
"name": "aten::conv1d.padding(Tensor input, Tensor weight, Tensor? bias=None, SymInt[1] stride=[1], str padding=\"valid\", SymInt[1] dilation=[1], SymInt groups=1) -> Tensor",
"category": "Layer"
},
{
"name": "aten::conv2d(Tensor input, Tensor weight, Tensor? bias=None, SymInt[2] stride=[1, 1], SymInt[2] padding=[0, 0], SymInt[2] dilation=[1, 1], SymInt groups=1) -> Tensor",
"category": "Layer"
},
{
"name": "aten::conv2d.padding(Tensor input, Tensor weight, Tensor? bias=None, SymInt[2] stride=[1, 1], str padding=\"valid\", SymInt[2] dilation=[1, 1], SymInt groups=1) -> Tensor",
"category": "Layer"
},
{
"name": "aten::conv3d(Tensor input, Tensor weight, Tensor? bias=None, SymInt[3] stride=[1, 1, 1], SymInt[3] padding=[0, 0, 0], SymInt[3] dilation=[1, 1, 1], SymInt groups=1) -> Tensor",
"category": "Layer"
},
{
"name": "aten::conv3d.padding(Tensor input, Tensor weight, Tensor? bias=None, SymInt[3] stride=[1, 1, 1], str padding=\"valid\", SymInt[3] dilation=[1, 1, 1], SymInt groups=1) -> Tensor",
"category": "Layer"
},
{
"name": "aten::conv_transpose1d(Tensor input, Tensor weight, Tensor? bias=None, SymInt[1] stride=[1], SymInt[1] padding=[0], SymInt[1] output_padding=[0], SymInt groups=1, SymInt[1] dilation=[1]) -> Tensor",
"category": "Layer"
},
{
"name": "aten::conv_transpose2d.input(Tensor input, Tensor weight, Tensor? bias=None, SymInt[2] stride=[1, 1], SymInt[2] padding=[0, 0], SymInt[2] output_padding=[0, 0], SymInt groups=1, SymInt[2] dilation=[1, 1]) -> Tensor",
"category": "Layer"
},
{
"name": "aten::conv_transpose3d.input(Tensor input, Tensor weight, Tensor? bias=None, SymInt[3] stride=[1, 1, 1], SymInt[3] padding=[0, 0, 0], SymInt[3] output_padding=[0, 0, 0], SymInt groups=1, SymInt[3] dilation=[1, 1, 1]) -> Tensor",
"category": "Layer"
},
{
"name": "aten::convolution(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups) -> Tensor",
"category": "Layer"
},
{
"name": "aten::convolution.out(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, *, Tensor(a!) out) -> Tensor(a!)",
"category": "Layer"
},
{
"name": "aten::convolution_backward(Tensor grad_output, Tensor input, Tensor weight, SymInt[]? bias_sizes, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor)"
},
{
"name": "aten::convolution_backward.out(Tensor grad_output, Tensor input, Tensor weight, SymInt[]? bias_sizes, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))"
},
{
"name": "aten::convolution_backward_overrideable(Tensor grad_output, Tensor input, Tensor weight, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias)"
},
{
"name": "aten::convolution_backward_overrideable.out(Tensor grad_output, Tensor input, Tensor weight, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))"
},
{
"name": "aten::convolution_overrideable(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups) -> Tensor"
},
{
"name": "aten::convolution_overrideable.out(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::copy(Tensor self, Tensor src, bool non_blocking=False) -> Tensor"
},
{
"name": "aten::copy.out(Tensor self, Tensor src, bool non_blocking=False, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::copy.t(t[](a) self) -> t[]"
},
{
"name": "aten::copy.Dict_str(Dict(str, t)(a) self) -> Dict(str, t)"
},
{
"name": "aten::copy.Dict_int(Dict(int, t)(a) self) -> Dict(int, t)"
},
{
"name": "aten::copy.Dict_bool(Dict(bool, t)(a) self) -> Dict(bool, t)"
},
{
"name": "aten::copy.Dict_float(Dict(float, t)(a) self) -> Dict(float, t)"
},
{
"name": "aten::copy.Dict_complex(Dict(complex, t)(a) self) -> Dict(complex, t)"
},
{
"name": "aten::copy.Dict_Tensor(Dict(Tensor, t)(a) self) -> Dict(Tensor, t)"
},
{
"name": "aten::copy_(Tensor(a!) self, Tensor src, bool non_blocking=False) -> Tensor(a!)"
},
{
"name": "aten::copy_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)"
},
{
"name": "aten::copy_.int(Tensor(a!) self, int other) -> Tensor(a!)"
},
{
"name": "aten::copy_.float(Tensor(a!) self, float other) -> Tensor(a!)"
},
{
"name": "aten::cos(Tensor self) -> Tensor"
},
{
"name": "aten::cos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::cos.int(int a) -> float"
},
{
"name": "aten::cos.float(float a) -> float"
},
{
"name": "aten::cos.complex(complex a) -> complex"
},
{
"name": "aten::cos.Scalar(Scalar a) -> Scalar"
},
{
"name": "aten::cosh(Tensor self) -> Tensor"
},
{
"name": "aten::cosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::cosh.int(int a) -> float"
},
{
"name": "aten::cosh.float(float a) -> float"
},
{
"name": "aten::cosh.complex(complex a) -> complex"
},
{
"name": "aten::cosh.Scalar(Scalar a) -> Scalar"
},
{
"name": "aten::cosine_similarity(Tensor x1, Tensor x2, int dim=1, float eps=1e-08) -> Tensor"
},
{
"name": "aten::count(str self, str substr, int start=0, int end=-1) -> int"
},
{
"name": "aten::count.int(int[] self, int el) -> int"
},
{
"name": "aten::count.float(float[] self, float el) -> int"
},
{
"name": "aten::count.bool(bool[] self, bool el) -> int"
},
{
"name": "aten::count.Tensor(Tensor[] self, Tensor el) -> int"
},
{
"name": "aten::count.str(str[] self, str el) -> int"
},
{
"name": "aten::count_nonzero.dim_IntList(Tensor self, int[] dim) -> Tensor"
},
{
"name": "aten::count_nonzero.dim_IntList_out(Tensor self, int[] dim, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::count_nonzero(Tensor self, int? dim=None) -> Tensor"
},
{
"name": "aten::count_nonzero.out(Tensor self, int? dim=None, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::cpu(Tensor(a) self) -> Tensor(a|b)"
},
{
"name": "aten::cross(Tensor self, Tensor other, int? dim=None) -> Tensor"
},
{
"name": "aten::cross.out(Tensor self, Tensor other, int? dim=None, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::cross_entropy_loss(Tensor self, Tensor target, Tensor? weight=None, int reduction=1, SymInt ignore_index=-100, float label_smoothing=0.) -> Tensor"
},
{
"name": "aten::crow_indices(Tensor(a) self) -> Tensor(a)"
},
{
"name": "aten::ctc_loss.IntList(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank=0, int reduction=1, bool zero_infinity=False) -> Tensor"
},
{
"name": "aten::ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank=0, int reduction=1, bool zero_infinity=False) -> Tensor"
},
{
"name": "aten::cuda(Tensor(a) self) -> Tensor(a|b)"
},
{
"name": "aten::cudnn_batch_norm(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon) -> (Tensor, Tensor, Tensor, Tensor)"
},
{
"name": "aten::cudnn_batch_norm.out(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))"
},
{
"name": "aten::cudnn_convolution_add_relu.out(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::cudnn_convolution_add_relu(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups) -> Tensor"
},
{
"name": "aten::cudnn_convolution_relu.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::cudnn_convolution_relu(Tensor self, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups) -> Tensor"
},
{
"name": "aten::cummax(Tensor self, int dim) -> (Tensor values, Tensor indices)"
},
{
"name": "aten::cummax.dimname(Tensor self, str dim) -> (Tensor values, Tensor indices)"
},
{
"name": "aten::cummax.dimname_out(Tensor self, str dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)"
},
{
"name": "aten::cummax.out(Tensor self, int dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)"
},
{
"name": "aten::cummaxmin_backward(Tensor grad, Tensor input, Tensor indices, int dim) -> Tensor"
},
{
"name": "aten::cumprod(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor"
},
{
"name": "aten::cumprod.dimname(Tensor self, str dim, *, ScalarType? dtype=None) -> Tensor"
},
{
"name": "aten::cumprod.dimname_out(Tensor self, str dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::cumprod.out(Tensor self, int dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::cumsum(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor"
},
{
"name": "aten::cumsum.dimname(Tensor self, str dim, *, ScalarType? dtype=None) -> Tensor"
},
{
"name": "aten::cumsum.dimname_out(Tensor self, str dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::cumsum.out(Tensor self, int dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::cumsum_(Tensor(a!) self, int dim, *, ScalarType? dtype=None) -> Tensor(a!)"
},
{
"name": "aten::cumsum_.dimname(Tensor(a!) self, str dim, *, ScalarType? dtype=None) -> Tensor(a!)"
},
{
"name": "aten::dequantize.self(Tensor self) -> Tensor",
"category": "Quantization"
},
{
"name": "aten::dequantize.self_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::dequantize.tensors_out(Tensor[] tensors, *, Tensor(a!)[] out) -> ()"
},
{
"name": "aten::dequantize.tensors(Tensor[] tensors) -> Tensor[]",
"category": "Quantization"
},
{
"name": "aten::dequantize.tensor(Tensor qtensor) -> Tensor",
"category": "Quantization"
},
{
"name": "aten::dequantize.list(Tensor[] qtensors) -> Tensor[]",
"category": "Quantization"
},
{
"name": "aten::dequantize.any(Any tensors) -> Any",
"category": "Quantization"
},
{
"name": "aten::detach(Tensor(a) self) -> Tensor(a)"
},
{
"name": "aten::detach_(Tensor(a!) self) -> Tensor(a!)"
},
{
"name": "aten::detach_copy(Tensor self) -> Tensor"
},
{
"name": "aten::detach_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::device(str a) -> Device"
},
{
"name": "aten::device.with_index(str type, int index) -> Device"
},
{
"name": "aten::diag(Tensor self, int diagonal=0) -> Tensor"
},
{
"name": "aten::diag.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::diag_embed(Tensor self, int offset=0, int dim1=-2, int dim2=-1) -> Tensor"
},
{
"name": "aten::diag_embed.out(Tensor self, int offset=0, int dim1=-2, int dim2=-1, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::diagflat(Tensor self, int offset=0) -> Tensor"
},
{
"name": "aten::diagonal(Tensor(a) self, int offset=0, int dim1=0, int dim2=1) -> Tensor(a)"
},
{
"name": "aten::diagonal.Dimname(Tensor(a) self, *, str outdim, str dim1, str dim2, int offset=0) -> Tensor(a)"
},
{
"name": "aten::diagonal_backward(Tensor grad_output, SymInt[] input_sizes, int offset, int dim1, int dim2) -> Tensor"
},
{
"name": "aten::diagonal_backward.out(Tensor grad_output, SymInt[] input_sizes, int offset, int dim1, int dim2, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::diagonal_copy(Tensor self, int offset=0, int dim1=0, int dim2=1) -> Tensor"
},
{
"name": "aten::diagonal_copy.out(Tensor self, int offset=0, int dim1=0, int dim2=1, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::diagonal_scatter(Tensor self, Tensor src, int offset=0, int dim1=0, int dim2=1) -> Tensor"
},
{
"name": "aten::diagonal_scatter.out(Tensor self, Tensor src, int offset=0, int dim1=0, int dim2=1, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::dict() -> Dict(str, Tensor)"
},
{
"name": "aten::dict.str((str, tVal)[] inputs) -> Dict(str, tVal)"
},
{
"name": "aten::dict.Dict_str(Dict(str, t)(a) self) -> Dict(str, t)"
},
{
"name": "aten::dict.int((int, tVal)[] inputs) -> Dict(int, tVal)"
},
{
"name": "aten::dict.Dict_int(Dict(int, t)(a) self) -> Dict(int, t)"
},
{
"name": "aten::dict.bool((bool, tVal)[] inputs) -> Dict(bool, tVal)"
},
{
"name": "aten::dict.Dict_bool(Dict(bool, t)(a) self) -> Dict(bool, t)"
},
{
"name": "aten::dict.float((float, tVal)[] inputs) -> Dict(float, tVal)"
},
{
"name": "aten::dict.Dict_float(Dict(float, t)(a) self) -> Dict(float, t)"
},
{
"name": "aten::dict.complex((complex, tVal)[] inputs) -> Dict(complex, tVal)"
},
{
"name": "aten::dict.Dict_complex(Dict(complex, t)(a) self) -> Dict(complex, t)"
},
{
"name": "aten::dict.Tensor((Tensor, tVal)[] inputs) -> Dict(Tensor, tVal)"
},
{
"name": "aten::dict.Dict_Tensor(Dict(Tensor, t)(a) self) -> Dict(Tensor, t)"
},
{
"name": "aten::diff(Tensor self, int n=1, int dim=-1, Tensor? prepend=None, Tensor? append=None) -> Tensor"
},
{
"name": "aten::diff.out(Tensor self, int n=1, int dim=-1, Tensor? prepend=None, Tensor? append=None, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::dim(Tensor self) -> int"
},
{
"name": "aten::dist(Tensor self, Tensor other, Scalar p=2) -> Tensor"
},
{
"name": "aten::dist.out(Tensor self, Tensor other, Scalar p=2, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::div.Tensor(Tensor self, Tensor other) -> Tensor"
},
{
"name": "aten::div.Scalar(Tensor self, Scalar other) -> Tensor"
},
{
"name": "aten::div.Tensor_mode(Tensor self, Tensor other, *, str? rounding_mode) -> Tensor"
},
{
"name": "aten::div.Scalar_mode(Tensor self, Scalar other, *, str? rounding_mode) -> Tensor"
},
{
"name": "aten::div.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::div.out_mode(Tensor self, Tensor other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::div.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::div.Scalar_mode_out(Tensor self, Scalar other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::div.int(int a, int b) -> float"
},
{
"name": "aten::div.complex(complex a, complex b) -> complex"
},
{
"name": "aten::div.float(float a, float b) -> float"
},
{
"name": "aten::div(Scalar a, Scalar b) -> float"
},
{
"name": "aten::div_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)"
},
{
"name": "aten::div_.Tensor_mode(Tensor(a!) self, Tensor other, *, str? rounding_mode) -> Tensor(a!)"
},
{
"name": "aten::div_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)"
},
{
"name": "aten::div_.Scalar_mode(Tensor(a!) self, Scalar other, *, str? rounding_mode) -> Tensor(a!)"
},
{
"name": "aten::divide.Tensor(Tensor self, Tensor other) -> Tensor"
},
{
"name": "aten::divide.Scalar(Tensor self, Scalar other) -> Tensor"
},
{
"name": "aten::divide.Tensor_mode(Tensor self, Tensor other, *, str? rounding_mode) -> Tensor"
},
{
"name": "aten::divide.Scalar_mode(Tensor self, Scalar other, *, str? rounding_mode) -> Tensor"
},
{
"name": "aten::divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::divide.out_mode(Tensor self, Tensor other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)"
},
{
"name": "aten::divide_.Tensor_mode(Tensor(a!) self, Tensor other, *, str? rounding_mode) -> Tensor(a!)"
},
{
"name": "aten::divide_.Scalar_mode(Tensor(a!) self, Scalar other, *, str? rounding_mode) -> Tensor(a!)"
},
{
"name": "aten::divide_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)"
},
{
"name": "aten::divmod.int(int x, int y) -> (int, int)"
},
{
"name": "aten::divmod.float(float x, float y) -> (float, float)"
},
{
"name": "aten::divmod.int_float(int x, float y) -> (float, float)"
},
{
"name": "aten::divmod.float_int(float x, int y) -> (float, float)"
},
{
"name": "aten::dot(Tensor self, Tensor tensor) -> Tensor"
},
{
"name": "aten::dot.out(Tensor self, Tensor tensor, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::dropout(Tensor input, float p, bool train) -> Tensor",
"category": "Dropout"
},
{
"name": "aten::dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)",
"category": "Dropout"
},
{
"name": "aten::einsum(str equation, Tensor[] tensors, *, int[]? path=None) -> Tensor"
},
{
"name": "aten::einsum.sublist(Tensor a, ...) -> Tensor"
},
{
"name": "aten::elu(Tensor self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1) -> Tensor",
"category": "Activation"
},
{
"name": "aten::elu.out(Tensor self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::elu_(Tensor(a!) self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1) -> Tensor(a!)",
"category": "Activation"
},
{
"name": "aten::embedding(Tensor weight, Tensor indices, SymInt padding_idx=-1, bool scale_grad_by_freq=False, bool sparse=False) -> Tensor",
"category": "Transform"
},
{
"name": "aten::embedding.out(Tensor weight, Tensor indices, SymInt padding_idx=-1, bool scale_grad_by_freq=False, bool sparse=False, *, Tensor(a!) out) -> Tensor(a!)",
"category": "Transform"
},
{
"name": "aten::embedding_bag(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False) -> (Tensor, Tensor, Tensor, Tensor)",
"category": "Transform"
},
{
"name": "aten::embedding_bag.padding_idx(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq, int mode, bool sparse, Tensor? per_sample_weights, bool include_last_offset, int? padding_idx) -> (Tensor, Tensor, Tensor, Tensor)",
"category": "Transform"
},
{
"name": "aten::embedding_renorm_(Tensor(a!) self, Tensor indices, float max_norm, float norm_type) -> Tensor(a!)",
"category": "Transform"
},
{
"name": "aten::empty.memory_format(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor"
},
{
"name": "aten::empty.out(SymInt[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::empty.names(int[] size, *, str[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor"
},
{
"name": "aten::empty.names_out(int[] size, *, str[]? names, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::empty_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor"
},
{
"name": "aten::empty_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::eq.Tensor(Tensor self, Tensor other) -> Tensor"
},
{
"name": "aten::eq.Scalar(Tensor self, Scalar other) -> Tensor"
},
{
"name": "aten::eq.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::eq.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::eq.int_list(int[] a, int[] b) -> bool"
},
{
"name": "aten::eq.device(Device a, Device b) -> bool"
},
{
"name": "aten::eq.bool(bool a, bool b) -> bool"
},
{
"name": "aten::eq.enum(AnyEnumType a, AnyEnumType b) -> bool"
},
{
"name": "aten::eq.int(int a, int b) -> bool"
},
{
"name": "aten::eq.complex(complex a, complex b) -> bool"
},
{
"name": "aten::eq.float(float a, float b) -> bool"
},
{
"name": "aten::eq.int_float(int a, float b) -> bool"
},
{
"name": "aten::eq.float_int(float a, int b) -> bool"
},
{
"name": "aten::eq.float_complex(float a, complex b) -> bool"
},
{
"name": "aten::eq.complex_float(complex a, float b) -> bool"
},
{
"name": "aten::eq(Scalar a, Scalar b) -> bool"
},
{
"name": "aten::eq.str(str a, str b) -> bool"
},
{
"name": "aten::eq.float_list(float[] a, float[] b) -> bool"
},
{
"name": "aten::eq.Tensor_list(Tensor[] a, Tensor[] b) -> bool"
},
{
"name": "aten::eq.bool_list(bool[] a, bool[] b) -> bool"
},
{
"name": "aten::eq.str_list(str[] a, str[] b) -> bool"
},
{
"name": "aten::equal(Tensor self, Tensor other) -> bool"
},
{
"name": "aten::erf(Tensor self) -> Tensor"
},
{
"name": "aten::erf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::erf.int(int a) -> float"
},
{
"name": "aten::erf.float(float a) -> float"
},
{
"name": "aten::erf.Scalar(Scalar a) -> Scalar"
},
{
"name": "aten::erfc(Tensor self) -> Tensor"
},
{
"name": "aten::erfc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::erfc.int(int a) -> float"
},
{
"name": "aten::erfc.float(float a) -> float"
},
{
"name": "aten::erfc.Scalar(Scalar a) -> Scalar"
},
{
"name": "aten::erfinv(Tensor self) -> Tensor"
},
{
"name": "aten::erfinv.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::exp(Tensor self) -> Tensor"
},
{
"name": "aten::exp.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::exp.int(int a) -> float"
},
{
"name": "aten::exp.float(float a) -> float"
},
{
"name": "aten::exp.complex(complex a) -> complex"
},
{
"name": "aten::exp.Scalar(Scalar a) -> Scalar"
},
{
"name": "aten::exp2(Tensor self) -> Tensor"
},
{
"name": "aten::exp2.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::exp_(Tensor(a!) self) -> Tensor(a!)"
},
{
"name": "aten::expand(Tensor(a) self, SymInt[] size, *, bool implicit=False) -> Tensor(a)"
},
{
"name": "aten::expand_as(Tensor(a) self, Tensor other) -> Tensor(a)"
},
{
"name": "aten::expand_copy(Tensor self, SymInt[] size, *, bool implicit=False) -> Tensor"
},
{
"name": "aten::expand_copy.out(Tensor self, SymInt[] size, *, bool implicit=False, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::expm1(Tensor self) -> Tensor"
},
{
"name": "aten::expm1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::expm1.int(int a) -> float"
},
{
"name": "aten::expm1.float(float a) -> float"
},
{
"name": "aten::expm1.Scalar(Scalar a) -> Scalar"
},
{
"name": "aten::expm1_(Tensor(a!) self) -> Tensor(a!)"
},
{
"name": "aten::exponential_(Tensor(a!) self, float lambd=1., *, Generator? generator=None) -> Tensor(a!)"
},
{
"name": "aten::extend.t(t[](a!) self, t[] other) -> ()"
},
{
"name": "aten::eye(SymInt n, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
},
{
"name": "aten::eye.m(SymInt n, SymInt m, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
},
{
"name": "aten::eye.out(SymInt n, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::eye.m_out(SymInt n, SymInt m, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::fake_quantize_per_channel_affine(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max) -> Tensor",
"category": "Quantization"
},
{
"name": "aten::fake_quantize_per_tensor_affine(Tensor self, float scale, int zero_point, int quant_min, int quant_max) -> Tensor",
"category": "Quantization"
},
{
"name": "aten::fake_quantize_per_tensor_affine.tensor_qparams(Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max) -> Tensor",
"category": "Quantization"
},
{
"name": "aten::fake_quantize_per_tensor_affine_cachemask(Tensor self, float scale, int zero_point, int quant_min, int quant_max) -> (Tensor output, Tensor mask)",
"category": "Quantization"
},
{
"name": "aten::fake_quantize_per_tensor_affine_cachemask.out(Tensor self, float scale, int zero_point, int quant_min, int quant_max, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))",
"category": "Quantization"
},
{
"name": "aten::fake_quantize_per_tensor_affine_cachemask_backward(Tensor grad, Tensor mask) -> Tensor",
"category": "Quantization"
},
{
"name": "aten::feature_alpha_dropout(Tensor input, float p, bool train) -> Tensor",
"category": "Dropout"
},
{
"name": "aten::feature_alpha_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)",
"category": "Dropout"
},
{
"name": "aten::feature_dropout(Tensor input, float p, bool train) -> Tensor",
"category": "Dropout"
},
{
"name": "aten::feature_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)",
"category": "Dropout"
},
{
"name": "aten::fft(Tensor self, int signal_ndim, bool normalized=False) -> Tensor"
},
{
"name": "aten::fft_fft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor"
},
{
"name": "aten::fft_fft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::fft_fft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2, -1], str? norm=None) -> Tensor"
},
{
"name": "aten::fft_fft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2, -1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::fft_fftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor"
},
{
"name": "aten::fft_fftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::fft_fftshift(Tensor self, int[1]? dim=None) -> Tensor"
},
{
"name": "aten::fft_hfft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2, -1], str? norm=None) -> Tensor"
},
{
"name": "aten::fft_hfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2, -1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::fft_hfftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor"
},
{
"name": "aten::fft_hfftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::fft_ifft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor"
},
{
"name": "aten::fft_ifft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::fft_ifft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2, -1], str? norm=None) -> Tensor"
},
{
"name": "aten::fft_ifft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2, -1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::fft_ifftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor"
},
{
"name": "aten::fft_ifftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::fft_ifftshift(Tensor self, int[1]? dim=None) -> Tensor"
},
{
"name": "aten::fft_ihfft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2, -1], str? norm=None) -> Tensor"
},
{
"name": "aten::fft_ihfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2, -1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::fft_ihfftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor"
},
{
"name": "aten::fft_ihfftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::fft_irfft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor"
},
{
"name": "aten::fft_irfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::fft_irfft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2, -1], str? norm=None) -> Tensor"
},
{
"name": "aten::fft_irfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2, -1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::fft_irfftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor"
},
{
"name": "aten::fft_irfftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::fft_rfft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor"
},
{
"name": "aten::fft_rfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::fft_rfft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2, -1], str? norm=None) -> Tensor"
},
{
"name": "aten::fft_rfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2, -1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::fft_rfftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor"
},
{
"name": "aten::fft_rfftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::fill.Scalar(Tensor self, Scalar value) -> Tensor"
},
{
"name": "aten::fill.Scalar_out(Tensor self, Scalar value, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::fill.Tensor(Tensor self, Tensor value) -> Tensor"
},
{
"name": "aten::fill.Tensor_out(Tensor self, Tensor value, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::fill_.Scalar(Tensor(a!) self, Scalar value) -> Tensor(a!)"
},
{
"name": "aten::fill_.Tensor(Tensor(a!) self, Tensor value) -> Tensor(a!)"
},
{
"name": "aten::find(str self, str substr, int start=0, int end=-1) -> int"
},
{
"name": "aten::flatten.using_ints(Tensor(a) self, int start_dim=0, int end_dim=-1) -> Tensor(a)",
"category": "Shape"
},
{
"name": "aten::flatten.DimnameList(Tensor(a) self, str[] dims, str out_dim) -> Tensor(a)",
"category": "Shape"
},
{
"name": "aten::flatten.named_out_dim(Tensor(a) self, int start_dim, int end_dim, str out_dim) -> Tensor(a)",
"category": "Shape"
},
{
"name": "aten::flatten.using_names(Tensor(a) self, str start_dim, str end_dim, str out_dim) -> Tensor(a)",
"category": "Shape"
},
{
"name": "aten::flip(Tensor self, int[] dims) -> Tensor"
},
{
"name": "aten::flip.out(Tensor self, int[] dims, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::floor(Tensor self) -> Tensor"
},
{
"name": "aten::floor.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::floor.int(int a) -> int"
},
{
"name": "aten::floor.float(float a) -> int"
},
{
"name": "aten::floor.Scalar(Scalar a) -> Scalar"
},
{
"name": "aten::floor_(Tensor(a!) self) -> Tensor(a!)"
},
{
"name": "aten::floor_divide(Tensor self, Tensor other) -> Tensor"
},
{
"name": "aten::floor_divide.Scalar(Tensor self, Scalar other) -> Tensor"
},
{
"name": "aten::floor_divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::floor_divide.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::floor_divide_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)"
},
{
"name": "aten::floor_divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)"
},
{
"name": "aten::floordiv.int(int a, int b) -> int"
},
{
"name": "aten::floordiv.float(float a, float b) -> float"
},
{
"name": "aten::floordiv.int_float(int a, float b) -> float"
},
{
"name": "aten::floordiv.float_int(float a, int b) -> float"
},
{
"name": "aten::floordiv(Scalar a, Scalar b) -> Scalar"
},
{
"name": "aten::fmod.Tensor(Tensor self, Tensor other) -> Tensor"
},
{
"name": "aten::fmod.Scalar(Tensor self, Scalar other) -> Tensor"
},
{
"name": "aten::fmod.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::fmod.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::fmod.int(int a, int b) -> float"
},
{
"name": "aten::fmod.float(float a, float b) -> float"
},
{
"name": "aten::fmod.int_float(int a, float b) -> float"
},
{
"name": "aten::fmod.float_int(float a, int b) -> float"
},
{
"name": "aten::fmod(Scalar a, Scalar b) -> float"
},
{
"name": "aten::format(str self, ...) -> str",
"is_vararg": true
},
{
"name": "aten::frobenius_norm.dim(Tensor self, int[1] dim, bool keepdim=False) -> Tensor",
"category": "Normalization"
},
{
"name": "aten::frobenius_norm.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::full.names(int[] size, Scalar fill_value, *, str[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
},
{
"name": "aten::full(SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
},
{
"name": "aten::full.names_out(int[] size, Scalar fill_value, *, str[]? names, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::full.out(SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::full_like(Tensor self, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor"
},
{
"name": "aten::full_like.out(Tensor self, Scalar fill_value, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::fused_moving_avg_obs_fake_quant(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor(a!) running_min, Tensor(b!) running_max, Tensor(c!) scale, Tensor(d!) zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False) -> Tensor"
},
{
"name": "aten::gather(Tensor self, int dim, Tensor index, *, bool sparse_grad=False) -> Tensor",
"category": "Transform"
},
{
"name": "aten::gather.out(Tensor self, int dim, Tensor index, *, bool sparse_grad=False, Tensor(a!) out) -> Tensor(a!)",
"category": "Transform"
},
{
"name": "aten::gather.dimname(Tensor self, str dim, Tensor index, *, bool sparse_grad=False) -> Tensor",
"category": "Transform"
},
{
"name": "aten::gather.dimname_out(Tensor self, str dim, Tensor index, *, bool sparse_grad=False, Tensor(a!) out) -> Tensor(a!)",
"category": "Transform"
},
{
"name": "aten::gather_backward(Tensor grad, Tensor self, int dim, Tensor index, bool sparse_grad) -> Tensor"
},
{
"name": "aten::gcd(Tensor self, Tensor other) -> Tensor"
},
{
"name": "aten::gcd.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::gcd.int(int a, int b) -> int"
},
{
"name": "aten::gcd_(Tensor(a!) self, Tensor other) -> Tensor(a!)"
},
{
"name": "aten::ge.Tensor(Tensor self, Tensor other) -> Tensor"
},
{
"name": "aten::ge.Scalar(Tensor self, Scalar other) -> Tensor"
},
{
"name": "aten::ge.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::ge.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::ge.int(int a, int b) -> bool"
},
{
"name": "aten::ge.float(float a, float b) -> bool"
},
{
"name": "aten::ge.int_float(int a, float b) -> bool"
},
{
"name": "aten::ge.float_int(float a, int b) -> bool"
},
{
"name": "aten::ge(Scalar a, Scalar b) -> bool"
},
{
"name": "aten::ge.str(str a, str b) -> bool"
},
{
"name": "aten::ge_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)"
},
{
"name": "aten::ge_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)"
},
{
"name": "aten::gelu(Tensor self, *, str approximate=\"none\") -> Tensor",
"category": "Activation"
},
{
"name": "aten::gelu.out(Tensor self, *, str approximate=\"none\", Tensor(a!) out) -> Tensor(a!)",
"category": "Activation"
},
{
"name": "aten::gelu_(Tensor(a!) self, *, str approximate=\"none\") -> Tensor(a!)",
"category": "Activation"
},
{
"name": "aten::gelu_backward(Tensor grad_output, Tensor self, *, str approximate=\"none\") -> Tensor",
"category": "Activation"
},
{
"name": "aten::gelu_backward.grad_input(Tensor grad_output, Tensor self, *, str approximate=\"none\", Tensor(a!) grad_input) -> Tensor(a!)",
"category": "Activation"
},
{
"name": "aten::geometric_(Tensor(a!) self, float p, *, Generator? generator=None) -> Tensor(a!)"
},
{
"name": "aten::geqrf(Tensor self) -> (Tensor a, Tensor tau)"
},
{
"name": "aten::geqrf.a(Tensor self, *, Tensor(a!) a, Tensor(b!) tau) -> (Tensor(a!) a, Tensor(b!) tau)"
},
{
"name": "aten::ger(Tensor self, Tensor vec2) -> Tensor"
},
{
"name": "aten::ger.out(Tensor self, Tensor vec2, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::get.str(Dict(str, t) self, str key) -> t(*)?"
},
{
"name": "aten::get.default_str(Dict(str, t) self, str key, t default_value) -> t(*)"
},
{
"name": "aten::get.int(Dict(int, t) self, int key) -> t(*)?"
},
{
"name": "aten::get.default_int(Dict(int, t) self, int key, t default_value) -> t(*)"
},
{
"name": "aten::get.bool(Dict(bool, t) self, bool key) -> t(*)?"
},
{
"name": "aten::get.default_bool(Dict(bool, t) self, bool key, t default_value) -> t(*)"
},
{
"name": "aten::get.float(Dict(float, t) self, float key) -> t(*)?"
},
{
"name": "aten::get.default_float(Dict(float, t) self, float key, t default_value) -> t(*)"
},
{
"name": "aten::get.complex(Dict(complex, t) self, complex key) -> t(*)?"
},
{
"name": "aten::get.default_complex(Dict(complex, t) self, complex key, t default_value) -> t(*)"
},
{
"name": "aten::get.Tensor(Dict(Tensor, t) self, Tensor key) -> t(*)?"
},
{
"name": "aten::get.default_Tensor(Dict(Tensor, t) self, Tensor key, t default_value) -> t(*)"
},
{
"name": "aten::get_autocast_dtype(str device_type) -> ScalarType"
},
{
"name": "aten::get_device(Tensor self) -> int"
},
{
"name": "aten::get_num_threads() -> int"
},
{
"name": "aten::glu(Tensor self, int dim=-1) -> Tensor",
"category": "Activation"
},
{
"name": "aten::glu.out(Tensor self, int dim=-1, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::grad(Tensor[] outputs, Tensor[] inputs, Tensor?[]? grad_outputs=None, bool? retain_graph=None, bool create_graph=False, bool allow_unused=False) -> Tensor?[]"
},
{
"name": "aten::greater.Tensor(Tensor self, Tensor other) -> Tensor"
},
{
"name": "aten::greater.Scalar(Tensor self, Scalar other) -> Tensor"
},
{
"name": "aten::greater.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::greater.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::greater_equal.Tensor(Tensor self, Tensor other) -> Tensor"
},
{
"name": "aten::greater_equal.Scalar(Tensor self, Scalar other) -> Tensor"
},
{
"name": "aten::greater_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::greater_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::greater_equal_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)"
},
{
"name": "aten::greater_equal_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)"
},
{
"name": "aten::grid_sampler(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor"
},
{
"name": "aten::grid_sampler.legacy(Tensor input, Tensor grid, int interpolation_mode, int padding_mode) -> Tensor"
},
{
"name": "aten::grid_sampler_2d(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor"
},
{
"name": "aten::grid_sampler_2d.out(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::group_norm(Tensor input, int num_groups, Tensor? weight=None, Tensor? bias=None, float eps=1.0000000000000001e-05, bool cudnn_enabled=True) -> Tensor",
"category": "Normalization"
},
{
"name": "aten::gru.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor)",
"category": "Layer"
},
{
"name": "aten::gru.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor)",
"category": "Layer"
},
{
"name": "aten::gru_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> Tensor"
},
{
"name": "aten::gt.Tensor(Tensor self, Tensor other) -> Tensor"
},
{
"name": "aten::gt.Scalar(Tensor self, Scalar other) -> Tensor"
},
{
"name": "aten::gt.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::gt.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::gt.int(int a, int b) -> bool"
},
{
"name": "aten::gt.float(float a, float b) -> bool"
},
{
"name": "aten::gt.int_float(int a, float b) -> bool"
},
{
"name": "aten::gt.float_int(float a, int b) -> bool"
},
{
"name": "aten::gt(Scalar a, Scalar b) -> bool"
},
{
"name": "aten::gt.str(str a, str b) -> bool"
},
{
"name": "aten::hamming_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
},
{
"name": "aten::hamming_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
},
{
"name": "aten::hamming_window.periodic_alpha(int window_length, bool periodic, float alpha, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
},
{
"name": "aten::hamming_window.periodic_alpha_beta(int window_length, bool periodic, float alpha, float beta, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
},
{
"name": "aten::hamming_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::hamming_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::hamming_window.periodic_alpha_out(int window_length, bool periodic, float alpha, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::hamming_window.periodic_alpha_beta_out(int window_length, bool periodic, float alpha, float beta, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::hann_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
},
{
"name": "aten::hann_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
},
{
"name": "aten::hann_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::hann_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::hardshrink(Tensor self, Scalar lambd=0.5) -> Tensor"
},
{
"name": "aten::hardshrink.out(Tensor self, Scalar lambd=0.5, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::hardsigmoid(Tensor self) -> Tensor",
"category": "Activation"
},
{
"name": "aten::hardsigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
},
{
"name": "aten::hardsigmoid_(Tensor(a!) self) -> Tensor(a!)",
"category": "Activation"
},
{
"name": "aten::hardswish(Tensor self) -> Tensor",
  3495. "category": "Activation"
  3496. },
  3497. {
  3498. "name": "aten::hardswish.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  3499. },
  3500. {
  3501. "name": "aten::hardswish_(Tensor(a!) self) -> Tensor(a!)",
  3502. "category": "Activation"
  3503. },
  3504. {
  3505. "name": "aten::hardswish_backward(Tensor grad_output, Tensor self) -> Tensor"
  3506. },
  3507. {
  3508. "name": "aten::hardswish_backward.out(Tensor grad_output, Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  3509. },
  3510. {
  3511. "name": "aten::hardtanh(Tensor self, Scalar min_val=-1, Scalar max_val=1) -> Tensor",
  3512. "category": "Activation"
  3513. },
  3514. {
  3515. "name": "aten::hardtanh.out(Tensor self, Scalar min_val=-1, Scalar max_val=1, *, Tensor(a!) out) -> Tensor(a!)",
  3516. "category": "Activation"
  3517. },
  3518. {
  3519. "name": "aten::hardtanh_(Tensor(a!) self, Scalar min_val=-1, Scalar max_val=1) -> Tensor(a!)",
  3520. "category": "Activation"
  3521. },
  3522. {
  3523. "name": "aten::hardtanh_backward(Tensor grad_output, Tensor self, Scalar min_val, Scalar max_val) -> Tensor"
  3524. },
  3525. {
  3526. "name": "aten::hardtanh_backward.grad_input(Tensor grad_output, Tensor self, Scalar min_val, Scalar max_val, *, Tensor(a!) grad_input) -> Tensor(a!)"
  3527. },
  3528. {
  3529. "name": "aten::has_torch_function(...) -> bool"
  3530. },
  3531. {
  3532. "name": "aten::histc(Tensor self, int bins=100, Scalar min=0, Scalar max=0) -> Tensor"
  3533. },
  3534. {
  3535. "name": "aten::histc.out(Tensor self, int bins=100, Scalar min=0, Scalar max=0, *, Tensor(a!) out) -> Tensor(a!)"
  3536. },
  3537. {
  3538. "name": "aten::hstack(Tensor[] tensors) -> Tensor"
  3539. },
  3540. {
  3541. "name": "aten::hstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)"
  3542. },
  3543. {
  3544. "name": "aten::huber_loss(Tensor self, Tensor target, int reduction=1, float delta=1.) -> Tensor"
  3545. },
  3546. {
  3547. "name": "aten::huber_loss.out(Tensor self, Tensor target, int reduction=1, float delta=1., *, Tensor(a!) out) -> Tensor(a!)"
  3548. },
  3549. {
  3550. "name": "aten::huber_loss_backward.out(Tensor grad_output, Tensor self, Tensor target, int reduction, float delta, *, Tensor(a!) grad_input) -> Tensor(a!)"
  3551. },
  3552. {
  3553. "name": "aten::huber_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, float delta) -> Tensor"
  3554. },
  3555. {
  3556. "name": "aten::i0(Tensor self) -> Tensor"
  3557. },
  3558. {
  3559. "name": "aten::i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  3560. },
  3561. {
  3562. "name": "aten::im2col(Tensor self, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor"
  3563. },
  3564. {
  3565. "name": "aten::im2col.out(Tensor self, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!)"
  3566. },
  3567. {
  3568. "name": "aten::imag(Tensor(a) self) -> Tensor(a)"
  3569. },
  3570. {
  3571. "name": "aten::index.Tensor(Tensor self, Tensor?[] indices) -> Tensor"
  3572. },
  3573. {
  3574. "name": "aten::index.Tensor_out(Tensor self, Tensor?[] indices, *, Tensor(a!) out) -> Tensor(a!)"
  3575. },
  3576. {
  3577. "name": "aten::index.Tensor_hacked_twin(Tensor self, Tensor[] indices) -> Tensor"
  3578. },
  3579. {
  3580. "name": "aten::index.str(str self, str substr, int start=0, int end=-1) -> int"
  3581. },
  3582. {
  3583. "name": "aten::index.list_int(int[] self, int el) -> int"
  3584. },
  3585. {
  3586. "name": "aten::index.list_float(float[] self, float el) -> int"
  3587. },
  3588. {
  3589. "name": "aten::index.list_bool(bool[] self, bool el) -> int"
  3590. },
  3591. {
  3592. "name": "aten::index.list_Tensor(Tensor[] self, Tensor el) -> int"
  3593. },
  3594. {
  3595. "name": "aten::index.list_str(str[] self, str el) -> int"
  3596. },
  3597. {
  3598. "name": "aten::index_add(Tensor self, int dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor"
  3599. },
  3600. {
  3601. "name": "aten::index_add.out(Tensor self, int dim, Tensor index, Tensor source, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)"
  3602. },
  3603. {
  3604. "name": "aten::index_add.dimname(Tensor self, str dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor"
  3605. },
  3606. {
  3607. "name": "aten::index_add_(Tensor(a!) self, int dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor(a!)"
  3608. },
  3609. {
  3610. "name": "aten::index_copy(Tensor self, int dim, Tensor index, Tensor source) -> Tensor"
  3611. },
  3612. {
  3613. "name": "aten::index_copy.dimname(Tensor self, str dim, Tensor index, Tensor source) -> Tensor"
  3614. },
  3615. {
  3616. "name": "aten::index_copy.out(Tensor self, int dim, Tensor index, Tensor source, *, Tensor(a!) out) -> Tensor(a!)"
  3617. },
  3618. {
  3619. "name": "aten::index_copy_(Tensor(a!) self, int dim, Tensor index, Tensor source) -> Tensor(a!)"
  3620. },
  3621. {
  3622. "name": "aten::index_copy_.dimname(Tensor(a!) self, str dim, Tensor index, Tensor source) -> Tensor(a!)"
  3623. },
  3624. {
  3625. "name": "aten::index_fill.int_Tensor(Tensor self, int dim, Tensor index, Tensor value) -> Tensor"
  3626. },
  3627. {
  3628. "name": "aten::index_fill.int_Scalar(Tensor self, int dim, Tensor index, Scalar value) -> Tensor"
  3629. },
  3630. {
  3631. "name": "aten::index_fill.Dimname_Scalar(Tensor self, str dim, Tensor index, Scalar value) -> Tensor"
  3632. },
  3633. {
  3634. "name": "aten::index_fill.Dimname_Tensor(Tensor self, str dim, Tensor index, Tensor value) -> Tensor"
  3635. },
  3636. {
  3637. "name": "aten::index_fill.int_Scalar_out(Tensor self, int dim, Tensor index, Scalar value, *, Tensor(a!) out) -> Tensor(a!)"
  3638. },
  3639. {
  3640. "name": "aten::index_fill.int_Tensor_out(Tensor self, int dim, Tensor index, Tensor value, *, Tensor(a!) out) -> Tensor(a!)"
  3641. },
  3642. {
  3643. "name": "aten::index_fill_.int_Tensor(Tensor(a!) self, int dim, Tensor index, Tensor value) -> Tensor(a!)"
  3644. },
  3645. {
  3646. "name": "aten::index_fill_.int_Scalar(Tensor(a!) self, int dim, Tensor index, Scalar value) -> Tensor(a!)"
  3647. },
  3648. {
  3649. "name": "aten::index_fill_.Dimname_Scalar(Tensor(a!) self, str dim, Tensor index, Scalar value) -> Tensor(a!)"
  3650. },
  3651. {
  3652. "name": "aten::index_fill_.Dimname_Tensor(Tensor(a!) self, str dim, Tensor index, Tensor value) -> Tensor(a!)"
  3653. },
  3654. {
  3655. "name": "aten::index_put(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor"
  3656. },
  3657. {
  3658. "name": "aten::index_put.out(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False, *, Tensor(a!) out) -> Tensor(a!)"
  3659. },
  3660. {
  3661. "name": "aten::index_put.hacked_twin(Tensor self, Tensor[] indices, Tensor values, bool accumulate=False) -> Tensor"
  3662. },
  3663. {
  3664. "name": "aten::index_put_(Tensor(a!) self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor(a!)"
  3665. },
  3666. {
  3667. "name": "aten::index_put_.hacked_twin(Tensor(a!) self, Tensor[] indices, Tensor values, bool accumulate=False) -> Tensor(a!)"
  3668. },
  3669. {
  3670. "name": "aten::index_reduce(Tensor self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True) -> Tensor"
  3671. },
  3672. {
  3673. "name": "aten::index_reduce.out(Tensor self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True, Tensor(a!) out) -> Tensor(a!)"
  3674. },
  3675. {
  3676. "name": "aten::index_reduce_(Tensor(a!) self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True) -> Tensor(a!)"
  3677. },
  3678. {
  3679. "name": "aten::index_select(Tensor self, int dim, Tensor index) -> Tensor"
  3680. },
  3681. {
  3682. "name": "aten::index_select.out(Tensor self, int dim, Tensor index, *, Tensor(a!) out) -> Tensor(a!)"
  3683. },
  3684. {
  3685. "name": "aten::index_select.dimname(Tensor self, str dim, Tensor index) -> Tensor"
  3686. },
  3687. {
  3688. "name": "aten::index_select.dimname_out(Tensor self, str dim, Tensor index, *, Tensor(a!) out) -> Tensor(a!)"
  3689. },
  3690. {
  3691. "name": "aten::index_select_backward(Tensor grad, SymInt[] self_sizes, int dim, Tensor index) -> Tensor"
  3692. },
  3693. {
  3694. "name": "aten::insert.t(t[](a!) self, int idx, t(b -> *) el) -> ()"
  3695. },
  3696. {
  3697. "name": "aten::instance_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool use_input_stats, float momentum, float eps, bool cudnn_enabled) -> Tensor",
  3698. "category": "Normalization"
  3699. },
  3700. {
  3701. "name": "aten::int_repr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  3702. },
  3703. {
  3704. "name": "aten::int_repr(Tensor self) -> Tensor"
  3705. },
  3706. {
  3707. "name": "aten::inverse(Tensor self) -> Tensor"
  3708. },
  3709. {
  3710. "name": "aten::inverse.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  3711. },
  3712. {
  3713. "name": "aten::is_autocast_enabled() -> bool"
  3714. },
  3715. {
  3716. "name": "aten::is_contiguous(Tensor self) -> bool"
  3717. },
  3718. {
  3719. "name": "aten::is_contiguous.memory_format(Tensor self, MemoryFormat memory_format) -> bool"
  3720. },
  3721. {
  3722. "name": "aten::is_floating_point(Tensor self) -> bool"
  3723. },
  3724. {
  3725. "name": "aten::is_grad_enabled() -> bool"
  3726. },
  3727. {
  3728. "name": "aten::is_pinned(Tensor self, Device? device=None) -> bool"
  3729. },
  3730. {
  3731. "name": "aten::is_scripting() -> bool"
  3732. },
  3733. {
  3734. "name": "aten::isdigit(str self) -> bool"
  3735. },
  3736. {
  3737. "name": "aten::isfinite(Tensor self) -> Tensor"
  3738. },
  3739. {
  3740. "name": "aten::isfinite.float(float a) -> bool"
  3741. },
  3742. {
  3743. "name": "aten::isfinite.complex(complex a) -> bool"
  3744. },
  3745. {
  3746. "name": "aten::isin.Tensor_Tensor(Tensor elements, Tensor test_elements, *, bool assume_unique=False, bool invert=False) -> Tensor"
  3747. },
  3748. {
  3749. "name": "aten::isin.Tensor_Tensor_out(Tensor elements, Tensor test_elements, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!)"
  3750. },
  3751. {
  3752. "name": "aten::isin.Tensor_Scalar(Tensor elements, Scalar test_element, *, bool assume_unique=False, bool invert=False) -> Tensor"
  3753. },
  3754. {
  3755. "name": "aten::isin.Tensor_Scalar_out(Tensor elements, Scalar test_element, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!)"
  3756. },
  3757. {
  3758. "name": "aten::isin.Scalar_Tensor(Scalar element, Tensor test_elements, *, bool assume_unique=False, bool invert=False) -> Tensor"
  3759. },
  3760. {
  3761. "name": "aten::isin.Scalar_Tensor_out(Scalar element, Tensor test_elements, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!)"
  3762. },
  3763. {
  3764. "name": "aten::isinf(Tensor self) -> Tensor"
  3765. },
  3766. {
  3767. "name": "aten::isinf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  3768. },
  3769. {
  3770. "name": "aten::isinf.float(float a) -> bool"
  3771. },
  3772. {
  3773. "name": "aten::isinf.complex(complex a) -> bool"
  3774. },
  3775. {
  3776. "name": "aten::isnan(Tensor self) -> Tensor"
  3777. },
  3778. {
  3779. "name": "aten::isnan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  3780. },
  3781. {
  3782. "name": "aten::isnan.float(float a) -> bool"
  3783. },
  3784. {
  3785. "name": "aten::isnan.complex(complex a) -> bool"
  3786. },
  3787. {
  3788. "name": "aten::istft(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool center=True, bool normalized=False, bool? onesided=None, int? length=None, bool return_complex=False) -> Tensor"
  3789. },
  3790. {
  3791. "name": "aten::item(Tensor self) -> Scalar"
  3792. },
  3793. {
  3794. "name": "aten::items.str(Dict(str, t) self) -> ((str, t)[])"
  3795. },
  3796. {
  3797. "name": "aten::items.int(Dict(int, t) self) -> ((int, t)[])"
  3798. },
  3799. {
  3800. "name": "aten::items.bool(Dict(bool, t) self) -> ((bool, t)[])"
  3801. },
  3802. {
  3803. "name": "aten::items.float(Dict(float, t) self) -> ((float, t)[])"
  3804. },
  3805. {
  3806. "name": "aten::items.complex(Dict(complex, t) self) -> ((complex, t)[])"
  3807. },
  3808. {
  3809. "name": "aten::items.Tensor(Dict(Tensor, t) self) -> ((Tensor, t)[])"
  3810. },
  3811. {
  3812. "name": "aten::join(str self, str[] values) -> str"
  3813. },
  3814. {
  3815. "name": "aten::keys.str(Dict(str, t) self) -> str[](*)"
  3816. },
  3817. {
  3818. "name": "aten::keys.int(Dict(int, t) self) -> int[](*)"
  3819. },
  3820. {
  3821. "name": "aten::keys.bool(Dict(bool, t) self) -> bool[](*)"
  3822. },
  3823. {
  3824. "name": "aten::keys.float(Dict(float, t) self) -> float[](*)"
  3825. },
  3826. {
  3827. "name": "aten::keys.complex(Dict(complex, t) self) -> complex[](*)"
  3828. },
  3829. {
  3830. "name": "aten::keys.Tensor(Dict(Tensor, t) self) -> Tensor[](*)"
  3831. },
  3832. {
  3833. "name": "aten::kl_div(Tensor self, Tensor target, int reduction=1, *, bool log_target=False) -> Tensor"
  3834. },
  3835. {
  3836. "name": "aten::kthvalue(Tensor self, SymInt k, int dim=-1, bool keepdim=False) -> (Tensor values, Tensor indices)"
  3837. },
  3838. {
  3839. "name": "aten::kthvalue.dimname(Tensor self, SymInt k, str dim, bool keepdim=False) -> (Tensor values, Tensor indices)"
  3840. },
  3841. {
  3842. "name": "aten::kthvalue.dimname_out(Tensor self, SymInt k, str dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)"
  3843. },
  3844. {
  3845. "name": "aten::kthvalue.values(Tensor self, SymInt k, int dim=-1, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)"
  3846. },
  3847. {
  3848. "name": "aten::l1_loss(Tensor self, Tensor target, int reduction=1) -> Tensor"
  3849. },
  3850. {
  3851. "name": "aten::layer_norm(Tensor input, SymInt[] normalized_shape, Tensor? weight=None, Tensor? bias=None, float eps=1.0000000000000001e-05, bool cudnn_enable=True) -> Tensor",
  3852. "category": "Normalization"
  3853. },
  3854. {
  3855. "name": "aten::le.Tensor(Tensor self, Tensor other) -> Tensor"
  3856. },
  3857. {
  3858. "name": "aten::le.Scalar(Tensor self, Scalar other) -> Tensor"
  3859. },
  3860. {
  3861. "name": "aten::le.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)"
  3862. },
  3863. {
  3864. "name": "aten::le.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  3865. },
  3866. {
  3867. "name": "aten::le.int(int a, int b) -> bool"
  3868. },
  3869. {
  3870. "name": "aten::le.float(float a, float b) -> bool"
  3871. },
  3872. {
  3873. "name": "aten::le.int_float(int a, float b) -> bool"
  3874. },
  3875. {
  3876. "name": "aten::le.float_int(float a, int b) -> bool"
  3877. },
  3878. {
  3879. "name": "aten::le(Scalar a, Scalar b) -> bool"
  3880. },
  3881. {
  3882. "name": "aten::le.str(str a, str b) -> bool"
  3883. },
  3884. {
  3885. "name": "aten::leaky_relu(Tensor self, Scalar negative_slope=0.01) -> Tensor",
  3886. "category": "Activation"
  3887. },
  3888. {
  3889. "name": "aten::leaky_relu.out(Tensor self, Scalar negative_slope=0.01, *, Tensor(a!) out) -> Tensor(a!)"
  3890. },
  3891. {
  3892. "name": "aten::leaky_relu_(Tensor(a!) self, Scalar negative_slope=0.01) -> Tensor(a!)",
  3893. "category": "Activation"
  3894. },
  3895. {
  3896. "name": "aten::len.t(t[] a) -> int"
  3897. },
  3898. {
  3899. "name": "aten::len.Tensor(Tensor t) -> int"
  3900. },
  3901. {
  3902. "name": "aten::len.str(str s) -> int"
  3903. },
  3904. {
  3905. "name": "aten::len.Dict_str(Dict(str, t) self) -> int"
  3906. },
  3907. {
  3908. "name": "aten::len.Dict_int(Dict(int, t) self) -> int"
  3909. },
  3910. {
  3911. "name": "aten::len.Dict_bool(Dict(bool, t) self) -> int"
  3912. },
  3913. {
  3914. "name": "aten::len.Dict_float(Dict(float, t) self) -> int"
  3915. },
  3916. {
  3917. "name": "aten::len.Dict_complex(Dict(complex, t) self) -> int"
  3918. },
  3919. {
  3920. "name": "aten::len.Dict_Tensor(Dict(Tensor, t) self) -> int"
  3921. },
  3922. {
  3923. "name": "aten::len.any(Any[] a) -> int"
  3924. },
  3925. {
  3926. "name": "aten::lerp.Scalar(Tensor self, Tensor end, Scalar weight) -> Tensor"
  3927. },
  3928. {
  3929. "name": "aten::lerp.Tensor(Tensor self, Tensor end, Tensor weight) -> Tensor"
  3930. },
  3931. {
  3932. "name": "aten::lerp.Scalar_out(Tensor self, Tensor end, Scalar weight, *, Tensor(a!) out) -> Tensor(a!)"
  3933. },
  3934. {
  3935. "name": "aten::lerp.Tensor_out(Tensor self, Tensor end, Tensor weight, *, Tensor(a!) out) -> Tensor(a!)"
  3936. },
  3937. {
  3938. "name": "aten::less.Tensor(Tensor self, Tensor other) -> Tensor"
  3939. },
  3940. {
  3941. "name": "aten::less.Scalar(Tensor self, Scalar other) -> Tensor"
  3942. },
  3943. {
  3944. "name": "aten::less.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)"
  3945. },
  3946. {
  3947. "name": "aten::less.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  3948. },
  3949. {
  3950. "name": "aten::less_equal.Tensor(Tensor self, Tensor other) -> Tensor"
  3951. },
  3952. {
  3953. "name": "aten::less_equal.Scalar(Tensor self, Scalar other) -> Tensor"
  3954. },
  3955. {
  3956. "name": "aten::less_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)"
  3957. },
  3958. {
  3959. "name": "aten::less_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  3960. },
  3961. {
  3962. "name": "aten::lift_fresh(Tensor(a) self) -> Tensor(a)"
  3963. },
  3964. {
  3965. "name": "aten::lift_fresh_copy(Tensor self) -> Tensor"
  3966. },
  3967. {
  3968. "name": "aten::lift_fresh_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  3969. },
  3970. {
  3971. "name": "aten::linalg_cholesky(Tensor self, *, bool upper=False) -> Tensor"
  3972. },
  3973. {
  3974. "name": "aten::linalg_cholesky.out(Tensor self, *, bool upper=False, Tensor(a!) out) -> Tensor(a!)"
  3975. },
  3976. {
  3977. "name": "aten::linalg_cholesky_ex(Tensor self, *, bool upper=False, bool check_errors=False) -> (Tensor L, Tensor info)"
  3978. },
  3979. {
  3980. "name": "aten::linalg_cholesky_ex.L(Tensor self, *, bool upper=False, bool check_errors=False, Tensor(a!) L, Tensor(b!) info) -> (Tensor(a!) L, Tensor(b!) info)"
  3981. },
  3982. {
  3983. "name": "aten::linalg_cross(Tensor self, Tensor other, *, int dim=-1) -> Tensor"
  3984. },
  3985. {
  3986. "name": "aten::linalg_cross.out(Tensor self, Tensor other, *, int dim=-1, Tensor(a!) out) -> Tensor(a!)"
  3987. },
  3988. {
  3989. "name": "aten::linalg_det(Tensor A) -> Tensor"
  3990. },
  3991. {
  3992. "name": "aten::linalg_det.out(Tensor A, *, Tensor(a!) out) -> Tensor(a!)"
  3993. },
  3994. {
  3995. "name": "aten::linalg_inv(Tensor A) -> Tensor"
  3996. },
  3997. {
  3998. "name": "aten::linalg_inv.out(Tensor A, *, Tensor(a!) out) -> Tensor(a!)"
  3999. },
  4000. {
  4001. "name": "aten::linalg_inv_ex(Tensor A, *, bool check_errors=False) -> (Tensor inverse, Tensor info)"
  4002. },
  4003. {
  4004. "name": "aten::linalg_inv_ex.inverse(Tensor A, *, bool check_errors=False, Tensor(a!) inverse, Tensor(b!) info) -> (Tensor(a!) inverse, Tensor(b!) info)"
  4005. },
  4006. {
  4007. "name": "aten::linalg_lstsq(Tensor self, Tensor b, float? rcond=None, *, str? driver=None) -> (Tensor solution, Tensor residuals, Tensor rank, Tensor singular_values)"
  4008. },
  4009. {
  4010. "name": "aten::linalg_lstsq.out(Tensor self, Tensor b, float? rcond=None, *, str? driver=None, Tensor(a!) solution, Tensor(b!) residuals, Tensor(c!) rank, Tensor(d!) singular_values) -> (Tensor(a!) solution, Tensor(b!) residuals, Tensor(c!) rank, Tensor(d!) singular_values)"
  4011. },
  4012. {
  4013. "name": "aten::linalg_matrix_norm(Tensor self, Scalar ord, int[] dim=[-2, -1], bool keepdim=False, *, ScalarType? dtype=None) -> Tensor"
  4014. },
  4015. {
  4016. "name": "aten::linalg_matrix_norm.str_ord(Tensor self, str ord=\"fro\", int[] dim=[-2, -1], bool keepdim=False, *, ScalarType? dtype=None) -> Tensor"
  4017. },
  4018. {
  4019. "name": "aten::linalg_matrix_norm.out(Tensor self, Scalar ord, int[] dim=[-2, -1], bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)"
  4020. },
  4021. {
  4022. "name": "aten::linalg_matrix_norm.str_ord_out(Tensor self, str ord=\"fro\", int[] dim=[-2, -1], bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)"
  4023. },
  4024. {
  4025. "name": "aten::linalg_norm(Tensor self, Scalar? ord=None, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor"
  4026. },
  4027. {
  4028. "name": "aten::linalg_norm.ord_str(Tensor self, str ord, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor"
  4029. },
  4030. {
  4031. "name": "aten::linalg_norm.out(Tensor self, Scalar? ord=None, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)"
  4032. },
  4033. {
  4034. "name": "aten::linalg_norm.ord_str_out(Tensor self, str ord, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)"
  4035. },
  4036. {
  4037. "name": "aten::linalg_qr(Tensor A, str mode=\"reduced\") -> (Tensor Q, Tensor R)"
  4038. },
  4039. {
  4040. "name": "aten::linalg_qr.out(Tensor A, str mode=\"reduced\", *, Tensor(a!) Q, Tensor(b!) R) -> (Tensor(a!) Q, Tensor(b!) R)"
  4041. },
  4042. {
  4043. "name": "aten::linalg_slogdet(Tensor A) -> (Tensor sign, Tensor logabsdet)"
  4044. },
  4045. {
  4046. "name": "aten::linalg_slogdet.out(Tensor A, *, Tensor(a!) sign, Tensor(b!) logabsdet) -> (Tensor(a!) sign, Tensor(b!) logabsdet)"
  4047. },
  4048. {
  4049. "name": "aten::linalg_solve(Tensor A, Tensor B, *, bool left=True) -> Tensor"
  4050. },
  4051. {
  4052. "name": "aten::linalg_solve.out(Tensor A, Tensor B, *, bool left=True, Tensor(a!) out) -> Tensor(a!)"
  4053. },
  4054. {
  4055. "name": "aten::linalg_solve_ex(Tensor A, Tensor B, *, bool left=True, bool check_errors=False) -> (Tensor result, Tensor info)"
  4056. },
  4057. {
  4058. "name": "aten::linalg_solve_ex.out(Tensor A, Tensor B, *, bool left=True, bool check_errors=False, Tensor(a!) result, Tensor(b!) info) -> (Tensor(a!) result, Tensor(b!) info)"
  4059. },
  4060. {
  4061. "name": "aten::linalg_solve_triangular(Tensor self, Tensor B, *, bool upper, bool left=True, bool unitriangular=False) -> Tensor"
  4062. },
  4063. {
  4064. "name": "aten::linalg_solve_triangular.out(Tensor self, Tensor B, *, bool upper, bool left=True, bool unitriangular=False, Tensor(a!) out) -> Tensor(a!)"
  4065. },
  4066. {
  4067. "name": "aten::linalg_svd(Tensor A, bool full_matrices=True, *, str? driver=None) -> (Tensor U, Tensor S, Tensor Vh)"
  4068. },
  4069. {
  4070. "name": "aten::linalg_svd.U(Tensor A, bool full_matrices=True, *, str? driver=None, Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh)"
  4071. },
  4072. {
  4073. "name": "aten::linalg_tensorinv(Tensor self, int ind=2) -> Tensor"
  4074. },
  4075. {
  4076. "name": "aten::linalg_tensorinv.out(Tensor self, int ind=2, *, Tensor(a!) out) -> Tensor(a!)"
  4077. },
  4078. {
  4079. "name": "aten::linalg_tensorsolve(Tensor self, Tensor other, int[]? dims=None) -> Tensor"
  4080. },
  4081. {
  4082. "name": "aten::linalg_tensorsolve.out(Tensor self, Tensor other, int[]? dims=None, *, Tensor(a!) out) -> Tensor(a!)"
  4083. },
  4084. {
  4085. "name": "aten::linalg_vector_norm(Tensor self, Scalar ord=2, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor"
  4086. },
  4087. {
  4088. "name": "aten::linalg_vector_norm.out(Tensor self, Scalar ord=2, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)"
  4089. },
  4090. {
  4091. "name": "aten::linear(Tensor input, Tensor weight, Tensor? bias=None) -> Tensor",
  4092. "category": "Layer"
  4093. },
  4094. {
  4095. "name": "aten::linear.out(Tensor input, Tensor weight, Tensor? bias=None, *, Tensor(a!) out) -> Tensor(a!)",
  4096. "category": "Layer"
  4097. },
  4098. {
  4099. "name": "aten::linear_backward.out(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))"
  4100. },
  4101. {
  4102. "name": "aten::linear_backward(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask) -> (Tensor, Tensor, Tensor)"
  4103. },
  4104. {
  4105. "name": "aten::linspace.Tensor_Tensor(Tensor start, Tensor end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  4106. },
  4107. {
  4108. "name": "aten::linspace.Tensor_Scalar(Tensor start, Scalar end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  4109. },
  4110. {
  4111. "name": "aten::linspace.Scalar_Tensor(Scalar start, Tensor end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  4112. },
  4113. {
  4114. "name": "aten::linspace(Scalar start, Scalar end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  4115. },
  4116. {
  4117. "name": "aten::linspace.out(Scalar start, Scalar end, int steps, *, Tensor(a!) out) -> Tensor(a!)"
  4118. },
  4119. {
  4120. "name": "aten::linspace.Tensor_Tensor_out(Tensor start, Tensor end, int steps, *, Tensor(a!) out) -> Tensor(a!)"
  4121. },
  4122. {
  4123. "name": "aten::linspace.Tensor_Scalar_out(Tensor start, Scalar end, int steps, *, Tensor(a!) out) -> Tensor(a!)"
  4124. },
  4125. {
  4126. "name": "aten::linspace.Scalar_Tensor_out(Scalar start, Tensor end, int steps, *, Tensor(a!) out) -> Tensor(a!)"
  4127. },
  4128. {
  4129. "name": "aten::list(str t) -> str[]"
  4130. },
  4131. {
  4132. "name": "aten::list.t(t[] l) -> t[]"
  4133. },
  4134. {
  4135. "name": "aten::list_with_default(int[] list, int[] defaults) -> int[]"
  4136. },
  4137. {
  4138. "name": "aten::log(Tensor self) -> Tensor"
  4139. },
  4140. {
  4141. "name": "aten::log.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  4142. },
  4143. {
  4144. "name": "aten::log.int(int a) -> float"
  4145. },
  4146. {
  4147. "name": "aten::log.float(float a) -> float"
  4148. },
  4149. {
  4150. "name": "aten::log.complex(complex a) -> complex"
  4151. },
  4152. {
  4153. "name": "aten::log.Scalar(Scalar a) -> Scalar"
  4154. },
  4155. {
  4156. "name": "aten::log.int_int(int a, int b) -> float"
  4157. },
  4158. {
  4159. "name": "aten::log.float_float(float a, float b) -> float"
  4160. },
  4161. {
  4162. "name": "aten::log.complex_complex(complex a, complex b) -> complex"
  4163. },
  4164. {
  4165. "name": "aten::log.int_float(int a, float b) -> float"
  4166. },
  4167. {
  4168. "name": "aten::log.float_int(float a, int b) -> float"
  4169. },
  4170. {
  4171. "name": "aten::log.int_complex(int a, complex b) -> complex"
  4172. },
  4173. {
  4174. "name": "aten::log.complex_int(complex a, int b) -> complex"
  4175. },
  4176. {
  4177. "name": "aten::log.float_complex(float a, complex b) -> complex"
  4178. },
  4179. {
  4180. "name": "aten::log.complex_float(complex a, float b) -> complex"
  4181. },
  4182. {
  4183. "name": "aten::log.Scalar_Scalar(Scalar a, Scalar b) -> float"
  4184. },
  4185. {
  4186. "name": "aten::log10(Tensor self) -> Tensor"
  4187. },
  4188. {
  4189. "name": "aten::log10.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  4190. },
  4191. {
  4192. "name": "aten::log10.int(int a) -> float"
  4193. },
  4194. {
  4195. "name": "aten::log10.float(float a) -> float"
  4196. },
  4197. {
  4198. "name": "aten::log10.complex(complex a) -> complex"
  4199. },
  4200. {
  4201. "name": "aten::log10.Scalar(Scalar a) -> Scalar"
  4202. },
  4203. {
  4204. "name": "aten::log10_(Tensor(a!) self) -> Tensor(a!)"
  4205. },
  4206. {
  4207. "name": "aten::log1p(Tensor self) -> Tensor"
  4208. },
  4209. {
  4210. "name": "aten::log1p.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  4211. },
  4212. {
  4213. "name": "aten::log1p.int(int a) -> float"
  4214. },
  4215. {
  4216. "name": "aten::log1p.float(float a) -> float"
  4217. },
  4218. {
  4219. "name": "aten::log1p.Scalar(Scalar a) -> Scalar"
  4220. },
  4221. {
  4222. "name": "aten::log1p_(Tensor(a!) self) -> Tensor(a!)"
  4223. },
  4224. {
  4225. "name": "aten::log2(Tensor self) -> Tensor"
  4226. },
  4227. {
  4228. "name": "aten::log2.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  4229. },
  4230. {
  4231. "name": "aten::log2_(Tensor(a!) self) -> Tensor(a!)"
  4232. },
  4233. {
  4234. "name": "aten::log_(Tensor(a!) self) -> Tensor(a!)"
  4235. },
  4236. {
  4237. "name": "aten::log_normal_(Tensor(a!) self, float mean=1., float std=2., *, Generator? generator=None) -> Tensor(a!)"
  4238. },
  4239. {
  4240. "name": "aten::log_sigmoid(Tensor self) -> Tensor"
  4241. },
  4242. {
  4243. "name": "aten::log_sigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  4244. },
  4245. {
  4246. "name": "aten::log_sigmoid_backward(Tensor grad_output, Tensor self, Tensor buffer) -> Tensor"
  4247. },
  4248. {
  4249. "name": "aten::log_sigmoid_backward.grad_input(Tensor grad_output, Tensor self, Tensor buffer, *, Tensor(a!) grad_input) -> Tensor(a!)"
  4250. },
  4251. {
  4252. "name": "aten::log_sigmoid_forward(Tensor self) -> (Tensor output, Tensor buffer)"
  4253. },
  4254. {
  4255. "name": "aten::log_sigmoid_forward.output(Tensor self, *, Tensor(a!) output, Tensor(b!) buffer) -> (Tensor(a!), Tensor(b!))"
  4256. },
  4257. {
  4258. "name": "aten::log_softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor",
  4259. "category": "Activation"
  4260. },
  4261. {
  4262. "name": "aten::log_softmax.Dimname(Tensor self, str dim, *, ScalarType? dtype=None) -> Tensor",
  4263. "category": "Activation"
  4264. },
  4265. {
  4266. "name": "aten::log_softmax.int_out(Tensor self, int dim, ScalarType? dtype=None, *, Tensor(a!) out) -> Tensor(a!)"
  4267. },
  4268. {
  4269. "name": "aten::logaddexp(Tensor self, Tensor other) -> Tensor"
  4270. },
  4271. {
  4272. "name": "aten::logaddexp.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  4273. },
  4274. {
  4275. "name": "aten::logaddexp2(Tensor self, Tensor other) -> Tensor"
  4276. },
  4277. {
  4278. "name": "aten::logaddexp2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  4279. },
  4280. {
  4281. "name": "aten::logcumsumexp(Tensor self, int dim) -> Tensor"
  4282. },
  4283. {
  4284. "name": "aten::logcumsumexp.dimname(Tensor self, str dim) -> Tensor"
  4285. },
  4286. {
  4287. "name": "aten::logcumsumexp.dimname_out(Tensor self, str dim, *, Tensor(a!) out) -> Tensor(a!)"
  4288. },
  4289. {
  4290. "name": "aten::logcumsumexp.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)"
  4291. },
  4292. {
  4293. "name": "aten::logdet(Tensor self) -> Tensor"
  4294. },
  4295. {
  4296. "name": "aten::logical_and(Tensor self, Tensor other) -> Tensor"
  4297. },
  4298. {
  4299. "name": "aten::logical_and.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  4300. },
  4301. {
  4302. "name": "aten::logical_and_(Tensor(a!) self, Tensor other) -> Tensor(a!)"
  4303. },
  4304. {
  4305. "name": "aten::logical_not(Tensor self) -> Tensor"
  4306. },
  4307. {
  4308. "name": "aten::logical_not.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  4309. },
  4310. {
  4311. "name": "aten::logical_not_(Tensor(a!) self) -> Tensor(a!)"
  4312. },
  4313. {
  4314. "name": "aten::logical_or(Tensor self, Tensor other) -> Tensor"
  4315. },
  4316. {
  4317. "name": "aten::logical_or.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  4318. },
  4319. {
  4320. "name": "aten::logical_or_(Tensor(a!) self, Tensor other) -> Tensor(a!)"
  4321. },
  4322. {
  4323. "name": "aten::logical_xor(Tensor self, Tensor other) -> Tensor"
  4324. },
  4325. {
  4326. "name": "aten::logical_xor.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  4327. },
  4328. {
  4329. "name": "aten::logical_xor_(Tensor(a!) self, Tensor other) -> Tensor(a!)"
  4330. },
  4331. {
  4332. "name": "aten::logit(Tensor self, float? eps=None) -> Tensor"
  4333. },
  4334. {
  4335. "name": "aten::logit.out(Tensor self, float? eps=None, *, Tensor(a!) out) -> Tensor(a!)"
  4336. },
  4337. {
  4338. "name": "aten::logit_(Tensor(a!) self, float? eps=None) -> Tensor(a!)"
  4339. },
  4340. {
  4341. "name": "aten::logit_backward(Tensor grad_output, Tensor self, float? eps=None) -> Tensor"
  4342. },
  4343. {
  4344. "name": "aten::logit_backward.grad_input(Tensor grad_output, Tensor self, float? eps=None, *, Tensor(a!) grad_input) -> Tensor(a!)"
  4345. },
  4346. {
  4347. "name": "aten::logspace.Tensor_Tensor(Tensor start, Tensor end, int steps, float base=10., *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  4348. },
  4349. {
  4350. "name": "aten::logspace.Tensor_Scalar(Tensor start, Scalar end, int steps, float base=10., *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  4351. },
  4352. {
  4353. "name": "aten::logspace.Scalar_Tensor(Scalar start, Tensor end, int steps, float base=10., *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  4354. },
  4355. {
  4356. "name": "aten::logspace(Scalar start, Scalar end, int steps, float base=10., *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  4357. },
  4358. {
  4359. "name": "aten::logspace.out(Scalar start, Scalar end, int steps, float base=10., *, Tensor(a!) out) -> Tensor(a!)"
  4360. },
  4361. {
  4362. "name": "aten::logspace.Tensor_Tensor_out(Tensor start, Tensor end, int steps, float base=10., *, Tensor(a!) out) -> Tensor(a!)"
  4363. },
  4364. {
  4365. "name": "aten::logspace.Tensor_Scalar_out(Tensor start, Scalar end, int steps, float base=10., *, Tensor(a!) out) -> Tensor(a!)"
  4366. },
  4367. {
  4368. "name": "aten::logspace.Scalar_Tensor_out(Scalar start, Tensor end, int steps, float base=10., *, Tensor(a!) out) -> Tensor(a!)"
  4369. },
  4370. {
  4371. "name": "aten::logsumexp(Tensor self, int[1] dim, bool keepdim=False) -> Tensor"
  4372. },
  4373. {
  4374. "name": "aten::logsumexp.names(Tensor self, str[1] dim, bool keepdim=False) -> Tensor"
  4375. },
  4376. {
  4377. "name": "aten::logsumexp.names_out(Tensor self, str[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)"
  4378. },
  4379. {
  4380. "name": "aten::logsumexp.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)"
  4381. },
  4382. {
  4383. "name": "aten::lower(str self) -> str"
  4384. },
  4385. {
  4386. "name": "aten::lstm.input(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor, Tensor)",
  4387. "category": "Layer"
  4388. },
  4389. {
  4390. "name": "aten::lstm.data(Tensor data, Tensor batch_sizes, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor, Tensor)",
  4391. "category": "Layer"
  4392. },
  4393. {
  4394. "name": "aten::lstm_cell(Tensor input, Tensor[] hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> (Tensor, Tensor)",
  4395. "category": "Layer"
  4396. },
  4397. {
  4398. "name": "aten::lstrip(str self, str chars=\" \\n\\t\\f\\v\") -> str"
  4399. },
  4400. {
  4401. "name": "aten::lt.Tensor(Tensor self, Tensor other) -> Tensor"
  4402. },
  4403. {
  4404. "name": "aten::lt.Scalar(Tensor self, Scalar other) -> Tensor"
  4405. },
  4406. {
  4407. "name": "aten::lt.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)"
  4408. },
  4409. {
  4410. "name": "aten::lt.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  4411. },
  4412. {
  4413. "name": "aten::lt.int(int a, int b) -> bool"
  4414. },
  4415. {
  4416. "name": "aten::lt.float(float a, float b) -> bool"
  4417. },
  4418. {
  4419. "name": "aten::lt.int_float(int a, float b) -> bool"
  4420. },
  4421. {
  4422. "name": "aten::lt.float_int(float a, int b) -> bool"
  4423. },
  4424. {
  4425. "name": "aten::lt(Scalar a, Scalar b) -> bool"
  4426. },
  4427. {
  4428. "name": "aten::lt.str(str a, str b) -> bool"
  4429. },
  4430. {
  4431. "name": "aten::mT(Tensor(a) self) -> Tensor(a)"
  4432. },
  4433. {
  4434. "name": "aten::mT.a(Tensor(a) self) -> Tensor(a)"
  4435. },
  4436. {
  4437. "name": "aten::manual_seed(int seed) -> ()"
  4438. },
  4439. {
  4440. "name": "aten::manual_seed.generator(Generator(a!) self, int seed) -> Generator(a!)"
  4441. },
  4442. {
  4443. "name": "aten::masked_fill.Scalar(Tensor self, Tensor mask, Scalar value) -> Tensor"
  4444. },
  4445. {
  4446. "name": "aten::masked_fill.Tensor(Tensor self, Tensor mask, Tensor value) -> Tensor"
  4447. },
  4448. {
  4449. "name": "aten::masked_fill.Scalar_out(Tensor self, Tensor mask, Scalar value, *, Tensor(a!) out) -> Tensor(a!)"
  4450. },
  4451. {
  4452. "name": "aten::masked_fill.Tensor_out(Tensor self, Tensor mask, Tensor value, *, Tensor(a!) out) -> Tensor(a!)"
  4453. },
  4454. {
  4455. "name": "aten::masked_fill_.Scalar(Tensor(a!) self, Tensor mask, Scalar value) -> Tensor(a!)"
  4456. },
  4457. {
  4458. "name": "aten::masked_fill_.Tensor(Tensor(a!) self, Tensor mask, Tensor value) -> Tensor(a!)"
  4459. },
  4460. {
  4461. "name": "aten::masked_scatter(Tensor self, Tensor mask, Tensor source) -> Tensor"
  4462. },
  4463. {
  4464. "name": "aten::masked_scatter.out(Tensor self, Tensor mask, Tensor source, *, Tensor(a!) out) -> Tensor(a!)"
  4465. },
  4466. {
  4467. "name": "aten::masked_scatter_(Tensor(a!) self, Tensor mask, Tensor source) -> Tensor(a!)"
  4468. },
  4469. {
  4470. "name": "aten::masked_select(Tensor self, Tensor mask) -> Tensor"
  4471. },
  4472. {
  4473. "name": "aten::masked_select.out(Tensor self, Tensor mask, *, Tensor(a!) out) -> Tensor(a!)"
  4474. },
  4475. {
  4476. "name": "aten::matmul(Tensor self, Tensor other) -> Tensor"
  4477. },
  4478. {
  4479. "name": "aten::matmul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  4480. },
  4481. {
  4482. "name": "aten::max.other(Tensor self, Tensor other) -> Tensor"
  4483. },
  4484. {
  4485. "name": "aten::max(Tensor self) -> Tensor"
  4486. },
  4487. {
  4488. "name": "aten::max.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)"
  4489. },
  4490. {
  4491. "name": "aten::max.dim_max(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!) indices)"
  4492. },
  4493. {
  4494. "name": "aten::max.names_dim(Tensor self, str dim, bool keepdim=False) -> (Tensor values, Tensor indices)"
  4495. },
  4496. {
  4497. "name": "aten::max.names_dim_max(Tensor self, str dim, bool keepdim=False, *, Tensor(a!) max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!) indices)"
  4498. },
  4499. {
  4500. "name": "aten::max.unary_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  4501. },
  4502. {
  4503. "name": "aten::max.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  4504. },
  4505. {
  4506. "name": "aten::max_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=[0], int[1] dilation=[1], bool ceil_mode=False) -> Tensor",
  4507. "category": "Pool"
  4508. },
  4509. {
  4510. "name": "aten::max_pool1d_with_indices(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=[0], int[1] dilation=[1], bool ceil_mode=False) -> (Tensor, Tensor)",
  4511. "category": "Pool"
  4512. },
  4513. {
  4514. "name": "aten::max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor",
  4515. "category": "Pool"
  4516. },
  4517. {
  4518. "name": "aten::max_pool2d_with_indices(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor)",
  4519. "category": "Pool"
  4520. },
  4521. {
  4522. "name": "aten::max_pool2d_with_indices.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))"
  4523. },
  4524. {
  4525. "name": "aten::max_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor",
  4526. "category": "Pool"
  4527. },
  4528. {
  4529. "name": "aten::max_pool3d_with_indices(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor)"
  4530. },
  4531. {
  4532. "name": "aten::max_pool3d_with_indices.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))"
  4533. },
  4534. {
  4535. "name": "aten::max_pool3d_with_indices_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, int[3] dilation, bool ceil_mode, Tensor indices) -> Tensor"
  4536. },
  4537. {
  4538. "name": "aten::max_pool3d_with_indices_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, int[3] dilation, bool ceil_mode, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)"
  4539. },
  4540. {
  4541. "name": "aten::max_unpool2d(Tensor self, Tensor indices, SymInt[2] output_size) -> Tensor",
  4542. "category": "Pool"
  4543. },
  4544. {
  4545. "name": "aten::max_unpool2d.out(Tensor self, Tensor indices, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!)"
  4546. },
  4547. {
  4548. "name": "aten::max_unpool3d(Tensor self, Tensor indices, SymInt[3] output_size, int[3] stride, int[3] padding) -> Tensor",
  4549. "category": "Pool"
  4550. },
  4551. {
  4552. "name": "aten::max_unpool3d.out(Tensor self, Tensor indices, SymInt[3] output_size, int[3] stride, int[3] padding, *, Tensor(a!) out) -> Tensor(a!)"
  4553. },
  4554. {
  4555. "name": "aten::maximum(Tensor self, Tensor other) -> Tensor"
  4556. },
  4557. {
  4558. "name": "aten::maximum.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  4559. },
  4560. {
  4561. "name": "aten::mean(Tensor self, *, ScalarType? dtype=None) -> Tensor"
  4562. },
  4563. {
  4564. "name": "aten::mean.dim(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor"
  4565. },
  4566. {
  4567. "name": "aten::mean.names_dim(Tensor self, str[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor"
  4568. },
  4569. {
  4570. "name": "aten::mean.names_out(Tensor self, str[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)"
  4571. },
  4572. {
  4573. "name": "aten::mean.out(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)"
  4574. },
  4575. {
  4576. "name": "aten::mean.dtype_out(Tensor self, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)"
  4577. },
  4578. {
  4579. "name": "aten::median(Tensor self) -> Tensor"
  4580. },
  4581. {
  4582. "name": "aten::median.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)"
  4583. },
  4584. {
  4585. "name": "aten::median.dim_values(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)"
  4586. },
  4587. {
  4588. "name": "aten::median.names_dim(Tensor self, str dim, bool keepdim=False) -> (Tensor values, Tensor indices)"
  4589. },
  4590. {
  4591. "name": "aten::median.names_dim_values(Tensor self, str dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)"
  4592. },
  4593. {
  4594. "name": "aten::median.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  4595. },
  4596. {
  4597. "name": "aten::meshgrid(Tensor[] tensors) -> Tensor[]",
  4598. "category": "Tensor"
  4599. },
  4600. {
  4601. "name": "aten::meshgrid.indexing(Tensor[] tensors, *, str indexing) -> Tensor[]",
  4602. "category": "Tensor"
  4603. },
  4604. {
  4605. "name": "aten::min.other(Tensor self, Tensor other) -> Tensor"
  4606. },
  4607. {
  4608. "name": "aten::min(Tensor self) -> Tensor"
  4609. },
  4610. {
  4611. "name": "aten::min.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)"
  4612. },
  4613. {
  4614. "name": "aten::min.dim_min(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) min, Tensor(b!) min_indices) -> (Tensor(a!) values, Tensor(b!) indices)"
  4615. },
  4616. {
  4617. "name": "aten::min.names_dim(Tensor self, str dim, bool keepdim=False) -> (Tensor values, Tensor indices)"
  4618. },
  4619. {
  4620. "name": "aten::min.names_dim_min(Tensor self, str dim, bool keepdim=False, *, Tensor(a!) min, Tensor(b!) min_indices) -> (Tensor(a!) values, Tensor(b!) indices)"
  4621. },
  4622. {
  4623. "name": "aten::min.unary_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  4624. },
  4625. {
  4626. "name": "aten::min.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  4627. },
  4628. {
  4629. "name": "aten::minimum(Tensor self, Tensor other) -> Tensor"
  4630. },
  4631. {
  4632. "name": "aten::minimum.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  4633. },
  4634. {
  4635. "name": "aten::mish(Tensor self) -> Tensor",
  4636. "category": "Activation"
  4637. },
  4638. {
  4639. "name": "aten::mish.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  4640. },
  4641. {
  4642. "name": "aten::mish_(Tensor(a!) self) -> Tensor(a!)",
  4643. "category": "Activation"
  4644. },
  4645. {
  4646. "name": "aten::mkldnn_reorder_conv2d_weight.out(Tensor self, SymInt[2] padding=[0, 0], SymInt[2] stride=[1, 1], SymInt[2] dilation=[1, 1], SymInt groups=1, SymInt[]? input_size=None, *, Tensor(a!) out) -> Tensor(a!)"
  4647. },
  4648. {
  4649. "name": "aten::mkldnn_reorder_conv2d_weight(Tensor self, SymInt[2] padding=[0, 0], SymInt[2] stride=[1, 1], SymInt[2] dilation=[1, 1], SymInt groups=1, SymInt[]? input_size=None) -> Tensor"
  4650. },
  4651. {
  4652. "name": "aten::mkldnn_rnn_layer(Tensor input, Tensor weight0, Tensor weight1, Tensor weight2, Tensor weight3, Tensor hx_, Tensor cx_, bool reverse, int[] batch_sizes, int mode, int hidden_size, int num_layers, bool has_biases, bool bidirectional, bool batch_first, bool train) -> (Tensor, Tensor, Tensor, Tensor)"
  4653. },
  4654. {
  4655. "name": "aten::mkldnn_rnn_layer.out(Tensor input, Tensor weight0, Tensor weight1, Tensor weight2, Tensor weight3, Tensor hx_, Tensor cx_, bool reverse, int[] batch_sizes, int mode, int hidden_size, int num_layers, bool has_biases, bool bidirectional, bool batch_first, bool train, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))"
  4656. },
  4657. {
  4658. "name": "aten::mm(Tensor self, Tensor mat2) -> Tensor"
  4659. },
  4660. {
  4661. "name": "aten::mm.out(Tensor self, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!)"
  4662. },
  4663. {
  4664. "name": "aten::mm.dtype_out(Tensor self, Tensor mat2, ScalarType out_dtype, *, Tensor(a!) out) -> Tensor(a!)"
  4665. },
  4666. {
  4667. "name": "aten::mm.dtype(Tensor self, Tensor mat2, ScalarType out_dtype) -> Tensor"
  4668. },
  4669. {
  4670. "name": "aten::mode(Tensor self, int dim=-1, bool keepdim=False) -> (Tensor values, Tensor indices)"
  4671. },
  4672. {
  4673. "name": "aten::mode.dimname(Tensor self, str dim, bool keepdim=False) -> (Tensor values, Tensor indices)"
  4674. },
  4675. {
  4676. "name": "aten::mode.dimname_out(Tensor self, str dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)"
  4677. },
  4678. {
  4679. "name": "aten::mode.values(Tensor self, int dim=-1, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)"
  4680. },
  4681. {
  4682. "name": "aten::moveaxis.intlist(Tensor(a) self, int[] source, int[] destination) -> Tensor(a)"
  4683. },
  4684. {
  4685. "name": "aten::moveaxis.int(Tensor(a) self, int source, int destination) -> Tensor(a)"
  4686. },
  4687. {
  4688. "name": "aten::movedim.int(Tensor(a) self, int source, int destination) -> Tensor(a)"
  4689. },
  4690. {
  4691. "name": "aten::movedim.intlist(Tensor(a) self, int[] source, int[] destination) -> Tensor(a)"
  4692. },
  4693. {
  4694. "name": "aten::mse_loss(Tensor self, Tensor target, int reduction=1) -> Tensor"
  4695. },
  4696. {
  4697. "name": "aten::mse_loss.out(Tensor self, Tensor target, int reduction=1, *, Tensor(a!) out) -> Tensor(a!)"
  4698. },
  4699. {
  4700. "name": "aten::mse_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction) -> Tensor"
  4701. },
  4702. {
  4703. "name": "aten::mse_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, *, Tensor(a!) grad_input) -> Tensor(a!)"
  4704. },
  4705. {
  4706. "name": "aten::mul.Tensor(Tensor self, Tensor other) -> Tensor"
  4707. },
  4708. {
  4709. "name": "aten::mul.Scalar(Tensor self, Scalar other) -> Tensor"
  4710. },
  4711. {
  4712. "name": "aten::mul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  4713. },
  4714. {
  4715. "name": "aten::mul.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)"
  4716. },
  4717. {
  4718. "name": "aten::mul.left_t(t[] l, int n) -> t[]"
  4719. },
  4720. {
  4721. "name": "aten::mul.right_(int n, t[] l) -> t[]"
  4722. },
  4723. {
  4724. "name": "aten::mul.int(int a, int b) -> int"
  4725. },
  4726. {
  4727. "name": "aten::mul.complex(complex a, complex b) -> complex"
  4728. },
  4729. {
  4730. "name": "aten::mul.float(float a, float b) -> float"
  4731. },
  4732. {
  4733. "name": "aten::mul.int_complex(int a, complex b) -> complex"
  4734. },
  4735. {
  4736. "name": "aten::mul.complex_int(complex a, int b) -> complex"
  4737. },
  4738. {
  4739. "name": "aten::mul.float_complex(float a, complex b) -> complex"
  4740. },
  4741. {
  4742. "name": "aten::mul.complex_float(complex a, float b) -> complex"
  4743. },
  4744. {
  4745. "name": "aten::mul.int_float(int a, float b) -> float"
  4746. },
  4747. {
  4748. "name": "aten::mul.float_int(float a, int b) -> float"
  4749. },
  4750. {
  4751. "name": "aten::mul(Scalar a, Scalar b) -> Scalar"
  4752. },
  4753. {
  4754. "name": "aten::mul_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)"
  4755. },
  4756. {
  4757. "name": "aten::mul_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)"
  4758. },
  4759. {
  4760. "name": "aten::mul_.t(t[](a!) l, int n) -> t[](a!)"
  4761. },
  4762. {
  4763. "name": "aten::multinomial(Tensor self, SymInt num_samples, bool replacement=False, *, Generator? generator=None) -> Tensor"
  4764. },
  4765. {
  4766. "name": "aten::multinomial.out(Tensor self, SymInt num_samples, bool replacement=False, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)"
  4767. },
  4768. {
  4769. "name": "aten::multiply.Tensor(Tensor self, Tensor other) -> Tensor"
  4770. },
  4771. {
  4772. "name": "aten::multiply.Scalar(Tensor self, Scalar other) -> Tensor"
  4773. },
  4774. {
  4775. "name": "aten::multiply.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  4776. },
  4777. {
  4778. "name": "aten::multiply_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)"
  4779. },
  4780. {
  4781. "name": "aten::multiply_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)"
  4782. },
  4783. {
  4784. "name": "aten::mv(Tensor self, Tensor vec) -> Tensor"
  4785. },
  4786. {
  4787. "name": "aten::mv.out(Tensor self, Tensor vec, *, Tensor(a!) out) -> Tensor(a!)"
  4788. },
  4789. {
  4790. "name": "aten::mvlgamma(Tensor self, int p) -> Tensor"
  4791. },
  4792. {
  4793. "name": "aten::mvlgamma.out(Tensor self, int p, *, Tensor(a!) out) -> Tensor(a!)"
  4794. },
  4795. {
  4796. "name": "aten::mvlgamma_(Tensor(a!) self, int p) -> Tensor(a!)"
  4797. },
  4798. {
  4799. "name": "aten::nan_to_num(Tensor self, float? nan=None, float? posinf=None, float? neginf=None) -> Tensor"
  4800. },
  4801. {
  4802. "name": "aten::nan_to_num.out(Tensor self, float? nan=None, float? posinf=None, float? neginf=None, *, Tensor(a!) out) -> Tensor(a!)"
  4803. },
  4804. {
  4805. "name": "aten::nan_to_num_(Tensor(a!) self, float? nan=None, float? posinf=None, float? neginf=None) -> Tensor(a!)"
  4806. },
  4807. {
  4808. "name": "aten::narrow(Tensor(a) self, int dim, SymInt start, SymInt length) -> Tensor(a)"
  4809. },
  4810. {
  4811. "name": "aten::narrow.Tensor(Tensor(a) self, int dim, Tensor start, SymInt length) -> Tensor(a)"
  4812. },
  4813. {
  4814. "name": "aten::narrow_copy(Tensor self, int dim, SymInt start, SymInt length) -> Tensor"
  4815. },
  4816. {
  4817. "name": "aten::narrow_copy.out(Tensor self, int dim, SymInt start, SymInt length, *, Tensor(a!) out) -> Tensor(a!)"
  4818. },
  4819. {
  4820. "name": "aten::native_dropout(Tensor input, float p, bool? train) -> (Tensor, Tensor)"
  4821. },
  4822. {
  4823. "name": "aten::native_dropout.out(Tensor input, float p, bool? train, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))"
  4824. },
  4825. {
  4826. "name": "aten::native_group_norm(Tensor input, Tensor? weight, Tensor? bias, SymInt N, SymInt C, SymInt HxW, int group, float eps) -> (Tensor, Tensor, Tensor)"
  4827. },
  4828. {
  4829. "name": "aten::native_group_norm.out(Tensor input, Tensor? weight, Tensor? bias, SymInt N, SymInt C, SymInt HxW, int group, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))"
  4830. },
  4831. {
  4832. "name": "aten::native_layer_norm(Tensor input, SymInt[] normalized_shape, Tensor? weight, Tensor? bias, float eps) -> (Tensor, Tensor, Tensor)",
  4833. "category": "Normalization"
  4834. },
  4835. {
  4836. "name": "aten::native_layer_norm.out(Tensor input, SymInt[] normalized_shape, Tensor? weight, Tensor? bias, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))",
  4837. "category": "Normalization"
  4838. },
  4839. {
  4840. "name": "aten::ne.Tensor(Tensor self, Tensor other) -> Tensor"
  4841. },
  4842. {
  4843. "name": "aten::ne.Scalar(Tensor self, Scalar other) -> Tensor"
  4844. },
  4845. {
  4846. "name": "aten::ne.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)"
  4847. },
  4848. {
  4849. "name": "aten::ne.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  4850. },
  4851. {
  4852. "name": "aten::ne.int_list(int[] a, int[] b) -> bool"
  4853. },
  4854. {
  4855. "name": "aten::ne.device(Device a, Device b) -> bool"
  4856. },
  4857. {
  4858. "name": "aten::ne.bool(bool a, bool b) -> bool"
  4859. },
  4860. {
  4861. "name": "aten::ne.enum(AnyEnumType a, AnyEnumType b) -> bool"
  4862. },
  4863. {
  4864. "name": "aten::ne.int(int a, int b) -> bool"
  4865. },
  4866. {
  4867. "name": "aten::ne.complex(complex a, complex b) -> bool"
  4868. },
  4869. {
  4870. "name": "aten::ne.float(float a, float b) -> bool"
  4871. },
  4872. {
  4873. "name": "aten::ne.int_float(int a, float b) -> bool"
  4874. },
  4875. {
  4876. "name": "aten::ne.float_int(float a, int b) -> bool"
  4877. },
  4878. {
  4879. "name": "aten::ne.float_complex(float a, complex b) -> bool"
  4880. },
  4881. {
  4882. "name": "aten::ne.complex_float(complex a, float b) -> bool"
  4883. },
  4884. {
  4885. "name": "aten::ne(Scalar a, Scalar b) -> bool"
  4886. },
  4887. {
  4888. "name": "aten::ne.str(str a, str b) -> bool"
  4889. },
  4890. {
  4891. "name": "aten::ne.float_list(float[] a, float[] b) -> bool"
  4892. },
  4893. {
  4894. "name": "aten::ne.Tensor_list(Tensor[] a, Tensor[] b) -> bool"
  4895. },
  4896. {
  4897. "name": "aten::ne.bool_list(bool[] a, bool[] b) -> bool"
  4898. },
  4899. {
  4900. "name": "aten::ne.str_list(str[] a, str[] b) -> bool"
  4901. },
  4902. {
  4903. "name": "aten::neg(Tensor self) -> Tensor"
  4904. },
  4905. {
  4906. "name": "aten::neg.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  4907. },
  4908. {
  4909. "name": "aten::neg.int(int a) -> int"
  4910. },
  4911. {
  4912. "name": "aten::neg.float(float a) -> float"
  4913. },
  4914. {
  4915. "name": "aten::neg.complex(complex a) -> complex"
  4916. },
  4917. {
  4918. "name": "aten::neg.Scalar(Scalar a) -> Scalar"
  4919. },
  4920. {
  4921. "name": "aten::nested_to_padded_tensor(Tensor self, float padding, int[]? output_size=None) -> Tensor"
  4922. },
  4923. {
  4924. "name": "aten::new_empty(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  4925. },
  4926. {
  4927. "name": "aten::new_empty.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)"
  4928. },
  4929. {
  4930. "name": "aten::new_empty_strided(Tensor self, SymInt[] size, SymInt[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  4931. },
  4932. {
  4933. "name": "aten::new_empty_strided.out(Tensor self, SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!)"
  4934. },
  4935. {
  4936. "name": "aten::new_full(Tensor self, SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  4937. },
  4938. {
  4939. "name": "aten::new_full.out(Tensor self, SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!)"
  4940. },
  4941. {
  4942. "name": "aten::new_ones(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  4943. },
  4944. {
  4945. "name": "aten::new_ones.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)"
  4946. },
  4947. {
  4948. "name": "aten::new_zeros(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  4949. },
  4950. {
  4951. "name": "aten::new_zeros.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)"
  4952. },
  4953. {
  4954. "name": "aten::nll_loss(Tensor self, Tensor target, Tensor? weight=None, int reduction=1, SymInt ignore_index=-100) -> Tensor"
  4955. },
  4956. {
  4957. "name": "aten::nll_loss.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=1, SymInt ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!)"
  4958. },
  4959. {
  4960. "name": "aten::nll_loss2d(Tensor self, Tensor target, Tensor? weight=None, int reduction=1, SymInt ignore_index=-100) -> Tensor"
  4961. },
  4962. {
  4963. "name": "aten::nll_loss2d.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=1, SymInt ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!)"
  4964. },
  4965. {
  4966. "name": "aten::nll_loss_nd(Tensor self, Tensor target, Tensor? weight=None, int reduction=1, SymInt ignore_index=-100) -> Tensor"
  4967. },
  4968. {
  4969. "name": "aten::nonzero(Tensor self) -> Tensor"
  4970. },
  4971. {
  4972. "name": "aten::nonzero.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  4973. },
  4974. {
  4975. "name": "aten::nonzero_numpy(Tensor self) -> Tensor[]"
  4976. },
  4977. {
  4978. "name": "aten::norm.Scalar(Tensor self, Scalar p=2) -> Tensor"
  4979. },
  4980. {
  4981. "name": "aten::norm.ScalarOpt_dim(Tensor self, Scalar? p, int[1] dim, bool keepdim=False) -> Tensor"
  4982. },
  4983. {
  4984. "name": "aten::norm.names_ScalarOpt_dim(Tensor self, Scalar? p, str[1] dim, bool keepdim=False) -> Tensor"
  4985. },
  4986. {
  4987. "name": "aten::norm.ScalarOpt_dim_dtype(Tensor self, Scalar? p, int[1] dim, bool keepdim, *, ScalarType dtype) -> Tensor"
  4988. },
  4989. {
  4990. "name": "aten::norm.dtype_out(Tensor self, Scalar? p, int[1] dim, bool keepdim, *, ScalarType dtype, Tensor(a!) out) -> Tensor(a!)"
  4991. },
  4992. {
  4993. "name": "aten::norm.out(Tensor self, Scalar? p, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)"
  4994. },
  4995. {
  4996. "name": "aten::norm.ScalarOpt_dtype(Tensor self, Scalar? p, *, ScalarType dtype) -> Tensor"
  4997. },
  4998. {
  4999. "name": "aten::norm.ScalarOpt_dtype_out(Tensor self, Scalar? p, *, ScalarType dtype, Tensor(a!) out) -> Tensor(a!)"
  5000. },
  5001. {
  5002. "name": "aten::norm.Scalar_out(Tensor self, Scalar p=2, *, Tensor(a!) out) -> Tensor(a!)"
  5003. },
  5004. {
  5005. "name": "aten::norm.names_ScalarOpt_dim_dtype(Tensor self, Scalar? p, str[1] dim, bool keepdim, *, ScalarType dtype) -> Tensor"
  5006. },
  5007. {
  5008. "name": "aten::norm.names_dtype_out(Tensor self, Scalar? p, str[1] dim, bool keepdim, *, ScalarType dtype, Tensor(a!) out) -> Tensor(a!)"
  5009. },
  5010. {
  5011. "name": "aten::norm.names_out(Tensor self, Scalar? p, str[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)"
  5012. },
  5013. {
  5014. "name": "aten::normal.Tensor_float(Tensor mean, float std=1., *, Generator? generator=None) -> Tensor"
  5015. },
  5016. {
  5017. "name": "aten::normal.Tensor_float_out(Tensor mean, float std=1., *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)"
  5018. },
  5019. {
  5020. "name": "aten::normal.float_Tensor_out(float mean, Tensor std, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)"
  5021. },
  5022. {
  5023. "name": "aten::normal.float_Tensor(float mean, Tensor std, *, Generator? generator=None) -> Tensor"
  5024. },
  5025. {
  5026. "name": "aten::normal.Tensor_Tensor(Tensor mean, Tensor std, *, Generator? generator=None) -> Tensor"
  5027. },
  5028. {
  5029. "name": "aten::normal.Tensor_Tensor_out(Tensor mean, Tensor std, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)"
  5030. },
  5031. {
  5032. "name": "aten::normal.float_float(float mean, float std, SymInt[] size, *, Generator? generator=None, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  5033. },
  5034. {
  5035. "name": "aten::normal.float_float_out(float mean, float std, SymInt[] size, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)"
  5036. },
  5037. {
  5038. "name": "aten::normal.out(Tensor self, float mean=0., float std=1., *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)"
  5039. },
  5040. {
  5041. "name": "aten::normal_(Tensor(a!) self, float mean=0., float std=1., *, Generator? generator=None) -> Tensor(a!)"
  5042. },
  5043. {
  5044. "name": "aten::not_equal.Tensor(Tensor self, Tensor other) -> Tensor"
  5045. },
  5046. {
  5047. "name": "aten::not_equal.Scalar(Tensor self, Scalar other) -> Tensor"
  5048. },
  5049. {
  5050. "name": "aten::not_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)"
  5051. },
  5052. {
  5053. "name": "aten::not_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  5054. },
  5055. {
  5056. "name": "aten::nuclear_norm(Tensor self, bool keepdim=False) -> Tensor"
  5057. },
  5058. {
  5059. "name": "aten::nuclear_norm.dim(Tensor self, int[2] dim, bool keepdim=False) -> Tensor"
  5060. },
  5061. {
  5062. "name": "aten::nuclear_norm.out(Tensor self, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)"
  5063. },
  5064. {
  5065. "name": "aten::nuclear_norm.dim_out(Tensor self, int[2] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)"
  5066. },
  5067. {
  5068. "name": "aten::numel(Tensor self) -> int"
  5069. },
  5070. {
  5071. "name": "aten::numpy_T(Tensor(a) self) -> Tensor(a)"
  5072. },
  5073. {
  5074. "name": "aten::numpy_T.a(Tensor(a) self) -> Tensor(a)"
  5075. },
  5076. {
  5077. "name": "aten::one_hot(Tensor self, int num_classes=-1) -> Tensor"
  5078. },
  5079. {
  5080. "name": "aten::ones.names(int[] size, *, str[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  5081. },
  5082. {
  5083. "name": "aten::ones(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  5084. },
  5085. {
  5086. "name": "aten::ones.names_out(int[] size, *, str[]? names, Tensor(a!) out) -> Tensor(a!)"
  5087. },
  5088. {
  5089. "name": "aten::ones.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)"
  5090. },
  5091. {
  5092. "name": "aten::ones_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor"
  5093. },
  5094. {
  5095. "name": "aten::ones_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)"
  5096. },
  5097. {
  5098. "name": "aten::ord(str string) -> int"
  5099. },
  5100. {
  5101. "name": "aten::outer(Tensor self, Tensor vec2) -> Tensor"
  5102. },
  5103. {
  5104. "name": "aten::outer.out(Tensor self, Tensor vec2, *, Tensor(a!) out) -> Tensor(a!)"
  5105. },
  5106. {
  5107. "name": "aten::pad(Tensor self, SymInt[] pad, str mode=\"constant\", float? value=None) -> Tensor",
  5108. "category": "Tensor"
  5109. },
  5110. {
  5111. "name": "aten::pad_sequence(Tensor[] sequences, bool batch_first=False, float padding_value=0., str padding_side=\"right\") -> Tensor"
  5112. },
  5113. {
  5114. "name": "aten::pairwise_distance(Tensor x1, Tensor x2, float p=2., float eps=9.9999999999999995e-07, bool keepdim=False) -> Tensor"
  5115. },
  5116. {
  5117. "name": "aten::pdist(Tensor self, float p=2.) -> Tensor"
  5118. },
  5119. {
  5120. "name": "aten::percentFormat(str self, ...) -> str"
  5121. },
  5122. {
  5123. "name": "aten::permute(Tensor(a) self, int[] dims) -> Tensor(a)",
  5124. "category": "Shape"
  5125. },
  5126. {
  5127. "name": "aten::permute_copy(Tensor self, int[] dims) -> Tensor"
  5128. },
  5129. {
  5130. "name": "aten::permute_copy.out(Tensor self, int[] dims, *, Tensor(a!) out) -> Tensor(a!)"
  5131. },
  5132. {
  5133. "name": "aten::pin_memory(Tensor(a) self, Device? device=None) -> Tensor(a)"
  5134. },
  5135. {
  5136. "name": "aten::pinverse(Tensor self, float rcond=1.0000000000000001e-15) -> Tensor"
  5137. },
  5138. {
  5139. "name": "aten::pixel_shuffle(Tensor self, int upscale_factor) -> Tensor"
  5140. },
  5141. {
  5142. "name": "aten::pixel_shuffle.out(Tensor self, int upscale_factor, *, Tensor(a!) out) -> Tensor(a!)"
  5143. },
  5144. {
  5145. "name": "aten::pixel_unshuffle(Tensor self, int downscale_factor) -> Tensor"
  5146. },
  5147. {
  5148. "name": "aten::pixel_unshuffle.out(Tensor self, int downscale_factor, *, Tensor(a!) out) -> Tensor(a!)"
  5149. },
  5150. {
  5151. "name": "aten::poisson(Tensor self, Generator? generator=None) -> Tensor"
  5152. },
  5153. {
  5154. "name": "aten::poisson.out(Tensor self, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!)"
  5155. },
  5156. {
  5157. "name": "aten::polar(Tensor abs, Tensor angle) -> Tensor"
  5158. },
  5159. {
  5160. "name": "aten::polar.out(Tensor abs, Tensor angle, *, Tensor(a!) out) -> Tensor(a!)"
  5161. },
  5162. {
  5163. "name": "aten::polar.int(int a, int b) -> complex"
  5164. },
  5165. {
  5166. "name": "aten::polar.float(float a, float b) -> complex"
  5167. },
  5168. {
  5169. "name": "aten::polar.int_float(int a, float b) -> complex"
  5170. },
  5171. {
  5172. "name": "aten::polar.float_int(float a, int b) -> complex"
  5173. },
  5174. {
  5175. "name": "aten::polar.Scalar_Scalar(Scalar a, Scalar b) -> Scalar"
  5176. },
  5177. {
  5178. "name": "aten::pop.t(t[](a!) self, int idx=-1) -> t(*)"
  5179. },
  5180. {
  5181. "name": "aten::pop.Dict_str(Dict(str, t)(a!) self, str key) -> t(*)"
  5182. },
  5183. {
  5184. "name": "aten::pop.Dict_default_str(Dict(str, t)(a!) self, str key, t default_value) -> t(*)"
  5185. },
  5186. {
  5187. "name": "aten::pop.Dict_int(Dict(int, t)(a!) self, int key) -> t(*)"
  5188. },
  5189. {
  5190. "name": "aten::pop.Dict_default_int(Dict(int, t)(a!) self, int key, t default_value) -> t(*)"
  5191. },
  5192. {
  5193. "name": "aten::pop.Dict_bool(Dict(bool, t)(a!) self, bool key) -> t(*)"
  5194. },
  5195. {
  5196. "name": "aten::pop.Dict_default_bool(Dict(bool, t)(a!) self, bool key, t default_value) -> t(*)"
  5197. },
  5198. {
  5199. "name": "aten::pop.Dict_float(Dict(float, t)(a!) self, float key) -> t(*)"
  5200. },
  5201. {
  5202. "name": "aten::pop.Dict_default_float(Dict(float, t)(a!) self, float key, t default_value) -> t(*)"
  5203. },
  5204. {
  5205. "name": "aten::pop.Dict_complex(Dict(complex, t)(a!) self, complex key) -> t(*)"
  5206. },
  5207. {
  5208. "name": "aten::pop.Dict_default_complex(Dict(complex, t)(a!) self, complex key, t default_value) -> t(*)"
  5209. },
  5210. {
  5211. "name": "aten::pop.Dict_Tensor(Dict(Tensor, t)(a!) self, Tensor key) -> t(*)"
  5212. },
  5213. {
  5214. "name": "aten::pop.Dict_default_Tensor(Dict(Tensor, t)(a!) self, Tensor key, t default_value) -> t(*)"
  5215. },
  5216. {
  5217. "name": "aten::pow.Tensor_Tensor(Tensor self, Tensor exponent) -> Tensor"
  5218. },
  5219. {
  5220. "name": "aten::pow.Tensor_Scalar(Tensor self, Scalar exponent) -> Tensor"
  5221. },
  5222. {
  5223. "name": "aten::pow.Scalar(Scalar self, Tensor exponent) -> Tensor"
  5224. },
  5225. {
  5226. "name": "aten::pow.Scalar_out(Scalar self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!)"
  5227. },
  5228. {
  5229. "name": "aten::pow.Tensor_Scalar_out(Tensor self, Scalar exponent, *, Tensor(a!) out) -> Tensor(a!)"
  5230. },
  5231. {
  5232. "name": "aten::pow.Tensor_Tensor_out(Tensor self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!)"
  5233. },
  5234. {
  5235. "name": "aten::pow.int(int a, int b) -> float"
  5236. },
  5237. {
  5238. "name": "aten::pow.complex(complex a, complex b) -> complex"
  5239. },
  5240. {
  5241. "name": "aten::pow.float(float a, float b) -> float"
  5242. },
  5243. {
  5244. "name": "aten::pow.int_float(int a, float b) -> float"
  5245. },
  5246. {
  5247. "name": "aten::pow.float_int(float a, int b) -> float"
  5248. },
  5249. {
  5250. "name": "aten::pow.float_complex(float a, complex b) -> complex"
  5251. },
  5252. {
  5253. "name": "aten::pow.complex_float(complex a, float b) -> complex"
  5254. },
  5255. {
  5256. "name": "aten::pow.Scalar_Scalar(Scalar a, Scalar b) -> float"
  5257. },
  5258. {
  5259. "name": "aten::pow.int_to_int(int a, int b) -> int"
  5260. },
  5261. {
  5262. "name": "aten::pow_.Scalar(Tensor(a!) self, Scalar exponent) -> Tensor(a!)"
  5263. },
  5264. {
  5265. "name": "aten::pow_.Tensor(Tensor(a!) self, Tensor exponent) -> Tensor(a!)"
  5266. },
  5267. {
  5268. "name": "aten::prelu(Tensor self, Tensor weight) -> Tensor",
  5269. "category": "Activation"
  5270. },
  5271. {
  5272. "name": "aten::prod(Tensor self, *, ScalarType? dtype=None) -> Tensor"
  5273. },
  5274. {
  5275. "name": "aten::prod.dim_int(Tensor self, int dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor"
  5276. },
  5277. {
  5278. "name": "aten::prod.dim_Dimname(Tensor self, str dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor"
  5279. },
  5280. {
  5281. "name": "aten::prod.Dimname_out(Tensor self, str dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)"
  5282. },
  5283. {
  5284. "name": "aten::prod.int_out(Tensor self, int dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)"
  5285. },
  5286. {
  5287. "name": "aten::prod.out(Tensor self, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)"
  5288. },
  5289. {
  5290. "name": "aten::put_(Tensor(a!) self, Tensor index, Tensor source, bool accumulate=False) -> Tensor(a!)"
  5291. },
  5292. {
  5293. "name": "aten::quantile(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation=\"linear\") -> Tensor"
  5294. },
  5295. {
  5296. "name": "aten::quantile.scalar(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation=\"linear\") -> Tensor"
  5297. },
  5298. {
  5299. "name": "aten::quantile.out(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation=\"linear\", Tensor(a!) out) -> Tensor(a!)"
  5300. },
  5301. {
  5302. "name": "aten::quantile.scalar_out(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation=\"linear\", Tensor(a!) out) -> Tensor(a!)"
  5303. },
  5304. {
  5305. "name": "aten::quantize_per_channel(Tensor self, Tensor scales, Tensor zero_points, int axis, ScalarType dtype) -> Tensor",
  5306. "category": "Quantization"
  5307. },
  5308. {
  5309. "name": "aten::quantize_per_channel.out(Tensor self, Tensor scales, Tensor zero_points, int axis, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!)"
  5310. },
  5311. {
  5312. "name": "aten::quantize_per_tensor(Tensor self, float scale, int zero_point, ScalarType dtype) -> Tensor",
  5313. "category": "Quantization"
  5314. },
  5315. {
  5316. "name": "aten::quantize_per_tensor.tensor_qparams(Tensor self, Tensor scale, Tensor zero_point, ScalarType dtype) -> Tensor",
  5317. "category": "Quantization"
  5318. },
  5319. {
  5320. "name": "aten::quantize_per_tensor.tensors(Tensor[] tensors, Tensor scales, Tensor zero_points, ScalarType dtype) -> Tensor[]",
  5321. "category": "Quantization"
  5322. },
  5323. {
  5324. "name": "aten::quantize_per_tensor.out(Tensor self, float scale, int zero_point, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!)"
  5325. },
  5326. {
  5327. "name": "aten::quantize_per_tensor.tensor_qparams_out(Tensor self, Tensor scale, Tensor zero_point, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!)"
  5328. },
  5329. {
  5330. "name": "aten::quantize_per_tensor.tensors_out(Tensor[] tensors, Tensor scales, Tensor zero_points, ScalarType dtype, *, Tensor(a!)[] out) -> ()"
  5331. },
  5332. {
  5333. "name": "aten::quantize_per_tensor_dynamic(Tensor self, ScalarType dtype, bool reduce_range) -> Tensor",
  5334. "category": "Quantization"
  5335. },
  5336. {
  5337. "name": "aten::quantize_per_tensor_dynamic.out(Tensor self, ScalarType dtype, bool reduce_range, *, Tensor(a!) out) -> Tensor(a!)"
  5338. },
  5339. {
  5340. "name": "aten::quantized_gru.input(Tensor input, Tensor hx, __torch__.torch.classes.rnn.CellParamsBase[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor)",
  5341. "category": "Layer"
  5342. },
  5343. {
  5344. "name": "aten::quantized_gru.data(Tensor data, Tensor batch_sizes, Tensor hx, __torch__.torch.classes.rnn.CellParamsBase[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor)",
  5345. "category": "Layer"
  5346. },
  5347. {
  5348. "name": "aten::quantized_gru.input_legacy(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor)",
  5349. "category": "Layer"
  5350. },
  5351. {
  5352. "name": "aten::quantized_gru.data_legacy(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor)",
  5353. "category": "Layer"
  5354. },
  5355. {
  5356. "name": "aten::quantized_lstm.input(Tensor input, Tensor[] hx, __torch__.torch.classes.rnn.CellParamsBase[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first, *, ScalarType? dtype=None, bool use_dynamic=False) -> (Tensor, Tensor, Tensor)",
  5357. "category": "Layer"
  5358. },
  5359. {
  5360. "name": "aten::quantized_lstm.data(Tensor data, Tensor batch_sizes, Tensor[] hx, __torch__.torch.classes.rnn.CellParamsBase[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, *, ScalarType? dtype=None, bool use_dynamic=False) -> (Tensor, Tensor, Tensor)",
  5361. "category": "Layer"
  5362. },
  5363. {
  5364. "name": "aten::quantized_lstm.input_legacy(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first, *, ScalarType? dtype=None, bool use_dynamic=False) -> (Tensor, Tensor, Tensor)",
  5365. "category": "Layer"
  5366. },
  5367. {
  5368. "name": "aten::quantized_lstm.data_legacy(Tensor data, Tensor batch_sizes, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, *, ScalarType? dtype=None, bool use_dynamic=False) -> (Tensor, Tensor, Tensor)",
  5369. "category": "Layer"
  5370. },
  5371. {
  5372. "name": "aten::quantized_lstm_cell(Tensor input, Tensor[] hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> (Tensor, Tensor)"
  5373. },
  5374. {
  5375. "name": "aten::rand(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  5376. },
  5377. {
  5378. "name": "aten::rand.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  5379. },
  5380. {
  5381. "name": "aten::rand.names(SymInt[] size, *, str[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  5382. },
  5383. {
  5384. "name": "aten::rand.generator_with_names(SymInt[] size, *, Generator? generator, str[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  5385. },
  5386. {
  5387. "name": "aten::rand.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)"
  5388. },
  5389. {
  5390. "name": "aten::rand.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)"
  5391. },
  5392. {
  5393. "name": "aten::rand.names_out(SymInt[] size, *, str[]? names, Tensor(a!) out) -> Tensor(a!)"
  5394. },
  5395. {
  5396. "name": "aten::rand.generator_with_names_out(SymInt[] size, *, Generator? generator, str[]? names, Tensor(a!) out) -> Tensor(a!)"
  5397. },
  5398. {
  5399. "name": "aten::rand_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor"
  5400. },
  5401. {
  5402. "name": "aten::rand_like.generator(Tensor self, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor"
  5403. },
  5404. {
  5405. "name": "aten::rand_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)"
  5406. },
  5407. {
  5408. "name": "aten::rand_like.generator_out(Tensor self, *, Generator? generator, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)"
  5409. },
  5410. {
  5411. "name": "aten::randint(SymInt high, SymInt[] size, *, ScalarType? dtype=4, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  5412. },
  5413. {
  5414. "name": "aten::randint.generator(SymInt high, SymInt[] size, *, Generator? generator, ScalarType? dtype=4, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  5415. },
  5416. {
  5417. "name": "aten::randint.low(SymInt low, SymInt high, SymInt[] size, *, ScalarType? dtype=4, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  5418. },
  5419. {
  5420. "name": "aten::randint.low_generator(SymInt low, SymInt high, SymInt[] size, *, Generator? generator, ScalarType? dtype=4, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  5421. },
  5422. {
  5423. "name": "aten::randint.out(SymInt high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)"
  5424. },
  5425. {
  5426. "name": "aten::randint.generator_out(SymInt high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)"
  5427. },
  5428. {
  5429. "name": "aten::randint.low_out(SymInt low, SymInt high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)"
  5430. },
  5431. {
  5432. "name": "aten::randint.low_generator_out(SymInt low, SymInt high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)"
  5433. },
  5434. {
  5435. "name": "aten::randint_like(Tensor self, SymInt high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor"
  5436. },
  5437. {
  5438. "name": "aten::randint_like.Tensor(Tensor self, Tensor high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor"
  5439. },
  5440. {
  5441. "name": "aten::randint_like.low_dtype(Tensor self, SymInt low, SymInt high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor"
  5442. },
  5443. {
  5444. "name": "aten::randint_like.generator(Tensor self, SymInt high, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor"
  5445. },
  5446. {
  5447. "name": "aten::randint_like.Tensor_generator(Tensor self, Tensor high, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor"
  5448. },
  5449. {
  5450. "name": "aten::randint_like.low_generator_dtype(Tensor self, SymInt low, SymInt high, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor"
  5451. },
  5452. {
  5453. "name": "aten::randint_like.out(Tensor self, SymInt high, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)"
  5454. },
  5455. {
  5456. "name": "aten::randint_like.generator_out(Tensor self, SymInt high, *, Generator? generator, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)"
  5457. },
  5458. {
  5459. "name": "aten::randint_like.Tensor_out(Tensor self, Tensor high, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)"
  5460. },
  5461. {
  5462. "name": "aten::randint_like.Tensor_generator_out(Tensor self, Tensor high, *, Generator? generator, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)"
  5463. },
  5464. {
  5465. "name": "aten::randint_like.low_dtype_out(Tensor self, SymInt low, SymInt high, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)"
  5466. },
  5467. {
  5468. "name": "aten::randint_like.low_generator_dtype_out(Tensor self, SymInt low, SymInt high, *, Generator? generator, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)"
  5469. },
  5470. {
  5471. "name": "aten::randint_like.generator_with_low_dtype(Tensor self, SymInt low, SymInt high, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor"
  5472. },
  5473. {
  5474. "name": "aten::randint_like.generator_with_low_dtype_out(Tensor self, SymInt low, SymInt high, *, Generator? generator, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)"
  5475. },
  5476. {
  5477. "name": "aten::randn(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  5478. },
  5479. {
  5480. "name": "aten::randn.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  5481. },
  5482. {
  5483. "name": "aten::randn.names(SymInt[] size, *, str[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  5484. },
  5485. {
  5486. "name": "aten::randn.generator_with_names(SymInt[] size, *, Generator? generator, str[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  5487. },
  5488. {
  5489. "name": "aten::randn.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)"
  5490. },
  5491. {
  5492. "name": "aten::randn.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)"
  5493. },
  5494. {
  5495. "name": "aten::randn.names_out(SymInt[] size, *, str[]? names, Tensor(a!) out) -> Tensor(a!)"
  5496. },
  5497. {
  5498. "name": "aten::randn.generator_with_names_out(SymInt[] size, *, Generator? generator, str[]? names, Tensor(a!) out) -> Tensor(a!)"
  5499. },
  5500. {
  5501. "name": "aten::randn_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor"
  5502. },
  5503. {
  5504. "name": "aten::randn_like.generator(Tensor self, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor"
  5505. },
  5506. {
  5507. "name": "aten::randn_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)"
  5508. },
  5509. {
  5510. "name": "aten::randn_like.generator_out(Tensor self, *, Generator? generator, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)"
  5511. },
  5512. {
  5513. "name": "aten::random_.from(Tensor(a!) self, int from, int? to, *, Generator? generator=None) -> Tensor(a!)"
  5514. },
  5515. {
  5516. "name": "aten::random_.to(Tensor(a!) self, int to, *, Generator? generator=None) -> Tensor(a!)"
  5517. },
  5518. {
  5519. "name": "aten::random_(Tensor(a!) self, *, Generator? generator=None) -> Tensor(a!)"
  5520. },
  5521. {
  5522. "name": "aten::randperm(SymInt n, *, ScalarType? dtype=4, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  5523. },
  5524. {
  5525. "name": "aten::randperm.generator(SymInt n, *, Generator? generator, ScalarType? dtype=4, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  5526. },
  5527. {
  5528. "name": "aten::randperm.out(SymInt n, *, Tensor(a!) out) -> Tensor(a!)"
  5529. },
  5530. {
  5531. "name": "aten::randperm.generator_out(SymInt n, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)"
  5532. },
  5533. {
  5534. "name": "aten::range.step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  5535. },
  5536. {
  5537. "name": "aten::range(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  5538. },
  5539. {
  5540. "name": "aten::range.out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)"
  5541. },
  5542. {
  5543. "name": "aten::range.out_(Scalar start, Scalar end, *, Tensor(a!) out) -> Tensor(a!)"
  5544. },
  5545. {
  5546. "name": "aten::real(Tensor(a) self) -> Tensor(a)"
  5547. },
  5548. {
  5549. "name": "aten::reciprocal(Tensor self) -> Tensor"
  5550. },
  5551. {
  5552. "name": "aten::reciprocal.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  5553. },
  5554. {
  5555. "name": "aten::reciprocal_(Tensor(a!) self) -> Tensor(a!)"
  5556. },
  5557. {
  5558. "name": "aten::reflection_pad1d(Tensor self, SymInt[2] padding) -> Tensor",
  5559. "category": "Tensor"
  5560. },
  5561. {
  5562. "name": "aten::reflection_pad1d.out(Tensor self, SymInt[2] padding, *, Tensor(a!) out) -> Tensor(a!)"
  5563. },
  5564. {
  5565. "name": "aten::reflection_pad2d(Tensor self, SymInt[4] padding) -> Tensor",
  5566. "category": "Tensor"
  5567. },
  5568. {
  5569. "name": "aten::reflection_pad2d.out(Tensor self, SymInt[4] padding, *, Tensor(a!) out) -> Tensor(a!)"
  5570. },
  5571. {
  5572. "name": "aten::reflection_pad3d(Tensor self, SymInt[6] padding) -> Tensor",
  5573. "category": "Tensor"
  5574. },
  5575. {
  5576. "name": "aten::reflection_pad3d.out(Tensor self, SymInt[6] padding, *, Tensor(a!) out) -> Tensor(a!)"
  5577. },
  5578. {
  5579. "name": "aten::relu(Tensor self) -> Tensor",
  5580. "category": "Activation"
  5581. },
  5582. {
  5583. "name": "aten::relu.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  5584. },
  5585. {
  5586. "name": "aten::relu6(Tensor self) -> Tensor",
  5587. "category": "Activation"
  5588. },
  5589. {
  5590. "name": "aten::relu6_(Tensor(a!) self) -> Tensor(a!)",
  5591. "category": "Activation"
  5592. },
  5593. {
  5594. "name": "aten::relu_(Tensor(a!) self) -> Tensor(a!)",
  5595. "category": "Activation"
  5596. },
  5597. {
  5598. "name": "aten::remainder.Tensor(Tensor self, Tensor other) -> Tensor"
  5599. },
  5600. {
  5601. "name": "aten::remainder.Scalar(Tensor self, Scalar other) -> Tensor"
  5602. },
  5603. {
  5604. "name": "aten::remainder.Scalar_Tensor(Scalar self, Tensor other) -> Tensor"
  5605. },
  5606. {
  5607. "name": "aten::remainder.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  5608. },
  5609. {
  5610. "name": "aten::remainder.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)"
  5611. },
  5612. {
  5613. "name": "aten::remainder.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  5614. },
  5615. {
  5616. "name": "aten::remainder.int(int a, int b) -> int"
  5617. },
  5618. {
  5619. "name": "aten::remainder.float(float a, float b) -> float"
  5620. },
  5621. {
  5622. "name": "aten::remainder.int_float(int a, float b) -> float"
  5623. },
  5624. {
  5625. "name": "aten::remainder.float_int(float a, int b) -> float"
  5626. },
  5627. {
  5628. "name": "aten::remainder(Scalar a, Scalar b) -> Scalar"
  5629. },
  5630. {
  5631. "name": "aten::remainder_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)"
  5632. },
  5633. {
  5634. "name": "aten::remainder_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)"
  5635. },
  5636. {
  5637. "name": "aten::remove.int(int[](a!) self, int el) -> ()"
  5638. },
  5639. {
  5640. "name": "aten::remove.float(float[](a!) self, float el) -> ()"
  5641. },
  5642. {
  5643. "name": "aten::remove.bool(bool[](a!) self, bool el) -> ()"
  5644. },
  5645. {
  5646. "name": "aten::remove.Tensor(Tensor[](a!) self, Tensor el) -> ()"
  5647. },
  5648. {
  5649. "name": "aten::remove.str(str[](a!) self, str el) -> ()"
  5650. },
  5651. {
  5652. "name": "aten::renorm(Tensor self, Scalar p, int dim, Scalar maxnorm) -> Tensor"
  5653. },
  5654. {
  5655. "name": "aten::renorm.out(Tensor self, Scalar p, int dim, Scalar maxnorm, *, Tensor(a!) out) -> Tensor(a!)"
  5656. },
  5657. {
  5658. "name": "aten::repeat(Tensor self, SymInt[] repeats) -> Tensor"
  5659. },
  5660. {
  5661. "name": "aten::repeat.out(Tensor self, SymInt[] repeats, *, Tensor(a!) out) -> Tensor(a!)"
  5662. },
  5663. {
  5664. "name": "aten::repeat_interleave.Tensor(Tensor repeats, *, SymInt? output_size=None) -> Tensor"
  5665. },
  5666. {
  5667. "name": "aten::repeat_interleave.self_Tensor(Tensor self, Tensor repeats, int? dim=None, *, SymInt? output_size=None) -> Tensor"
  5668. },
  5669. {
  5670. "name": "aten::repeat_interleave.self_int(Tensor self, SymInt repeats, int? dim=None, *, SymInt? output_size=None) -> Tensor"
  5671. },
  5672. {
  5673. "name": "aten::repeat_interleave.Tensor_out(Tensor repeats, *, SymInt? output_size=None, Tensor(a!) out) -> Tensor(a!)"
  5674. },
  5675. {
  5676. "name": "aten::replace(str self, str old, str new, int max=-1) -> str"
  5677. },
  5678. {
  5679. "name": "aten::replication_pad1d(Tensor self, SymInt[2] padding) -> Tensor",
  5680. "category": "Tensor"
  5681. },
  5682. {
  5683. "name": "aten::replication_pad1d.out(Tensor self, SymInt[2] padding, *, Tensor(a!) out) -> Tensor(a!)"
  5684. },
  5685. {
  5686. "name": "aten::replication_pad2d(Tensor self, SymInt[4] padding) -> Tensor",
  5687. "category": "Tensor"
  5688. },
  5689. {
  5690. "name": "aten::replication_pad2d.out(Tensor self, SymInt[4] padding, *, Tensor(a!) out) -> Tensor(a!)"
  5691. },
  5692. {
  5693. "name": "aten::replication_pad3d(Tensor self, SymInt[6] padding) -> Tensor",
  5694. "category": "Tensor"
  5695. },
  5696. {
  5697. "name": "aten::replication_pad3d.out(Tensor self, SymInt[6] padding, *, Tensor(a!) out) -> Tensor(a!)"
  5698. },
  5699. {
  5700. "name": "aten::requires_grad_(Tensor(a!) self, bool requires_grad=True) -> Tensor(a!)"
  5701. },
  5702. {
  5703. "name": "aten::reshape(Tensor(a) self, SymInt[] shape) -> Tensor(a)",
  5704. "category": "Shape"
  5705. },
  5706. {
  5707. "name": "aten::reshape_as(Tensor(a) self, Tensor other) -> Tensor(a)",
  5708. "category": "Shape"
  5709. },
  5710. {
  5711. "name": "aten::resize(Tensor self, SymInt[] size, *, MemoryFormat? memory_format=None) -> Tensor"
  5712. },
  5713. {
  5714. "name": "aten::resize.out(Tensor self, SymInt[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)"
  5715. },
  5716. {
  5717. "name": "aten::resize_(Tensor(a!) self, SymInt[] size, *, MemoryFormat? memory_format=None) -> Tensor(a!)"
  5718. },
  5719. {
  5720. "name": "aten::resolve_conj(Tensor(a) self) -> Tensor(a)"
  5721. },
  5722. {
  5723. "name": "aten::resolve_neg(Tensor(a) self) -> Tensor(a)"
  5724. },
  5725. {
  5726. "name": "aten::retain_grad(Tensor(a!) self) -> ()"
  5727. },
  5728. {
  5729. "name": "aten::reverse.t(t[](a!) self) -> ()"
  5730. },
  5731. {
  5732. "name": "aten::rms_norm(Tensor input, SymInt[] normalized_shape, Tensor? weight=None, float? eps=None) -> Tensor"
  5733. },
  5734. {
  5735. "name": "aten::rnn_relu.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor)",
  5736. "category": "Layer"
  5737. },
  5738. {
  5739. "name": "aten::rnn_relu.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor)"
  5740. },
  5741. {
  5742. "name": "aten::rnn_tanh.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor)",
  5743. "category": "Layer"
  5744. },
  5745. {
  5746. "name": "aten::rnn_tanh.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor)"
  5747. },
  5748. {
  5749. "name": "aten::rnn_tanh_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> Tensor"
  5750. },
  5751. {
  5752. "name": "aten::roll(Tensor self, SymInt[1] shifts, int[1] dims=[]) -> Tensor",
  5753. "category": "Layer"
  5754. },
  5755. {
  5756. "name": "aten::roll.out(Tensor self, SymInt[1] shifts, int[1] dims=[], *, Tensor(a!) out) -> Tensor(a!)"
  5757. },
  5758. {
  5759. "name": "aten::rot90(Tensor self, int k=1, int[] dims=[0, 1]) -> Tensor"
  5760. },
  5761. {
  5762. "name": "aten::rot90.out(Tensor self, int k=1, int[] dims=[0, 1], *, Tensor(a!) out) -> Tensor(a!)"
  5763. },
  5764. {
  5765. "name": "aten::round(Tensor self) -> Tensor"
  5766. },
  5767. {
  5768. "name": "aten::round.decimals(Tensor self, *, int decimals) -> Tensor"
  5769. },
  5770. {
  5771. "name": "aten::round.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  5772. },
  5773. {
  5774. "name": "aten::round.decimals_out(Tensor self, *, int decimals, Tensor(a!) out) -> Tensor(a!)"
  5775. },
  5776. {
  5777. "name": "aten::round.int(int a) -> float"
  5778. },
  5779. {
  5780. "name": "aten::round.float(float a) -> float"
  5781. },
  5782. {
  5783. "name": "aten::round.Scalar(Scalar a) -> Scalar"
  5784. },
  5785. {
  5786. "name": "aten::round_(Tensor(a!) self) -> Tensor(a!)"
  5787. },
  5788. {
  5789. "name": "aten::round_.decimals(Tensor(a!) self, *, int decimals) -> Tensor(a!)"
  5790. },
  5791. {
  5792. "name": "aten::rrelu(Tensor self, Scalar lower=0.125, Scalar upper=0.33333333333333331, bool training=False, Generator? generator=None) -> Tensor",
  5793. "category": "Activation"
  5794. },
  5795. {
  5796. "name": "aten::rrelu_(Tensor(a!) self, Scalar lower=0.125, Scalar upper=0.33333333333333331, bool training=False, Generator? generator=None) -> Tensor(a!)",
  5797. "category": "Activation"
  5798. },
  5799. {
  5800. "name": "aten::rsqrt(Tensor self) -> Tensor"
  5801. },
  5802. {
  5803. "name": "aten::rsqrt.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  5804. },
  5805. {
  5806. "name": "aten::rsqrt_(Tensor(a!) self) -> Tensor(a!)"
  5807. },
  5808. {
  5809. "name": "aten::rstrip(str self, str chars=\" \\n\\t\\f\\v\") -> str"
  5810. },
  5811. {
  5812. "name": "aten::rsub.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor"
  5813. },
  5814. {
  5815. "name": "aten::rsub.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor"
  5816. },
  5817. {
  5818. "name": "aten::rsub.Tensor_out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)"
  5819. },
  5820. {
  5821. "name": "aten::rsub.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!)"
  5822. },
  5823. {
  5824. "name": "aten::scalar_tensor(Scalar s, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  5825. },
  5826. {
  5827. "name": "aten::scalar_tensor.out(Scalar s, *, Tensor(a!) out) -> Tensor(a!)"
  5828. },
  5829. {
  5830. "name": "aten::scaled_dot_product_attention(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0., bool is_causal=False, *, float? scale=None, bool enable_gqa=False) -> Tensor",
  5831. "category": "Attention"
  5832. },
  5833. {
  5834. "name": "aten::scatter.value(Tensor self, int dim, Tensor index, Scalar value) -> Tensor"
  5835. },
  5836. {
  5837. "name": "aten::scatter.src(Tensor self, int dim, Tensor index, Tensor src) -> Tensor"
  5838. },
  5839. {
  5840. "name": "aten::scatter.reduce(Tensor self, int dim, Tensor index, Tensor src, *, str reduce) -> Tensor"
  5841. },
  5842. {
  5843. "name": "aten::scatter.value_reduce(Tensor self, int dim, Tensor index, Scalar value, *, str reduce) -> Tensor"
  5844. },
  5845. {
  5846. "name": "aten::scatter.src_out(Tensor self, int dim, Tensor index, Tensor src, *, Tensor(a!) out) -> Tensor(a!)"
  5847. },
  5848. {
  5849. "name": "aten::scatter.value_out(Tensor self, int dim, Tensor index, Scalar value, *, Tensor(a!) out) -> Tensor(a!)"
  5850. },
  5851. {
  5852. "name": "aten::scatter.reduce_out(Tensor self, int dim, Tensor index, Tensor src, *, str reduce, Tensor(a!) out) -> Tensor(a!)"
  5853. },
  5854. {
  5855. "name": "aten::scatter.value_reduce_out(Tensor self, int dim, Tensor index, Scalar value, *, str reduce, Tensor(a!) out) -> Tensor(a!)"
  5856. },
  5857. {
  5858. "name": "aten::scatter.dimname_src(Tensor self, str dim, Tensor index, Tensor src) -> Tensor"
  5859. },
  5860. {
  5861. "name": "aten::scatter.dimname_value(Tensor self, str dim, Tensor index, Scalar value) -> Tensor"
  5862. },
  5863. {
  5864. "name": "aten::scatter_.src(Tensor(a!) self, int dim, Tensor index, Tensor src) -> Tensor(a!)"
  5865. },
  5866. {
  5867. "name": "aten::scatter_.value(Tensor(a!) self, int dim, Tensor index, Scalar value) -> Tensor(a!)"
  5868. },
  5869. {
  5870. "name": "aten::scatter_.reduce(Tensor(a!) self, int dim, Tensor index, Tensor src, *, str reduce) -> Tensor(a!)"
  5871. },
  5872. {
  5873. "name": "aten::scatter_.value_reduce(Tensor(a!) self, int dim, Tensor index, Scalar value, *, str reduce) -> Tensor(a!)"
  5874. },
  5875. {
  5876. "name": "aten::scatter_add(Tensor self, int dim, Tensor index, Tensor src) -> Tensor"
  5877. },
  5878. {
  5879. "name": "aten::scatter_add.out(Tensor self, int dim, Tensor index, Tensor src, *, Tensor(a!) out) -> Tensor(a!)"
  5880. },
  5881. {
  5882. "name": "aten::scatter_add.dimname(Tensor self, str dim, Tensor index, Tensor src) -> Tensor"
  5883. },
  5884. {
  5885. "name": "aten::scatter_add_(Tensor(a!) self, int dim, Tensor index, Tensor src) -> Tensor(a!)"
  5886. },
  5887. {
  5888. "name": "aten::scatter_reduce.two(Tensor self, int dim, Tensor index, Tensor src, str reduce, *, bool include_self=True) -> Tensor"
  5889. },
  5890. {
  5891. "name": "aten::scatter_reduce.two_out(Tensor self, int dim, Tensor index, Tensor src, str reduce, *, bool include_self=True, Tensor(a!) out) -> Tensor(a!)"
  5892. },
  5893. {
  5894. "name": "aten::scatter_reduce_.two(Tensor(a!) self, int dim, Tensor index, Tensor src, str reduce, *, bool include_self=True) -> Tensor(a!)"
  5895. },
  5896. {
  5897. "name": "aten::searchsorted.Tensor(Tensor sorted_sequence, Tensor self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None) -> Tensor"
  5898. },
  5899. {
  5900. "name": "aten::searchsorted.Tensor_out(Tensor sorted_sequence, Tensor self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None, Tensor(a!) out) -> Tensor(a!)"
  5901. },
  5902. {
  5903. "name": "aten::searchsorted.Scalar(Tensor sorted_sequence, Scalar self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None) -> Tensor"
  5904. },
  5905. {
  5906. "name": "aten::searchsorted.Scalar_out(Tensor sorted_sequence, Scalar self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None, Tensor(a!) out) -> Tensor(a!)"
  5907. },
  5908. {
  5909. "name": "aten::segment_reduce(Tensor data, str reduce, *, Tensor? lengths=None, Tensor? indices=None, Tensor? offsets=None, int axis=0, bool unsafe=False, Scalar? initial=None) -> Tensor"
  5910. },
  5911. {
  5912. "name": "aten::segment_reduce.out(Tensor data, str reduce, *, Tensor? lengths=None, Tensor? indices=None, Tensor? offsets=None, int axis=0, bool unsafe=False, Scalar? initial=None, Tensor(a!) out) -> Tensor(a!)"
  5913. },
  5914. {
  5915. "name": "aten::select.Dimname(Tensor(a) self, str dim, int index) -> Tensor(a)"
  5916. },
  5917. {
  5918. "name": "aten::select.int(Tensor(a) self, int dim, SymInt index) -> Tensor(a)"
  5919. },
  5920. {
  5921. "name": "aten::select.t(t[](a) list, int idx) -> t(*)"
  5922. },
  5923. {
  5924. "name": "aten::select_backward(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt index) -> Tensor"
  5925. },
  5926. {
  5927. "name": "aten::select_backward.out(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt index, *, Tensor(a!) out) -> Tensor(a!)"
  5928. },
  5929. {
  5930. "name": "aten::select_copy.int(Tensor self, int dim, SymInt index) -> Tensor"
  5931. },
  5932. {
  5933. "name": "aten::select_copy.int_out(Tensor self, int dim, SymInt index, *, Tensor(a!) out) -> Tensor(a!)"
  5934. },
  5935. {
  5936. "name": "aten::select_scatter(Tensor self, Tensor src, int dim, SymInt index) -> Tensor"
  5937. },
  5938. {
  5939. "name": "aten::select_scatter.out(Tensor self, Tensor src, int dim, SymInt index, *, Tensor(a!) out) -> Tensor(a!)"
  5940. },
  5941. {
  5942. "name": "aten::selu(Tensor self) -> Tensor",
  5943. "category": "Activation"
  5944. },
  5945. {
  5946. "name": "aten::selu_(Tensor(a!) self) -> Tensor(a!)",
  5947. "category": "Activation"
  5948. },
  5949. {
  5950. "name": "aten::set_.source_Storage_storage_offset(Tensor(a!) self, Storage source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[]) -> Tensor(a!)"
  5951. },
  5952. {
  5953. "name": "aten::set_.source_Tensor(Tensor(a!) self, Tensor source) -> Tensor(a!)"
  5954. },
  5955. {
  5956. "name": "aten::set_(Tensor(a!) self) -> Tensor(a!)"
  5957. },
  5958. {
  5959. "name": "aten::set_.source_Storage(Tensor(a!) self, Storage source) -> Tensor(a!)"
  5960. },
  5961. {
  5962. "name": "aten::set_.source_Tensor_storage_offset(Tensor(a!) self, Tensor source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[]) -> Tensor(a!)"
  5963. },
  5964. {
  5965. "name": "aten::set_grad_enabled(bool val) -> ()"
  5966. },
  5967. {
  5968. "name": "aten::set_num_threads(int nthreads) -> ()"
  5969. },
  5970. {
  5971. "name": "aten::sigmoid(Tensor self) -> Tensor",
  5972. "category": "Activation"
  5973. },
  5974. {
  5975. "name": "aten::sigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  5976. },
  5977. {
  5978. "name": "aten::sigmoid_(Tensor(a!) self) -> Tensor(a!)",
  5979. "category": "Activation"
  5980. },
  5981. {
  5982. "name": "aten::sign(Tensor self) -> Tensor"
  5983. },
  5984. {
  5985. "name": "aten::sign.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  5986. },
  5987. {
  5988. "name": "aten::sign_(Tensor(a!) self) -> Tensor(a!)"
  5989. },
  5990. {
  5991. "name": "aten::signbit(Tensor self) -> Tensor"
  5992. },
  5993. {
  5994. "name": "aten::signbit.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  5995. },
  5996. {
  5997. "name": "aten::silu(Tensor self) -> Tensor",
  5998. "category": "Activation"
  5999. },
  6000. {
  6001. "name": "aten::silu.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  6002. },
  6003. {
  6004. "name": "aten::silu_(Tensor(a!) self) -> Tensor(a!)",
  6005. "category": "Activation"
  6006. },
  6007. {
  6008. "name": "aten::silu_backward(Tensor grad_output, Tensor self) -> Tensor"
  6009. },
  6010. {
  6011. "name": "aten::silu_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!)"
  6012. },
  6013. {
  6014. "name": "aten::sin(Tensor self) -> Tensor"
  6015. },
  6016. {
  6017. "name": "aten::sin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  6018. },
  6019. {
  6020. "name": "aten::sin.int(int a) -> float"
  6021. },
  6022. {
  6023. "name": "aten::sin.float(float a) -> float"
  6024. },
  6025. {
  6026. "name": "aten::sin.complex(complex a) -> complex"
  6027. },
  6028. {
  6029. "name": "aten::sin.Scalar(Scalar a) -> Scalar"
  6030. },
  6031. {
  6032. "name": "aten::sinc(Tensor self) -> Tensor"
  6033. },
  6034. {
  6035. "name": "aten::sinc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  6036. },
  6037. {
  6038. "name": "aten::sinh(Tensor self) -> Tensor"
  6039. },
  6040. {
  6041. "name": "aten::sinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  6042. },
  6043. {
  6044. "name": "aten::sinh.int(int a) -> float"
  6045. },
  6046. {
  6047. "name": "aten::sinh.float(float a) -> float"
  6048. },
  6049. {
  6050. "name": "aten::sinh.complex(complex a) -> complex"
  6051. },
  6052. {
  6053. "name": "aten::sinh.Scalar(Scalar a) -> Scalar"
  6054. },
  6055. {
  6056. "name": "aten::size.int(Tensor self, int dim) -> int"
  6057. },
  6058. {
  6059. "name": "aten::size.Dimname(Tensor self, str dim) -> int"
  6060. },
  6061. {
  6062. "name": "aten::size(Tensor self) -> int[]"
  6063. },
  6064. {
  6065. "name": "aten::slice.Tensor(Tensor(a) self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor(a)",
  6066. "category": "Tensor"
  6067. },
  6068. {
  6069. "name": "aten::slice.t(t[] l, int? start=None, int? end=None, int step=1) -> t[]",
  6070. "category": "Tensor"
  6071. },
  6072. {
  6073. "name": "aten::slice.str(str string, int? start=None, int? end=None, int step=1) -> str",
  6074. "category": "Tensor"
  6075. },
  6076. {
  6077. "name": "aten::slice_copy.Tensor(Tensor self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor"
  6078. },
  6079. {
  6080. "name": "aten::slice_copy.Tensor_out(Tensor self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1, *, Tensor(a!) out) -> Tensor(a!)"
  6081. },
  6082. {
  6083. "name": "aten::slice_scatter(Tensor self, Tensor src, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor"
  6084. },
  6085. {
  6086. "name": "aten::slice_scatter.out(Tensor self, Tensor src, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1, *, Tensor(a!) out) -> Tensor(a!)"
  6087. },
  6088. {
  6089. "name": "aten::slogdet(Tensor self) -> (Tensor sign, Tensor logabsdet)"
  6090. },
  6091. {
  6092. "name": "aten::slogdet.out(Tensor self, *, Tensor(a!) sign, Tensor(b!) logabsdet) -> (Tensor(a!) sign, Tensor(b!) logabsdet)"
  6093. },
  6094. {
  6095. "name": "aten::smooth_l1_loss(Tensor self, Tensor target, int reduction=1, float beta=1.) -> Tensor"
  6096. },
  6097. {
  6098. "name": "aten::smooth_l1_loss.out(Tensor self, Tensor target, int reduction=1, float beta=1., *, Tensor(a!) out) -> Tensor(a!)"
  6099. },
  6100. {
  6101. "name": "aten::smooth_l1_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, float beta, *, Tensor(a!) grad_input) -> Tensor(a!)"
  6102. },
  6103. {
  6104. "name": "aten::smooth_l1_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, float beta) -> Tensor"
  6105. },
  6106. {
  6107. "name": "aten::softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor",
  6108. "category": "Activation"
  6109. },
  6110. {
  6111. "name": "aten::softmax.Dimname(Tensor self, str dim, *, ScalarType? dtype=None) -> Tensor",
  6112. "category": "Activation"
  6113. },
  6114. {
  6115. "name": "aten::softmax.int_out(Tensor self, int dim, ScalarType? dtype=None, *, Tensor(a!) out) -> Tensor(a!)"
  6116. },
  6117. {
  6118. "name": "aten::softplus(Tensor self, Scalar beta=1, Scalar threshold=20) -> Tensor",
  6119. "category": "Activation"
  6120. },
  6121. {
  6122. "name": "aten::softplus.out(Tensor self, Scalar beta=1, Scalar threshold=20, *, Tensor(a!) out) -> Tensor(a!)"
  6123. },
  6124. {
  6125. "name": "aten::softshrink(Tensor self, Scalar lambd=0.5) -> Tensor"
  6126. },
  6127. {
  6128. "name": "aten::softshrink.out(Tensor self, Scalar lambd=0.5, *, Tensor(a!) out) -> Tensor(a!)"
  6129. },
  6130. {
  6131. "name": "aten::sort(Tensor self, int dim=-1, bool descending=False) -> (Tensor values, Tensor indices)"
  6132. },
  6133. {
  6134. "name": "aten::sort.stable(Tensor self, *, bool? stable, int dim=-1, bool descending=False) -> (Tensor values, Tensor indices)"
  6135. },
  6136. {
  6137. "name": "aten::sort.values_stable(Tensor self, *, bool? stable, int dim=-1, bool descending=False, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)"
  6138. },
  6139. {
  6140. "name": "aten::sort.values(Tensor self, int dim=-1, bool descending=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)"
  6141. },
  6142. {
  6143. "name": "aten::sort.dimname(Tensor self, str dim, bool descending=False) -> (Tensor values, Tensor indices)"
  6144. },
  6145. {
  6146. "name": "aten::sort.dimname_values(Tensor self, str dim, bool descending=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)"
  6147. },
  6148. {
  6149. "name": "aten::sort.dimname_stable(Tensor self, *, bool? stable, str dim, bool descending=False) -> (Tensor values, Tensor indices)"
  6150. },
  6151. {
  6152. "name": "aten::sort.dimname_values_stable(Tensor self, *, bool? stable, str dim, bool descending=False, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)"
  6153. },
  6154. {
  6155. "name": "aten::sort.int(int[](a!) self, bool reverse=False) -> ()"
  6156. },
  6157. {
  6158. "name": "aten::sort.float(float[](a!) self, bool reverse=False) -> ()"
  6159. },
  6160. {
  6161. "name": "aten::sort.Tensor(Tensor[](a!) self, bool reverse=False) -> ()"
  6162. },
  6163. {
  6164. "name": "aten::sort.bool(bool[](a!) self, bool reverse=False) -> ()"
  6165. },
  6166. {
  6167. "name": "aten::sort.str(str[](a!) self, bool reverse=False) -> ()"
  6168. },
  6169. {
  6170. "name": "aten::sort.any(t[](a!) self, bool reverse=False) -> ()"
  6171. },
  6172. {
  6173. "name": "aten::sorted.int(int[](a) input) -> int[]"
  6174. },
  6175. {
  6176. "name": "aten::sorted.float(float[](a) input) -> float[]"
  6177. },
  6178. {
  6179. "name": "aten::sorted.Tensor(Tensor[](a) input) -> Tensor[]"
  6180. },
  6181. {
  6182. "name": "aten::sorted.bool(bool[](a) input) -> bool[]"
  6183. },
  6184. {
  6185. "name": "aten::sorted.str(str[](a) input) -> str[]"
  6186. },
  6187. {
  6188. "name": "aten::sorted.any(t[](a) self) -> t[]"
  6189. },
  6190. {
  6191. "name": "aten::sparse_bsr_tensor.crow_col_value_size(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor"
  6192. },
  6193. {
  6194. "name": "aten::sparse_bsr_tensor.crow_col_value(Tensor crow_indices, Tensor col_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor"
  6195. },
  6196. {
  6197. "name": "aten::sparse_coo_tensor.size(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor"
  6198. },
  6199. {
  6200. "name": "aten::sparse_coo_tensor.indices(Tensor indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool? is_coalesced=None) -> Tensor"
  6201. },
  6202. {
  6203. "name": "aten::sparse_coo_tensor.indices_size(Tensor indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool? is_coalesced=None) -> Tensor"
  6204. },
  6205. {
  6206. "name": "aten::sparse_coo_tensor.size_out(int[] size, *, Tensor(a!) out) -> Tensor(a!)"
  6207. },
  6208. {
  6209. "name": "aten::sparse_csc_tensor.ccol_row_value_size(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor"
  6210. },
  6211. {
  6212. "name": "aten::sparse_csc_tensor.ccol_row_value(Tensor ccol_indices, Tensor row_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor"
  6213. },
  6214. {
  6215. "name": "aten::sparse_csr_tensor.crow_col_value_size(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor"
  6216. },
  6217. {
  6218. "name": "aten::sparse_csr_tensor.crow_col_value(Tensor crow_indices, Tensor col_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor"
  6219. },
  6220. {
  6221. "name": "aten::special_expit(Tensor self) -> Tensor"
  6222. },
  6223. {
  6224. "name": "aten::special_expit.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  6225. },
  6226. {
  6227. "name": "aten::special_expm1(Tensor self) -> Tensor"
  6228. },
  6229. {
  6230. "name": "aten::special_expm1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  6231. },
  6232. {
  6233. "name": "aten::special_logit(Tensor self, float? eps=None) -> Tensor"
  6234. },
  6235. {
  6236. "name": "aten::special_logit.out(Tensor self, float? eps=None, *, Tensor(a!) out) -> Tensor(a!)"
  6237. },
  6238. {
  6239. "name": "aten::split.Tensor(Tensor(a -> *) self, SymInt split_size, int dim=0) -> Tensor(a)[]",
  6240. "category": "Tensor"
  6241. },
  6242. {
  6243. "name": "aten::split.sizes(Tensor(a -> *) self, SymInt[] split_size, int dim=0) -> Tensor(a)[]",
  6244. "category": "Tensor"
  6245. },
  6246. {
  6247. "name": "aten::split.str(str self, str? separator=None, int max=-1) -> str[]"
  6248. },
  6249. {
  6250. "name": "aten::split(Tensor(a -> *) self, int[] split_sizes, int dim=0) -> Tensor(a)[]"
  6251. },
  6252. {
  6253. "name": "aten::split_copy.Tensor(Tensor self, SymInt split_size, int dim=0) -> Tensor[]"
  6254. },
  6255. {
  6256. "name": "aten::split_copy.Tensor_out(Tensor self, SymInt split_size, int dim=0, *, Tensor(a!)[] out) -> ()"
  6257. },
  6258. {
  6259. "name": "aten::split_with_sizes(Tensor(a -> *) self, SymInt[] split_sizes, int dim=0) -> Tensor(a)[]",
  6260. "category": "Tensor"
  6261. },
  6262. {
  6263. "name": "aten::split_with_sizes_copy(Tensor self, SymInt[] split_sizes, int dim=0) -> Tensor[]",
  6264. "category": "Tensor"
  6265. },
  6266. {
  6267. "name": "aten::split_with_sizes_copy.out(Tensor self, SymInt[] split_sizes, int dim=0, *, Tensor(a!)[] out) -> ()",
  6268. "category": "Tensor"
  6269. },
  6270. {
  6271. "name": "aten::splitlines(str self, bool keepends=False) -> str[]"
  6272. },
  6273. {
  6274. "name": "aten::sqrt(Tensor self) -> Tensor"
  6275. },
  6276. {
  6277. "name": "aten::sqrt.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  6278. },
  6279. {
  6280. "name": "aten::sqrt.int(int a) -> float"
  6281. },
  6282. {
  6283. "name": "aten::sqrt.float(float a) -> float"
  6284. },
  6285. {
  6286. "name": "aten::sqrt.complex(complex a) -> complex"
  6287. },
  6288. {
  6289. "name": "aten::sqrt.Scalar(Scalar a) -> Scalar"
  6290. },
  6291. {
  6292. "name": "aten::sqrt_(Tensor(a!) self) -> Tensor(a!)"
  6293. },
  6294. {
  6295. "name": "aten::square(Tensor self) -> Tensor"
  6296. },
  6297. {
  6298. "name": "aten::square.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  6299. },
  6300. {
  6301. "name": "aten::square_(Tensor(a!) self) -> Tensor(a!)"
  6302. },
  6303. {
  6304. "name": "aten::squeeze(Tensor(a) self) -> Tensor(a)",
  6305. "category": "Transform"
  6306. },
  6307. {
  6308. "name": "aten::squeeze.dim(Tensor(a) self, int dim) -> Tensor(a)",
  6309. "category": "Transform"
  6310. },
  6311. {
  6312. "name": "aten::squeeze.dims(Tensor(a) self, int[] dim) -> Tensor(a)",
  6313. "category": "Transform"
  6314. },
  6315. {
  6316. "name": "aten::squeeze.dimname(Tensor(a) self, str dim) -> Tensor(a)",
  6317. "category": "Transform"
  6318. },
  6319. {
  6320. "name": "aten::squeeze_(Tensor(a!) self) -> Tensor(a!)",
  6321. "category": "Transform"
  6322. },
  6323. {
  6324. "name": "aten::squeeze_.dim(Tensor(a!) self, int dim) -> Tensor(a!)",
  6325. "category": "Transform"
  6326. },
  6327. {
  6328. "name": "aten::squeeze_.dims(Tensor(a!) self, int[] dim) -> Tensor(a!)"
  6329. },
  6330. {
  6331. "name": "aten::squeeze_.dimname(Tensor(a!) self, str dim) -> Tensor(a!)",
  6332. "category": "Transform"
  6333. },
  6334. {
  6335. "name": "aten::squeeze_copy(Tensor self) -> Tensor"
  6336. },
  6337. {
  6338. "name": "aten::squeeze_copy.dim(Tensor self, int dim) -> Tensor"
  6339. },
  6340. {
  6341. "name": "aten::squeeze_copy.dims(Tensor self, int[] dim) -> Tensor"
  6342. },
  6343. {
  6344. "name": "aten::squeeze_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  6345. },
  6346. {
  6347. "name": "aten::squeeze_copy.dim_out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)"
  6348. },
  6349. {
  6350. "name": "aten::squeeze_copy.dims_out(Tensor self, int[] dim, *, Tensor(a!) out) -> Tensor(a!)"
  6351. },
  6352. {
  6353. "name": "aten::stack(Tensor[] tensors, int dim=0) -> Tensor",
  6354. "category": "Tensor"
  6355. },
  6356. {
  6357. "name": "aten::stack.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)"
  6358. },
  6359. {
  6360. "name": "aten::startswith(str self, str substr, int start=0, int end=-1) -> bool"
  6361. },
  6362. {
  6363. "name": "aten::std(Tensor self, bool unbiased=True) -> Tensor"
  6364. },
  6365. {
  6366. "name": "aten::std.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> Tensor"
  6367. },
  6368. {
  6369. "name": "aten::std.correction(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False) -> Tensor"
  6370. },
  6371. {
  6372. "name": "aten::std.names_dim(Tensor self, str[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor"
  6373. },
  6374. {
  6375. "name": "aten::std.names_out(Tensor self, str[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)"
  6376. },
  6377. {
  6378. "name": "aten::std.out(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)"
  6379. },
  6380. {
  6381. "name": "aten::std.correction_out(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)"
  6382. },
  6383. {
  6384. "name": "aten::std.correction_names(Tensor self, str[1] dim, *, Scalar? correction=None, bool keepdim=False) -> Tensor"
  6385. },
  6386. {
  6387. "name": "aten::std.correction_names_out(Tensor self, str[1] dim, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)"
  6388. },
  6389. {
  6390. "name": "aten::std_mean(Tensor self, bool unbiased=True) -> (Tensor, Tensor)"
  6391. },
  6392. {
  6393. "name": "aten::std_mean.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)"
  6394. },
  6395. {
  6396. "name": "aten::std_mean.correction(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False) -> (Tensor, Tensor)"
  6397. },
  6398. {
  6399. "name": "aten::std_mean.names_dim(Tensor self, str[1] dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)"
  6400. },
  6401. {
  6402. "name": "aten::std_mean.correction_names(Tensor self, str[1] dim, *, Scalar? correction=None, bool keepdim=False) -> (Tensor, Tensor)"
  6403. },
  6404. {
  6405. "name": "aten::std_mean.correction_out(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))"
  6406. },
  6407. {
  6408. "name": "aten::stft(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool normalized=False, bool? onesided=None, bool? return_complex=None, bool? align_to_window=None) -> Tensor"
  6409. },
  6410. {
  6411. "name": "aten::stft.center(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool center=True, str pad_mode=\"reflect\", bool normalized=False, bool? onesided=None, bool? return_complex=None, bool? align_to_window=None) -> Tensor"
  6412. },
  6413. {
  6414. "name": "aten::str(t elem) -> str"
  6415. },
  6416. {
  6417. "name": "aten::stride.int(Tensor self, int dim) -> int"
  6418. },
  6419. {
  6420. "name": "aten::stride.Dimname(Tensor self, str dim) -> int"
  6421. },
  6422. {
  6423. "name": "aten::stride(Tensor self) -> int[]"
  6424. },
  6425. {
  6426. "name": "aten::strip(str self, str chars=\" \\n\\t\\f\\v\") -> str"
  6427. },
  6428. {
  6429. "name": "aten::sub.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor"
  6430. },
  6431. {
  6432. "name": "aten::sub.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor"
  6433. },
  6434. {
  6435. "name": "aten::sub.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)"
  6436. },
  6437. {
  6438. "name": "aten::sub.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!)"
  6439. },
  6440. {
  6441. "name": "aten::sub.int(int a, int b) -> int"
  6442. },
  6443. {
  6444. "name": "aten::sub.complex(complex a, complex b) -> complex"
  6445. },
  6446. {
  6447. "name": "aten::sub.float(float a, float b) -> float"
  6448. },
  6449. {
  6450. "name": "aten::sub.int_complex(int a, complex b) -> complex"
  6451. },
  6452. {
  6453. "name": "aten::sub.complex_int(complex a, int b) -> complex"
  6454. },
  6455. {
  6456. "name": "aten::sub.float_complex(float a, complex b) -> complex"
  6457. },
  6458. {
  6459. "name": "aten::sub.complex_float(complex a, float b) -> complex"
  6460. },
  6461. {
  6462. "name": "aten::sub.int_float(int a, float b) -> float"
  6463. },
  6464. {
  6465. "name": "aten::sub.float_int(float a, int b) -> float"
  6466. },
  6467. {
  6468. "name": "aten::sub(Scalar a, Scalar b) -> Scalar"
  6469. },
  6470. {
  6471. "name": "aten::sub_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)"
  6472. },
  6473. {
  6474. "name": "aten::sub_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)"
  6475. },
  6476. {
  6477. "name": "aten::sum.dim_IntList(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor"
  6478. },
  6479. {
  6480. "name": "aten::sum(Tensor self, *, ScalarType? dtype=None) -> Tensor"
  6481. },
  6482. {
  6483. "name": "aten::sum.dim_DimnameList(Tensor self, str[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor"
  6484. },
  6485. {
  6486. "name": "aten::sum.DimnameList_out(Tensor self, str[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)"
  6487. },
  6488. {
  6489. "name": "aten::sum.IntList_out(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)"
  6490. },
  6491. {
  6492. "name": "aten::sum.out(Tensor self, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)"
  6493. },
  6494. {
  6495. "name": "aten::sum.int(int[] self) -> int"
  6496. },
  6497. {
  6498. "name": "aten::sum.float(float[] self) -> float"
  6499. },
  6500. {
  6501. "name": "aten::sum.complex(complex[] self) -> complex"
  6502. },
  6503. {
  6504. "name": "aten::sum.bool(bool[] self) -> int"
  6505. },
  6506. {
  6507. "name": "aten::swapaxes(Tensor(a) self, int axis0, int axis1) -> Tensor(a)"
  6508. },
  6509. {
  6510. "name": "aten::swapaxes_(Tensor(a!) self, int axis0, int axis1) -> Tensor(a!)"
  6511. },
  6512. {
  6513. "name": "aten::sym_constrain_range_for_size(Scalar size, *, int? min=None, int? max=None) -> ()"
  6514. },
  6515. {
  6516. "name": "aten::sym_size.int(Tensor self, int dim) -> SymInt"
  6517. },
  6518. {
  6519. "name": "aten::sym_size(Tensor self) -> SymInt[]"
  6520. },
  6521. {
  6522. "name": "aten::t(Tensor(a) self) -> Tensor(a)"
  6523. },
  6524. {
  6525. "name": "aten::t_(Tensor(a!) self) -> Tensor(a!)"
  6526. },
  6527. {
  6528. "name": "aten::take(Tensor self, Tensor index) -> Tensor",
  6529. "category": "Activation"
  6530. },
  6531. {
  6532. "name": "aten::take.out(Tensor self, Tensor index, *, Tensor(a!) out) -> Tensor(a!)"
  6533. },
  6534. {
  6535. "name": "aten::take_along_dim(Tensor self, Tensor indices, int? dim=None) -> Tensor"
  6536. },
  6537. {
  6538. "name": "aten::take_along_dim.out(Tensor self, Tensor indices, int? dim=None, *, Tensor(a!) out) -> Tensor(a!)"
  6539. },
  6540. {
  6541. "name": "aten::tan(Tensor self) -> Tensor"
  6542. },
  6543. {
  6544. "name": "aten::tan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  6545. },
  6546. {
  6547. "name": "aten::tan.int(int a) -> float"
  6548. },
  6549. {
  6550. "name": "aten::tan.float(float a) -> float"
  6551. },
  6552. {
  6553. "name": "aten::tan.complex(complex a) -> complex"
  6554. },
  6555. {
  6556. "name": "aten::tan.Scalar(Scalar a) -> Scalar"
  6557. },
  6558. {
  6559. "name": "aten::tan_(Tensor(a!) self) -> Tensor(a!)"
  6560. },
  6561. {
  6562. "name": "aten::tanh(Tensor self) -> Tensor",
  6563. "category": "Activation"
  6564. },
  6565. {
  6566. "name": "aten::tanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)",
  6567. "category": "Activation"
  6568. },
  6569. {
  6570. "name": "aten::tanh.int(int a) -> float",
  6571. "category": "Activation"
  6572. },
  6573. {
  6574. "name": "aten::tanh.float(float a) -> float",
  6575. "category": "Activation"
  6576. },
  6577. {
  6578. "name": "aten::tanh.complex(complex a) -> complex",
  6579. "category": "Activation"
  6580. },
  6581. {
  6582. "name": "aten::tanh.Scalar(Scalar a) -> Scalar",
  6583. "category": "Activation"
  6584. },
  6585. {
  6586. "name": "aten::tanh_(Tensor(a!) self) -> Tensor(a!)",
  6587. "category": "Activation"
  6588. },
  6589. {
  6590. "name": "aten::tensor.bool(bool t, *, ScalarType? dtype=None, Device? device=None, bool requires_grad=False) -> Tensor"
  6591. },
  6592. {
  6593. "name": "aten::tensor.float(float t, *, ScalarType? dtype=None, Device? device=None, bool requires_grad=False) -> Tensor"
  6594. },
  6595. {
  6596. "name": "aten::tensor.int(int t, *, ScalarType? dtype=None, Device? device=None, bool requires_grad=False) -> Tensor"
  6597. },
  6598. {
  6599. "name": "aten::tensor.complex(complex t, *, ScalarType? dtype=None, Device? device=None, bool requires_grad=False) -> Tensor"
  6600. },
  6601. {
  6602. "name": "aten::tensor(t[] data, *, ScalarType? dtype=None, Device? device=None, bool requires_grad=False) -> Tensor"
  6603. },
  6604. {
  6605. "name": "aten::tensor_split.sections(Tensor(a -> *) self, SymInt sections, int dim=0) -> Tensor(a)[]"
  6606. },
  6607. {
  6608. "name": "aten::tensor_split.indices(Tensor(a -> *) self, SymInt[] indices, int dim=0) -> Tensor(a)[]"
  6609. },
  6610. {
  6611. "name": "aten::tensor_split.tensor_indices_or_sections(Tensor(a -> *) self, Tensor tensor_indices_or_sections, int dim=0) -> Tensor(a)[]"
  6612. },
  6613. {
  6614. "name": "aten::tensordot(Tensor self, Tensor other, int[] dims_self, int[] dims_other) -> Tensor"
  6615. },
  6616. {
  6617. "name": "aten::tensordot.out(Tensor self, Tensor other, int[] dims_self, int[] dims_other, *, Tensor(a!) out) -> Tensor(a!)"
  6618. },
  6619. {
  6620. "name": "aten::threshold(Tensor self, Scalar threshold, Scalar value) -> Tensor",
  6621. "category": "Activation"
  6622. },
  6623. {
  6624. "name": "aten::threshold.out(Tensor self, Scalar threshold, Scalar value, *, Tensor(a!) out) -> Tensor(a!)"
  6625. },
  6626. {
  6627. "name": "aten::threshold_(Tensor(a!) self, Scalar threshold, Scalar value) -> Tensor(a!)",
  6628. "category": "Activation"
  6629. },
  6630. {
  6631. "name": "aten::tile(Tensor self, SymInt[] dims) -> Tensor"
  6632. },
  6633. {
  6634. "name": "aten::to.device(Tensor(a) self, Device device, ScalarType dtype, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a)"
  6635. },
  6636. {
  6637. "name": "aten::to.dtype(Tensor(a) self, ScalarType dtype, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a)"
  6638. },
  6639. {
  6640. "name": "aten::to.other(Tensor(a) self, Tensor other, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a)"
  6641. },
  6642. {
  6643. "name": "aten::to.dtype_layout(Tensor(a) self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a)"
  6644. },
  6645. {
  6646. "name": "aten::to.prim_Device(Tensor(a) self, Device? device, int? dtype=None, bool non_blocking=False, bool copy=False) -> Tensor(a|b)"
  6647. },
  6648. {
  6649. "name": "aten::to.prim_dtype(Tensor(a) self, int? dtype=None, bool non_blocking=False, bool copy=False) -> Tensor(a|b)"
  6650. },
  6651. {
  6652. "name": "aten::to.prim_other(Tensor(a) self, bool non_blocking=False, bool copy=False) -> Tensor(a|b)"
  6653. },
  6654. {
  6655. "name": "aten::to_dense(Tensor self, ScalarType? dtype=None, *, bool? masked_grad=None) -> Tensor"
  6656. },
  6657. {
  6658. "name": "aten::to_dense_backward(Tensor grad, Tensor input, bool? masked_grad=None) -> Tensor"
  6659. },
  6660. {
  6661. "name": "aten::to_mkldnn(Tensor self, ScalarType? dtype=None) -> Tensor"
  6662. },
  6663. {
  6664. "name": "aten::to_mkldnn.out(Tensor self, ScalarType? dtype=None, *, Tensor(a!) out) -> Tensor(a!)"
  6665. },
  6666. {
  6667. "name": "aten::to_mkldnn_backward(Tensor grad, Tensor input) -> Tensor"
  6668. },
  6669. {
  6670. "name": "aten::to_padded_tensor.out(Tensor self, float padding, SymInt[]? output_size=None, *, Tensor(a!) out) -> Tensor(a!)"
  6671. },
  6672. {
  6673. "name": "aten::to_padded_tensor(Tensor self, float padding, SymInt[]? output_size=None) -> Tensor"
  6674. },
  6675. {
  6676. "name": "aten::to_sparse.sparse_dim(Tensor self, int sparse_dim) -> Tensor"
  6677. },
  6678. {
  6679. "name": "aten::to_sparse(Tensor self, *, Layout? layout=None, int[2]? blocksize=None, int? dense_dim=None) -> Tensor"
  6680. },
  6681. {
  6682. "name": "aten::to_sparse_bsc(Tensor self, int[2] blocksize, int? dense_dim=None) -> Tensor"
  6683. },
  6684. {
  6685. "name": "aten::to_sparse_bsr(Tensor self, int[2] blocksize, int? dense_dim=None) -> Tensor"
  6686. },
  6687. {
  6688. "name": "aten::to_sparse_csc(Tensor self, int? dense_dim=None) -> Tensor"
  6689. },
  6690. {
  6691. "name": "aten::to_sparse_csr(Tensor self, int? dense_dim=None) -> Tensor"
  6692. },
  6693. {
  6694. "name": "aten::topk(Tensor self, SymInt k, int dim=-1, bool largest=True, bool sorted=True) -> (Tensor values, Tensor indices)"
  6695. },
  6696. {
  6697. "name": "aten::topk.values(Tensor self, SymInt k, int dim=-1, bool largest=True, bool sorted=True, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)"
  6698. },
  6699. {
  6700. "name": "aten::trace(Tensor self) -> Tensor"
  6701. },
  6702. {
  6703. "name": "aten::trace.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  6704. },
  6705. {
  6706. "name": "aten::transpose.int(Tensor(a) self, int dim0, int dim1) -> Tensor(a)",
  6707. "category": "Transform"
  6708. },
  6709. {
  6710. "name": "aten::transpose.Dimname(Tensor(a) self, str dim0, str dim1) -> Tensor(a)",
  6711. "category": "Transform"
  6712. },
  6713. {
  6714. "name": "aten::transpose_(Tensor(a!) self, int dim0, int dim1) -> Tensor(a!)",
  6715. "category": "Transform"
  6716. },
  6717. {
  6718. "name": "aten::transpose_copy.int(Tensor self, int dim0, int dim1) -> Tensor"
  6719. },
  6720. {
  6721. "name": "aten::transpose_copy.int_out(Tensor self, int dim0, int dim1, *, Tensor(a!) out) -> Tensor(a!)"
  6722. },
  6723. {
  6724. "name": "aten::tril(Tensor self, SymInt diagonal=0) -> Tensor",
  6725. "category": "Layer"
  6726. },
  6727. {
  6728. "name": "aten::tril.out(Tensor self, SymInt diagonal=0, *, Tensor(a!) out) -> Tensor(a!)"
  6729. },
  6730. {
  6731. "name": "aten::tril_(Tensor(a!) self, SymInt diagonal=0) -> Tensor(a!)"
  6732. },
  6733. {
  6734. "name": "aten::tril_indices(int row, int col, int offset=0, *, ScalarType? dtype=4, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor",
  6735. "category": "Layer"
  6736. },
  6737. {
  6738. "name": "aten::tril_indices.out(int row, int col, int offset=0, *, Tensor(a!) out) -> Tensor(a!)"
  6739. },
  6740. {
  6741. "name": "aten::triu(Tensor self, SymInt diagonal=0) -> Tensor"
  6742. },
  6743. {
  6744. "name": "aten::triu.out(Tensor self, SymInt diagonal=0, *, Tensor(a!) out) -> Tensor(a!)"
  6745. },
  6746. {
  6747. "name": "aten::triu_(Tensor(a!) self, SymInt diagonal=0) -> Tensor(a!)"
  6748. },
  6749. {
  6750. "name": "aten::triu_indices(int row, int col, int offset=0, *, ScalarType? dtype=4, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  6751. },
  6752. {
  6753. "name": "aten::triu_indices.out(int row, int col, int offset=0, *, Tensor(a!) out) -> Tensor(a!)"
  6754. },
  6755. {
  6756. "name": "aten::true_divide.Tensor(Tensor self, Tensor other) -> Tensor"
  6757. },
  6758. {
  6759. "name": "aten::true_divide.Scalar(Tensor self, Scalar other) -> Tensor"
  6760. },
  6761. {
  6762. "name": "aten::true_divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  6763. },
  6764. {
  6765. "name": "aten::true_divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)"
  6766. },
  6767. {
  6768. "name": "aten::true_divide_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)"
  6769. },
  6770. {
  6771. "name": "aten::trunc(Tensor self) -> Tensor"
  6772. },
  6773. {
  6774. "name": "aten::trunc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  6775. },
  6776. {
  6777. "name": "aten::type_as(Tensor self, Tensor other) -> Tensor"
  6778. },
  6779. {
  6780. "name": "aten::unbind.int(Tensor(a -> *) self, int dim=0) -> Tensor(a)[]"
  6781. },
  6782. {
  6783. "name": "aten::unbind.Dimname(Tensor(a -> *) self, str dim) -> Tensor(a)[]"
  6784. },
  6785. {
  6786. "name": "aten::unflatten.int(Tensor(a) self, int dim, SymInt[] sizes) -> Tensor(a)",
  6787. "category": "Shape"
  6788. },
  6789. {
  6790. "name": "aten::unflatten.Dimname(Tensor(a) self, str dim, SymInt[] sizes, str[] names) -> Tensor(a)",
  6791. "category": "Shape"
  6792. },
  6793. {
  6794. "name": "aten::unfold(Tensor(a) self, int dimension, int size, int step) -> Tensor(a)"
  6795. },
  6796. {
  6797. "name": "aten::unfold_copy(Tensor self, int dimension, int size, int step) -> Tensor"
  6798. },
  6799. {
  6800. "name": "aten::unfold_copy.out(Tensor self, int dimension, int size, int step, *, Tensor(a!) out) -> Tensor(a!)"
  6801. },
  6802. {
  6803. "name": "aten::uniform_(Tensor(a!) self, float from=0., float to=1., *, Generator? generator=None) -> Tensor(a!)"
  6804. },
  6805. {
  6806. "name": "aten::unique_consecutive(Tensor self, bool return_inverse=False, bool return_counts=False, int? dim=None) -> (Tensor, Tensor, Tensor)",
  6807. "category": "Layer"
  6808. },
  6809. {
  6810. "name": "aten::unique_consecutive.out(Tensor self, bool return_inverse=False, bool return_counts=False, int? dim=None, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))"
  6811. },
  6812. {
  6813. "name": "aten::unique_dim(Tensor self, int dim, bool sorted=True, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor)"
  6814. },
  6815. {
  6816. "name": "aten::unique_dim.out(Tensor self, int dim, bool sorted=True, bool return_inverse=False, bool return_counts=False, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))"
  6817. },
  6818. {
  6819. "name": "aten::unique_dim_consecutive(Tensor self, int dim, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor)",
  6820. "category": "Layer"
  6821. },
  6822. {
  6823. "name": "aten::unique_dim_consecutive.out(Tensor self, int dim, bool return_inverse=False, bool return_counts=False, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))"
  6824. },
  6825. {
  6826. "name": "aten::unsafe_chunk(Tensor self, int chunks, int dim=0) -> Tensor[]"
  6827. },
  6828. {
  6829. "name": "aten::unsafe_split.Tensor(Tensor self, SymInt split_size, int dim=0) -> Tensor[]",
  6830. "category": "Tensor"
  6831. },
  6832. {
  6833. "name": "aten::unsafe_split.Tensor_out(Tensor self, SymInt split_size, int dim=0, *, Tensor(a!)[] out) -> ()"
  6834. },
  6835. {
  6836. "name": "aten::unsqueeze(Tensor(a) self, int dim) -> Tensor(a)",
  6837. "category": "Transform"
  6838. },
  6839. {
  6840. "name": "aten::unsqueeze_(Tensor(a!) self, int dim) -> Tensor(a!)",
  6841. "category": "Transform"
  6842. },
  6843. {
  6844. "name": "aten::unsqueeze_copy(Tensor self, int dim) -> Tensor"
  6845. },
  6846. {
  6847. "name": "aten::unsqueeze_copy.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)"
  6848. },
  6849. {
  6850. "name": "aten::update.str(Dict(str, t)(a!) self, Dict(str, t)(a!) to_add) -> ()"
  6851. },
  6852. {
  6853. "name": "aten::update.int(Dict(int, t)(a!) self, Dict(int, t)(a!) to_add) -> ()"
  6854. },
  6855. {
  6856. "name": "aten::update.bool(Dict(bool, t)(a!) self, Dict(bool, t)(a!) to_add) -> ()"
  6857. },
  6858. {
  6859. "name": "aten::update.float(Dict(float, t)(a!) self, Dict(float, t)(a!) to_add) -> ()"
  6860. },
  6861. {
  6862. "name": "aten::update.complex(Dict(complex, t)(a!) self, Dict(complex, t)(a!) to_add) -> ()"
  6863. },
  6864. {
  6865. "name": "aten::update.Tensor(Dict(Tensor, t)(a!) self, Dict(Tensor, t)(a!) to_add) -> ()"
  6866. },
  6867. {
  6868. "name": "aten::upsample_bicubic2d(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor",
  6869. "category": "Layer"
  6870. },
  6871. {
  6872. "name": "aten::upsample_bicubic2d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor",
  6873. "category": "Layer"
  6874. },
  6875. {
  6876. "name": "aten::upsample_bicubic2d.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)",
  6877. "category": "Layer"
  6878. },
  6879. {
  6880. "name": "aten::upsample_bilinear2d(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor",
  6881. "category": "Layer"
  6882. },
  6883. {
  6884. "name": "aten::upsample_bilinear2d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor",
  6885. "category": "Layer"
  6886. },
  6887. {
  6888. "name": "aten::upsample_bilinear2d.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)",
  6889. "category": "Layer"
  6890. },
  6891. {
  6892. "name": "aten::upsample_bilinear2d.vec_out(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)",
  6893. "category": "Layer"
  6894. },
  6895. {
  6896. "name": "aten::upsample_bilinear2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor",
  6897. "category": "Layer"
  6898. },
  6899. {
  6900. "name": "aten::upsample_bilinear2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)",
  6901. "category": "Layer"
  6902. },
  6903. {
  6904. "name": "aten::upsample_linear1d(Tensor self, SymInt[1] output_size, bool align_corners, float? scales=None) -> Tensor",
  6905. "category": "Layer"
  6906. },
  6907. {
  6908. "name": "aten::upsample_linear1d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor",
  6909. "category": "Layer"
  6910. },
  6911. {
  6912. "name": "aten::upsample_linear1d.out(Tensor self, SymInt[1] output_size, bool align_corners, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)",
  6913. "category": "Layer"
  6914. },
  6915. {
  6916. "name": "aten::upsample_nearest1d(Tensor self, SymInt[1] output_size, float? scales=None) -> Tensor",
  6917. "category": "Layer"
  6918. },
  6919. {
  6920. "name": "aten::upsample_nearest1d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor",
  6921. "category": "Layer"
  6922. },
  6923. {
  6924. "name": "aten::upsample_nearest1d.out(Tensor self, SymInt[1] output_size, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)"
  6925. },
  6926. {
  6927. "name": "aten::upsample_nearest2d(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None) -> Tensor",
  6928. "category": "Layer"
  6929. },
  6930. {
  6931. "name": "aten::upsample_nearest2d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor",
  6932. "category": "Layer"
  6933. },
  6934. {
  6935. "name": "aten::upsample_nearest2d.out(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)"
  6936. },
  6937. {
  6938. "name": "aten::upsample_nearest2d.vec_out(Tensor input, SymInt[]? output_size, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)"
  6939. },
  6940. {
  6941. "name": "aten::upsample_nearest2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None) -> Tensor"
  6942. },
  6943. {
  6944. "name": "aten::upsample_nearest2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)"
  6945. },
  6946. {
  6947. "name": "aten::upsample_nearest3d(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor",
  6948. "category": "Layer"
  6949. },
  6950. {
  6951. "name": "aten::upsample_nearest3d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor",
  6952. "category": "Layer"
  6953. },
  6954. {
  6955. "name": "aten::upsample_nearest3d.out(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)"
  6956. },
  6957. {
  6958. "name": "aten::upsample_trilinear3d(Tensor self, SymInt[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor"
  6959. },
  6960. {
  6961. "name": "aten::upsample_trilinear3d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor"
  6962. },
  6963. {
  6964. "name": "aten::upsample_trilinear3d.out(Tensor self, SymInt[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)"
  6965. },
  6966. {
  6967. "name": "aten::upsample_trilinear3d_backward(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor"
  6968. },
  6969. {
  6970. "name": "aten::upsample_trilinear3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)"
  6971. },
  6972. {
  6973. "name": "aten::values(Tensor(a) self) -> Tensor(a)"
  6974. },
  6975. {
  6976. "name": "aten::values.str(Dict(str, t) self) -> t[](*)"
  6977. },
  6978. {
  6979. "name": "aten::values.int(Dict(int, t) self) -> t[](*)"
  6980. },
  6981. {
  6982. "name": "aten::values.bool(Dict(bool, t) self) -> t[](*)"
  6983. },
  6984. {
  6985. "name": "aten::values.float(Dict(float, t) self) -> t[](*)"
  6986. },
  6987. {
  6988. "name": "aten::values.complex(Dict(complex, t) self) -> t[](*)"
  6989. },
  6990. {
  6991. "name": "aten::values.Tensor(Dict(Tensor, t) self) -> t[](*)"
  6992. },
  6993. {
  6994. "name": "aten::var(Tensor self, bool unbiased=True) -> Tensor"
  6995. },
  6996. {
  6997. "name": "aten::var.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> Tensor"
  6998. },
  6999. {
  7000. "name": "aten::var.correction(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False) -> Tensor"
  7001. },
  7002. {
  7003. "name": "aten::var.names_dim(Tensor self, str[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor"
  7004. },
  7005. {
  7006. "name": "aten::var.names_out(Tensor self, str[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)"
  7007. },
  7008. {
  7009. "name": "aten::var.out(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)"
  7010. },
  7011. {
  7012. "name": "aten::var.correction_out(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)"
  7013. },
  7014. {
  7015. "name": "aten::var.correction_names(Tensor self, str[1] dim, *, Scalar? correction=None, bool keepdim=False) -> Tensor"
  7016. },
  7017. {
  7018. "name": "aten::var.correction_names_out(Tensor self, str[1] dim, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)"
  7019. },
  7020. {
  7021. "name": "aten::var_mean(Tensor self, bool unbiased=True) -> (Tensor, Tensor)"
  7022. },
  7023. {
  7024. "name": "aten::var_mean.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)"
  7025. },
  7026. {
  7027. "name": "aten::var_mean.correction(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False) -> (Tensor, Tensor)"
  7028. },
  7029. {
  7030. "name": "aten::var_mean.names_dim(Tensor self, str[1] dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)"
  7031. },
  7032. {
  7033. "name": "aten::var_mean.correction_names(Tensor self, str[1] dim, *, Scalar? correction=None, bool keepdim=False) -> (Tensor, Tensor)"
  7034. },
  7035. {
  7036. "name": "aten::var_mean.correction_out(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))"
  7037. },
  7038. {
  7039. "name": "aten::vdot(Tensor self, Tensor other) -> Tensor"
  7040. },
  7041. {
  7042. "name": "aten::vdot.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  7043. },
  7044. {
  7045. "name": "aten::view(Tensor(a) self, SymInt[] size) -> Tensor(a)"
  7046. },
  7047. {
  7048. "name": "aten::view.dtype(Tensor(a) self, ScalarType dtype) -> Tensor(a)"
  7049. },
  7050. {
  7051. "name": "aten::view_as(Tensor(a) self, Tensor other) -> Tensor(a)"
  7052. },
  7053. {
  7054. "name": "aten::view_as_complex(Tensor(a) self) -> Tensor(a)"
  7055. },
  7056. {
  7057. "name": "aten::view_as_complex_copy(Tensor self) -> Tensor"
  7058. },
  7059. {
  7060. "name": "aten::view_as_complex_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  7061. },
  7062. {
  7063. "name": "aten::view_as_real(Tensor(a) self) -> Tensor(a)"
  7064. },
  7065. {
  7066. "name": "aten::view_as_real_copy(Tensor self) -> Tensor"
  7067. },
  7068. {
  7069. "name": "aten::view_as_real_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  7070. },
  7071. {
  7072. "name": "aten::view_copy(Tensor self, SymInt[] size) -> Tensor"
  7073. },
  7074. {
  7075. "name": "aten::view_copy.dtype(Tensor self, ScalarType dtype) -> Tensor"
  7076. },
  7077. {
  7078. "name": "aten::view_copy.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)"
  7079. },
  7080. {
  7081. "name": "aten::view_copy.dtype_out(Tensor self, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!)"
  7082. },
  7083. {
  7084. "name": "aten::vstack(Tensor[] tensors) -> Tensor"
  7085. },
  7086. {
  7087. "name": "aten::vstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)"
  7088. },
  7089. {
  7090. "name": "aten::wait(Future(t) self) -> t"
  7091. },
  7092. {
  7093. "name": "aten::warn(str message, int stacklevel=2) -> ()"
  7094. },
  7095. {
  7096. "name": "aten::where.self(Tensor condition, Tensor self, Tensor other) -> Tensor"
  7097. },
  7098. {
  7099. "name": "aten::where.ScalarOther(Tensor condition, Tensor self, Scalar other) -> Tensor"
  7100. },
  7101. {
  7102. "name": "aten::where.ScalarSelf(Tensor condition, Scalar self, Tensor other) -> Tensor"
  7103. },
  7104. {
  7105. "name": "aten::where.Scalar(Tensor condition, Scalar self, Scalar other) -> Tensor"
  7106. },
  7107. {
  7108. "name": "aten::where(Tensor condition) -> Tensor[]"
  7109. },
  7110. {
  7111. "name": "aten::where.self_out(Tensor condition, Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  7112. },
  7113. {
  7114. "name": "aten::xlogy.Tensor(Tensor self, Tensor other) -> Tensor"
  7115. },
  7116. {
  7117. "name": "aten::xlogy.Scalar_Other(Tensor self, Scalar other) -> Tensor"
  7118. },
  7119. {
  7120. "name": "aten::xlogy.Scalar_Self(Scalar self, Tensor other) -> Tensor"
  7121. },
  7122. {
  7123. "name": "aten::xlogy.OutTensor(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  7124. },
  7125. {
  7126. "name": "aten::xlogy.OutScalar_Self(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  7127. },
  7128. {
  7129. "name": "aten::xlogy.OutScalar_Other(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)"
  7130. },
  7131. {
  7132. "name": "aten::xlogy_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)"
  7133. },
  7134. {
  7135. "name": "aten::xlogy_.Scalar_Other(Tensor(a!) self, Scalar other) -> Tensor(a!)"
  7136. },
  7137. {
  7138. "name": "aten::zero_(Tensor(a!) self) -> Tensor(a!)"
  7139. },
  7140. {
  7141. "name": "aten::zeros.names(int[] size, *, str[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  7142. },
  7143. {
  7144. "name": "aten::zeros(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  7145. },
  7146. {
  7147. "name": "aten::zeros.names_out(int[] size, *, str[]? names, Tensor(a!) out) -> Tensor(a!)"
  7148. },
  7149. {
  7150. "name": "aten::zeros.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)"
  7151. },
  7152. {
  7153. "name": "aten::zeros_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor"
  7154. },
  7155. {
  7156. "name": "aten::zeros_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)"
  7157. },
  7158. {
  7159. "name": "cadence::quantize_per_tensor.out(Tensor input, float scale, int zero_point, int quant_min, int quant_max, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!)"
  7160. },
  7161. {
  7162. "name": "cortex_m::dequantize_per_tensor.out(Tensor input, float scale, int zero_point, int quant_min, int quant_max, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!)"
  7163. },
  7164. {
  7165. "name": "cortex_m::maximum.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  7166. },
  7167. {
  7168. "name": "cortex_m::minimum.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  7169. },
  7170. {
  7171. "name": "cortex_m::quantize_per_tensor.out(Tensor input, float scale, int zero_point, int quant_min, int quant_max, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!)"
  7172. },
  7173. {
  7174. "name": "cortex_m::quantized_add.out(Tensor self, Scalar self_zero_point, Scalar self_multiplier, Scalar self_shift, Tensor other, Scalar other_zero_point, Scalar other_multiplier, Scalar other_shift, Scalar output_zero_point, Scalar output_multiplier, Scalar output_shift, *, Tensor(a!) out) -> Tensor(a!)"
  7175. },
  7176. {
  7177. "name": "cortex_m::quantized_conv2d.out(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, int input_offset, int output_offset, Tensor requantize_multipliers, Tensor requantize_shifts, int activation_min, int activation_max, *, Tensor(a!) out) -> Tensor(a!)"
  7178. },
  7179. {
  7180. "name": "cortex_m::quantized_linear.out(Tensor input, Tensor weights, Tensor? bias, Tensor? kernel_sum, Scalar input_offset, Scalar filter_offset, Scalar output_offset, int[] requantize_multipliers, int[] requantize_shifts, Scalar activation_max, Scalar activation_min, *, Tensor(a!) out) -> Tensor(a!)"
  7181. },
  7182. {
  7183. "name": "cortex_m::quantized_mul.out(Tensor self, Scalar self_zero_point, Tensor other, Scalar other_zero_point, Scalar output_zero_point, Scalar output_multiplier, Scalar output_shift, *, Tensor(a!) out) -> Tensor(a!)"
  7184. },
  7185. {
  7186. "name": "cortex_m::transpose.out(Tensor input, int[] perm, *, Tensor(a!) out) -> Tensor(a!)"
  7187. },
  7188. {
  7189. "name": "detectron2::nms_rotated(Tensor boxes, Tensor scores, float iou_threshold) -> Tensor"
  7190. },
  7191. {
  7192. "name": "detectron2::roi_align_rotated_forward(Tensor input, Tensor rois, float spatial_scale, int pooled_height, int pooled_width, int sampling_ratio) -> Tensor"
  7193. },
  7194. {
  7195. "name": "dim_order_ops::_clone_dim_order.out(Tensor self, *, bool non_blocking=False, int[]? dim_order=None, Tensor(a!) out) -> Tensor(a!)"
  7196. },
  7197. {
  7198. "name": "dim_order_ops::_empty_dim_order.out(int[] size, *, int[]? dim_order=None, Tensor(a!) out) -> Tensor(a!)"
  7199. },
  7200. {
  7201. "name": "dim_order_ops::_to_dim_order_copy.out(Tensor self, *, bool non_blocking=False, int[]? dim_order=None, Tensor(a!) out) -> Tensor(a!)"
  7202. },
  7203. {
  7204. "name": "executorch_prim::add.Scalar(Scalar a, Scalar b) -> Scalar"
  7205. },
  7206. {
  7207. "name": "executorch_prim::ceil.Scalar(Scalar a) -> Scalar"
  7208. },
  7209. {
  7210. "name": "executorch_prim::eq.Scalar(Scalar a, Scalar b) -> bool"
  7211. },
  7212. {
  7213. "name": "executorch_prim::et_view.default(Tensor self, int[] size) -> (Tensor out)"
  7214. },
  7215. {
  7216. "name": "executorch_prim::floordiv.Scalar(Scalar a, Scalar b) -> Scalar"
  7217. },
  7218. {
  7219. "name": "executorch_prim::ge.Scalar(Scalar a, Scalar b) -> bool"
  7220. },
  7221. {
  7222. "name": "executorch_prim::gt.Scalar(Scalar a, Scalar b) -> bool"
  7223. },
  7224. {
  7225. "name": "executorch_prim::le.Scalar(Scalar a, Scalar b) -> bool"
  7226. },
  7227. {
  7228. "name": "executorch_prim::lt.Scalar(Scalar a, Scalar b) -> bool"
  7229. },
  7230. {
  7231. "name": "executorch_prim::mod.Scalar(SymInt a, SymInt b) -> SymInt"
  7232. },
  7233. {
  7234. "name": "executorch_prim::mul.Scalar(Scalar a, Scalar b) -> Scalar"
  7235. },
  7236. {
  7237. "name": "executorch_prim::neg.Scalar(Scalar a) -> Scalar"
  7238. },
  7239. {
  7240. "name": "executorch_prim::round.Scalar(Scalar a) -> Scalar"
  7241. },
  7242. {
  7243. "name": "executorch_prim::sub.Scalar(Scalar a, Scalar b) -> Scalar"
  7244. },
  7245. {
  7246. "name": "executorch_prim::sym_float.Scalar(Scalar a) -> Scalar"
  7247. },
  7248. {
  7249. "name": "executorch_prim::sym_max.Scalar(Scalar a, Scalar b) -> Scalar"
  7250. },
  7251. {
  7252. "name": "executorch_prim::sym_min.Scalar(Scalar a, Scalar b) -> Scalar"
  7253. },
  7254. {
  7255. "name": "executorch_prim::truediv.Scalar(Scalar a, Scalar b) -> Scalar"
  7256. },
  7257. {
  7258. "name": "executorch_prim::trunc.Scalar(Scalar a) -> Scalar"
  7259. },
  7260. {
  7261. "name": "fbgemm::asynchronous_complete_cumsum(Tensor t_in) -> Tensor"
  7262. },
  7263. {
  7264. "name": "fbgemm::bf16i4bf16_rowwise(Tensor X, Tensor WQ, Tensor w_scale, Tensor w_zp) -> Tensor"
  7265. },
  7266. {
  7267. "name": "fbgemm::car_init(int rank, int world_size, Tensor local_barrier, Tensor[] all_barrier_handles, Tensor local_buffer, Tensor[] all_buffer_handles) -> ()"
  7268. },
  7269. {
  7270. "name": "fbgemm::car_ipc_handle(Tensor buffer) -> Tensor"
  7271. },
  7272. {
  7273. "name": "fbgemm::car_tensor() -> Tensor"
  7274. },
  7275. {
  7276. "name": "fbgemm::dequantize_fp8_cache(Tensor cache_K, Tensor cache_V, Tensor kv_seqlen, Tensor? qparam_k=None, Tensor? qparam_v=None) -> (Tensor, Tensor)"
  7277. },
  7278. {
  7279. "name": "fbgemm::dequantize_int4_cache(Tensor cache_K, Tensor cache_V, Tensor kv_seqlen, int? num_groups=1) -> (Tensor, Tensor)"
  7280. },
  7281. {
  7282. "name": "fbgemm::f8f8bf16(Tensor XQ, Tensor WQ, Tensor scale, bool use_fast_accum=True) -> Tensor"
  7283. },
  7284. {
  7285. "name": "fbgemm::f8f8bf16_blockwise(Tensor XQ, Tensor WQ, Tensor x_scale, Tensor w_scale, int block_m=128, int block_n=128, int block_k=128) -> Tensor"
  7286. },
  7287. {
  7288. "name": "fbgemm::f8f8bf16_cublas(Tensor A, Tensor B, Tensor? Ainvs=None, Tensor? Binvs=None, bool use_fast_accum=True, Tensor(a!)? output=None) -> Tensor"
  7289. },
  7290. {
  7291. "name": "fbgemm::f8f8bf16_rowwise(Tensor XQ, Tensor WQ, Tensor x_scale, Tensor w_scale, Tensor? bias=None, bool use_fast_accum=True, Tensor(a!)? output=None) -> Tensor"
  7292. },
  7293. {
  7294. "name": "fbgemm::f8f8bf16_tensorwise(Tensor XQ, Tensor WQ, float scale, bool use_fast_accum=True) -> Tensor"
  7295. },
  7296. {
  7297. "name": "fbgemm::f8i4bf16_rowwise(Tensor XQ, Tensor WQ, Tensor x_scale, Tensor w_scale, Tensor w_zp) -> Tensor"
  7298. },
  7299. {
  7300. "name": "fbgemm::get_fp8_per_tensor_scale(Tensor input, Tensor? bs=None, Tensor? scale_ub=None) -> Tensor"
  7301. },
  7302. {
  7303. "name": "fbgemm::gqa_attn_splitk(Tensor XQ, Tensor cache_K, Tensor cache_V, Tensor seq_positions, float qk_scale, int num_split_ks, int kv_cache_quant_num_groups=1, bool use_tensor_cores=True, int cache_logical_dtype_int=0) -> (Tensor, Tensor, Tensor)"
  7304. },
  7305. {
  7306. "name": "fbgemm::i8i8bf16(Tensor XQ, Tensor WQ, float scale, int split_k=1) -> Tensor"
  7307. },
  7308. {
  7309. "name": "fbgemm::i8i8bf16_dynamic(Tensor XQ, Tensor WQ, Tensor scale, int split_k=1) -> Tensor"
  7310. },
  7311. {
  7312. "name": "fbgemm::jagged_to_padded_dense(Tensor values, Tensor[] offsets, SymInt[] max_lengths, float padding_value=0.) -> Tensor"
  7313. },
  7314. {
  7315. "name": "fbgemm::mqa_attn(Tensor XQ, Tensor cache_K, Tensor cache_V, Tensor seq_positions, float qk_scale, int? num_groups=1, int cache_logical_dtype_int=0) -> Tensor"
  7316. },
  7317. {
  7318. "name": "fbgemm::nccl_allgather(Tensor dst, Tensor src, int comm_idx=0) -> ()"
  7319. },
  7320. {
  7321. "name": "fbgemm::nccl_allreduce(Tensor dst, Tensor src, Tensor? bias=None, int comm_idx=0) -> ()"
  7322. },
  7323. {
  7324. "name": "fbgemm::nccl_alltoall(Tensor dst, Tensor src, int world_size, int comm_idx=0) -> ()"
  7325. },
  7326. {
  7327. "name": "fbgemm::nccl_comm_init_rank(int world_size, int rank, Tensor id_, int comm_idx=0) -> ()"
  7328. },
  7329. {
  7330. "name": "fbgemm::nccl_get_unique_id() -> Tensor"
  7331. },
  7332. {
  7333. "name": "fbgemm::nccl_init(int rank, int world_size, str rendevouz, int comm_idx=0) -> ()"
  7334. },
  7335. {
  7336. "name": "fbgemm::nccl_reducescatter(Tensor dst, Tensor src, int comm_idx=0) -> ()"
  7337. },
  7338. {
  7339. "name": "fbgemm::one_shot_car_allreduce(Tensor dst, Tensor src, Tensor? bias=None, int comm_idx=0) -> ()"
  7340. },
  7341. {
  7342. "name": "fbgemm::per_tensor_dynamic_quantize_i8(Tensor X) -> (Tensor, Tensor)"
  7343. },
  7344. {
  7345. "name": "fbgemm::per_tensor_quantize_i8(Tensor X, float scale) -> Tensor"
  7346. },
  7347. {
  7348. "name": "fbgemm::quantize_fp8_per_col(Tensor input, Tensor? bs=None, Tensor? scale_ub=None) -> Tensor[]"
  7349. },
  7350. {
  7351. "name": "fbgemm::quantize_fp8_per_row(Tensor input, Tensor? bs=None, Tensor? scale_ub=None, ScalarType? output_dtype=None, bool stochastic_rounding=False) -> Tensor[]"
  7352. },
  7353. {
  7354. "name": "fbgemm::quantize_fp8_per_tensor(Tensor input, Tensor? bs=None, Tensor? scale_ub=None, bool stochastic_rounding=False) -> Tensor[]"
  7355. },
  7356. {
  7357. "name": "fbgemm::quantize_fp8_per_tensor_fixed_scale(Tensor input, Tensor scale, Tensor? bs=None, bool stochatic_rounding=False) -> Tensor"
  7358. },
  7359. {
  7360. "name": "fbgemm::rope_qkv_decoding(Tensor XQ, Tensor XK, Tensor XV, Tensor(a!) cache_K, Tensor(b!) cache_V, Tensor seqpos, float theta, int? num_groups=1, Tensor? block_tables=None, int page_size=64, Tensor? actual_batch_size=None, Tensor? batch=None, Tensor? cache_seqpos=None, int cache_logical_dtype_int=0, bool rope_scaling=False, int old_context_len=8192, float scaling_factor=16., float lo_freq_factor=1., float hi_freq_factor=32., Tensor? qparam_k=None, Tensor? qparam_v=None) -> Tensor"
  7361. },
  7362. {
  7363. "name": "fbgemm::rope_qkv_varseq_prefill(Tensor XQ, Tensor XK, Tensor XV, Tensor(a!) cache_K, Tensor(b!) cache_V, Tensor varseq_batch, Tensor varseq_seqpos, float theta, int? num_groups=1, Tensor? block_tables=None, int page_size=64, Tensor? varseq_cache_seqpos=None, int cache_logical_dtype_int=0, bool rope_scaling=False, int old_context_len=8192, float scaling_factor=16., float lo_freq_factor=1., float hi_freq_factor=32., Tensor? qparam_k=None, Tensor? qparam_v=None) -> Tensor"
  7364. },
  7365. {
  7366. "name": "fbgemm::silu_mul_quantize_i8(Tensor X1, Tensor X2, float scale) -> Tensor"
  7367. },
  7368. {
  7369. "name": "fbgemm::two_shot_car_allreduce(Tensor dst, Tensor src, Tensor? bias=None, int comm_idx=0) -> ()"
  7370. },
  7371. {
  7372. "name": "fbgemm::xpos_qkv_decoding(Tensor XQ, Tensor XK, Tensor XV, Tensor(a!) cache_K, Tensor(b!) cache_V, Tensor seqpos, float theta, float gamma, float scale_base, float exponent_offset, int? num_groups=1, Tensor? block_tables=None, int page_size=64, Tensor? actual_batch_size=None, Tensor? batch=None, Tensor? cache_seqpos=None, int cache_logical_dtype_int=0, bool rope_scaling=False, int old_context_len=8192, float scaling_factor=16., float lo_freq_factor=1., float hi_freq_factor=32., Tensor? qparam_k=None, Tensor? qparam_v=None) -> Tensor"
  7373. },
  7374. {
  7375. "name": "fbgemm::xpos_qkv_varseq_prefill(Tensor XQ, Tensor XK, Tensor XV, Tensor(a!) cache_K, Tensor(b!) cache_V, Tensor varseq_batch, Tensor varseq_seqpos, float theta, float gamma, float scale_base, float exponent_offset, int? num_groups=1, Tensor? block_tables=None, int page_size=64, Tensor? varseq_cache_seqpos=None, int cache_logical_dtype_int=0, bool rope_scaling=False, int old_context_len=8192, float scaling_factor=16., float lo_freq_factor=1., float hi_freq_factor=32., Tensor? qparam_k=None, Tensor? qparam_v=None) -> Tensor"
  7376. },
  7377. {
  7378. "name": "horizon::scale_quanti(Tensor x, Tensor scale, Tensor zero_point, int d, int min, int max, bool flag1, bool flat2, str str1, str str2) -> Tensor"
  7379. },
  {
    "name": "llama::custom_sdpa.out(Tensor query, Tensor key, Tensor value, SymInt start_pos, Tensor? attn_mask=None, float drpout_p=0.0, bool is_causal=False, float? scale=None, *, Tensor(a!) out) -> Tensor(a!)"
  },
  {
    "name": "llama::custom_sdpa(Tensor query, Tensor key, Tensor value, SymInt start_pos, Tensor? attn_mask=None, float drpout_p=0.0, bool is_causal=False, float? scale=None) -> Tensor"
  },
  {
    "name": "llama::fast_hadamard_transform.out(Tensor mat, *, Tensor(a!) out) -> Tensor(a!)"
  },
  {
    "name": "llama::sdpa.out(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float drpout_p=0.0, bool is_causal=False, float? scale=None, *, Tensor(a!) out) -> Tensor(a!)"
  },
  {
    "name": "llama::sdpa_with_kv_cache.out(Tensor query, Tensor key, Tensor value, Tensor(a!) key_cache, Tensor(b!) value_cache, SymInt start_pos, SymInt seq_len, Tensor? attn_mask=None, float drpout_p=0.0, bool is_causal=False, float? scale=None, *, Tensor(c!) out) -> Tensor(c!)"
  },
  {
    "name": "llama::sdpa_with_kv_cache(Tensor query, Tensor key, Tensor value, Tensor(a!) key_cache, Tensor(b!) value_cache, SymInt start_pos, SymInt seq_len, Tensor? attn_mask=None, float drpout_p=0.0, bool is_causal=False, float? scale=None) -> Tensor"
  },
  {
    "name": "llama::update_cache.out(Tensor value, Tensor(a!) cache, SymInt start_pos, *, Tensor(b!) out) -> Tensor(b!)"
  },
  {
    "name": "llama::update_cache(Tensor value, Tensor(a!) cache, SymInt start_pos) -> Tensor"
  },
  {
    "name": "neuron::_execute_neuron(__torch__.torch.classes.neuron.Model _0, Tensor[] _1) -> Tensor[] _0"
  },
  {
    "name": "neuron::_from_neuron(Tensor _0) -> Tensor _0"
  },
  {
    "name": "neuron::_init_neuron() -> ()"
  },
  {
    "name": "neuron::_load_collectives_neuron(__torch__.torch.classes.neuron.Model _0, int _1, int _2, int _3, int _4) -> ()"
  },
  {
    "name": "neuron::_load_neuron(__torch__.torch.classes.neuron.Model _0) -> ()"
  },
  {
    "name": "neuron::_parallel_executor_run(__torch__.torch.classes.neuron.ParallelExecutor _0, Tensor[] _1, int _2) -> Tensor[] _0"
  },
  {
    "name": "neuron::_parallel_from_neuron(Tensor _0) -> Tensor[] _0"
  },
  {
    "name": "neuron::_parallel_load(Dict(str, Tensor)[] _0) -> Dict(str, Tensor)[] _0"
  },
  {
    "name": "neuron::_parallel_profile_start_neuron(__torch__.torch.classes.neuron.ParallelModel _0, str _1, int _2) -> str[] _0"
  },
  {
    "name": "neuron::_parallel_profile_stop_neuron(str[] _0) -> ()"
  },
  {
    "name": "neuron::_parallel_run_neuron(__torch__.torch.classes.neuron.ParallelModel _0, __torch__.torch.classes.neuron.ParallelTensorSet _1, __torch__.torch.classes.neuron.ParallelTensorSet _2) -> ()"
  },
  {
    "name": "neuron::_parallel_slice_neuron(Tensor _0, int _1, int _2, int _3, int _4) -> Tensor _0"
  },
  {
    "name": "neuron::_parallel_to_neuron(Tensor[] _0) -> Tensor _0"
  },
  {
    "name": "neuron::_parallel_write_neuron(Tensor _0, Tensor[] _1) -> ()"
  },
  {
    "name": "neuron::_profile_start_neuron(__torch__.torch.classes.neuron.Model _0, str _1) -> ()"
  },
  {
    "name": "neuron::_profile_stop_neuron(str _0) -> ()"
  },
  {
    "name": "neuron::_slice_neuron(Tensor _0, int _1, int _2, int _3, int _4) -> Tensor _0"
  },
  {
    "name": "neuron::_to_neuron(Tensor _0, int _1) -> Tensor _0"
  },
  {
    "name": "neuron::create_module_from_graph(str _0, str _1) -> str _0"
  },
  {
    "name": "neuron::forward_1(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> Tensor _0"
  },
  {
    "name": "neuron::forward_10(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9)"
  },
  {
    "name": "neuron::forward_11(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10)"
  },
  {
    "name": "neuron::forward_12(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11)"
  },
  {
    "name": "neuron::forward_13(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12)"
  },
  {
    "name": "neuron::forward_14(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13)"
  },
  {
    "name": "neuron::forward_15(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14)"
  },
  {
    "name": "neuron::forward_16(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15)"
  },
  {
    "name": "neuron::forward_17(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16)"
  },
  {
    "name": "neuron::forward_18(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17)"
  },
  {
    "name": "neuron::forward_19(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18)"
  },
  {
    "name": "neuron::forward_2(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1)"
  },
  {
    "name": "neuron::forward_20(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19)"
  },
  {
    "name": "neuron::forward_21(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20)"
  },
  {
    "name": "neuron::forward_22(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21)"
  },
  {
    "name": "neuron::forward_23(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22)"
  },
  {
    "name": "neuron::forward_24(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23)"
  },
  {
    "name": "neuron::forward_25(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24)"
  },
  {
    "name": "neuron::forward_26(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25)"
  },
  {
    "name": "neuron::forward_27(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26)"
  },
  {
    "name": "neuron::forward_28(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27)"
  },
  {
    "name": "neuron::forward_29(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28)"
  },
  {
    "name": "neuron::forward_3(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2)"
  },
  {
    "name": "neuron::forward_30(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29)"
  },
  {
    "name": "neuron::forward_31(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30)"
  },
  {
    "name": "neuron::forward_32(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31)"
  },
  {
    "name": "neuron::forward_33(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32)"
  },
  {
    "name": "neuron::forward_34(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33)"
  },
  {
    "name": "neuron::forward_35(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34)"
  },
  {
    "name": "neuron::forward_36(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35)"
  },
  {
    "name": "neuron::forward_37(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36)"
  },
  {
    "name": "neuron::forward_38(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37)"
  },
  {
    "name": "neuron::forward_39(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38)"
  },
  {
    "name": "neuron::forward_4(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3)"
  },
  {
    "name": "neuron::forward_40(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39)"
  },
  {
    "name": "neuron::forward_41(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40)"
  },
  {
    "name": "neuron::forward_42(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41)"
  },
  {
    "name": "neuron::forward_43(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42)"
  },
  {
    "name": "neuron::forward_44(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43)"
  },
  {
    "name": "neuron::forward_45(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44)"
  },
  {
    "name": "neuron::forward_46(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45)"
  },
  {
    "name": "neuron::forward_47(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46)"
  },
  {
    "name": "neuron::forward_48(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47)"
  },
  {
    "name": "neuron::forward_49(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47, Tensor _48)"
  },
  {
    "name": "neuron::forward_5(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4)"
  },
  {
    "name": "neuron::forward_50(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47, Tensor _48, Tensor _49)"
  },
  {
    "name": "neuron::forward_51(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47, Tensor _48, Tensor _49, Tensor _50)"
  },
  {
    "name": "neuron::forward_52(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47, Tensor _48, Tensor _49, Tensor _50, Tensor _51)"
  },
  {
    "name": "neuron::forward_53(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47, Tensor _48, Tensor _49, Tensor _50, Tensor _51, Tensor _52)"
  },
  {
    "name": "neuron::forward_54(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47, Tensor _48, Tensor _49, Tensor _50, Tensor _51, Tensor _52, Tensor _53)"
  },
  {
    "name": "neuron::forward_55(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47, Tensor _48, Tensor _49, Tensor _50, Tensor _51, Tensor _52, Tensor _53, Tensor _54)"
  },
  {
    "name": "neuron::forward_56(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47, Tensor _48, Tensor _49, Tensor _50, Tensor _51, Tensor _52, Tensor _53, Tensor _54, Tensor _55)"
  },
  {
    "name": "neuron::forward_57(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47, Tensor _48, Tensor _49, Tensor _50, Tensor _51, Tensor _52, Tensor _53, Tensor _54, Tensor _55, Tensor _56)"
  },
  {
    "name": "neuron::forward_58(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47, Tensor _48, Tensor _49, Tensor _50, Tensor _51, Tensor _52, Tensor _53, Tensor _54, Tensor _55, Tensor _56, Tensor _57)"
  },
  {
    "name": "neuron::forward_59(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47, Tensor _48, Tensor _49, Tensor _50, Tensor _51, Tensor _52, Tensor _53, Tensor _54, Tensor _55, Tensor _56, Tensor _57, Tensor _58)"
  },
  {
    "name": "neuron::forward_6(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5)"
  },
  {
    "name": "neuron::forward_60(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47, Tensor _48, Tensor _49, Tensor _50, Tensor _51, Tensor _52, Tensor _53, Tensor _54, Tensor _55, Tensor _56, Tensor _57, Tensor _58, Tensor _59)"
  },
  {
    "name": "neuron::forward_61(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47, Tensor _48, Tensor _49, Tensor _50, Tensor _51, Tensor _52, Tensor _53, Tensor _54, Tensor _55, Tensor _56, Tensor _57, Tensor _58, Tensor _59, Tensor _60)"
  },
  {
    "name": "neuron::forward_62(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47, Tensor _48, Tensor _49, Tensor _50, Tensor _51, Tensor _52, Tensor _53, Tensor _54, Tensor _55, Tensor _56, Tensor _57, Tensor _58, Tensor _59, Tensor _60, Tensor _61)"
  },
  {
    "name": "neuron::forward_63(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47, Tensor _48, Tensor _49, Tensor _50, Tensor _51, Tensor _52, Tensor _53, Tensor _54, Tensor _55, Tensor _56, Tensor _57, Tensor _58, Tensor _59, Tensor _60, Tensor _61, Tensor _62)"
  },
  {
    "name": "neuron::forward_64(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47, Tensor _48, Tensor _49, Tensor _50, Tensor _51, Tensor _52, Tensor _53, Tensor _54, Tensor _55, Tensor _56, Tensor _57, Tensor _58, Tensor _59, Tensor _60, Tensor _61, Tensor _62, Tensor _63)"
  },
  {
    "name": "neuron::forward_7(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6)"
  },
  {
    "name": "neuron::forward_8(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7)"
  },
  {
    "name": "neuron::forward_9(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8)"
  },
  {
    "name": "neuron::forward_v2(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> Tensor[] _0"
  },
  {
    "name": "neuron::forward_v2_1(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> Tensor _0"
  },
  {
    "name": "neuron::forward_v2_10(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9)"
  },
  {
    "name": "neuron::forward_v2_11(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10)"
  },
  {
    "name": "neuron::forward_v2_12(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11)"
  },
  {
    "name": "neuron::forward_v2_13(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12)"
  },
  {
    "name": "neuron::forward_v2_14(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13)"
  },
  {
    "name": "neuron::forward_v2_15(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14)"
  },
  {
    "name": "neuron::forward_v2_16(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15)"
  },
  {
    "name": "neuron::forward_v2_17(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16)"
  },
  {
    "name": "neuron::forward_v2_18(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17)"
  },
  {
    "name": "neuron::forward_v2_19(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18)"
  },
  {
    "name": "neuron::forward_v2_2(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1)"
  },
  {
    "name": "neuron::forward_v2_20(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19)"
  },
  {
    "name": "neuron::forward_v2_21(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20)"
  },
  {
    "name": "neuron::forward_v2_22(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21)"
  },
  {
    "name": "neuron::forward_v2_23(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22)"
  },
  {
    "name": "neuron::forward_v2_24(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23)"
  },
  {
    "name": "neuron::forward_v2_25(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24)"
  },
  {
    "name": "neuron::forward_v2_26(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25)"
  },
  {
    "name": "neuron::forward_v2_27(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26)"
  },
  {
    "name": "neuron::forward_v2_28(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27)"
  },
  {
    "name": "neuron::forward_v2_29(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28)"
  },
  {
    "name": "neuron::forward_v2_3(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2)"
  },
  {
    "name": "neuron::forward_v2_30(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29)"
  },
  {
    "name": "neuron::forward_v2_31(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30)"
  },
  {
    "name": "neuron::forward_v2_32(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31)"
  },
  {
    "name": "neuron::forward_v2_33(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32)"
  },
  {
    "name": "neuron::forward_v2_35(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34)"
  },
  {
    "name": "neuron::forward_v2_36(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35)"
  },
  {
    "name": "neuron::forward_v2_37(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36)"
  },
  {
    "name": "neuron::forward_v2_38(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37)"
  },
  {
    "name": "neuron::forward_v2_39(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38)"
  },
  {
    "name": "neuron::forward_v2_4(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3)"
  },
  {
    "name": "neuron::forward_v2_40(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39)"
  },
  {
    "name": "neuron::forward_v2_41(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40)"
  },
  {
    "name": "neuron::forward_v2_42(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41)"
  },
  {
    "name": "neuron::forward_v2_43(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42)"
  },
  {
    "name": "neuron::forward_v2_44(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43)"
  },
  {
    "name": "neuron::forward_v2_45(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44)"
  },
  {
    "name": "neuron::forward_v2_46(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45)"
  },
  {
    "name": "neuron::forward_v2_47(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46)"
  },
  {
    "name": "neuron::forward_v2_48(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47)"
  },
  {
    "name": "neuron::forward_v2_49(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47, Tensor _48)"
  },
  {
    "name": "neuron::forward_v2_5(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4)"
  },
  {
    "name": "neuron::forward_v2_50(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47, Tensor _48, Tensor _49)"
  },
  {
    "name": "neuron::forward_v2_51(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47, Tensor _48, Tensor _49, Tensor _50)"
  },
  {
    "name": "neuron::forward_v2_52(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47, Tensor _48, Tensor _49, Tensor _50, Tensor _51)"
  },
  {
    "name": "neuron::forward_v2_53(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47, Tensor _48, Tensor _49, Tensor _50, Tensor _51, Tensor _52)"
  },
  {
    "name": "neuron::forward_v2_54(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47, Tensor _48, Tensor _49, Tensor _50, Tensor _51, Tensor _52, Tensor _53)"
  },
  {
    "name": "neuron::forward_v2_55(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47, Tensor _48, Tensor _49, Tensor _50, Tensor _51, Tensor _52, Tensor _53, Tensor _54)"
  },
  {
    "name": "neuron::forward_v2_56(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47, Tensor _48, Tensor _49, Tensor _50, Tensor _51, Tensor _52, Tensor _53, Tensor _54, Tensor _55)"
  },
  {
    "name": "neuron::forward_v2_57(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47, Tensor _48, Tensor _49, Tensor _50, Tensor _51, Tensor _52, Tensor _53, Tensor _54, Tensor _55, Tensor _56)"
  },
  {
    "name": "neuron::forward_v2_58(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47, Tensor _48, Tensor _49, Tensor _50, Tensor _51, Tensor _52, Tensor _53, Tensor _54, Tensor _55, Tensor _56, Tensor _57)"
  },
  {
    "name": "neuron::forward_v2_59(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47, Tensor _48, Tensor _49, Tensor _50, Tensor _51, Tensor _52, Tensor _53, Tensor _54, Tensor _55, Tensor _56, Tensor _57, Tensor _58)"
  },
  {
    "name": "neuron::forward_v2_6(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5)"
  },
  {
    "name": "neuron::forward_v2_60(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47, Tensor _48, Tensor _49, Tensor _50, Tensor _51, Tensor _52, Tensor _53, Tensor _54, Tensor _55, Tensor _56, Tensor _57, Tensor _58, Tensor _59)"
  7823. },
  7824. {
  7825. "name": "neuron::forward_v2_61(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47, Tensor _48, Tensor _49, Tensor _50, Tensor _51, Tensor _52, Tensor _53, Tensor _54, Tensor _55, Tensor _56, Tensor _57, Tensor _58, Tensor _59, Tensor _60)"
  7826. },
  7827. {
  7828. "name": "neuron::forward_v2_62(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47, Tensor _48, Tensor _49, Tensor _50, Tensor _51, Tensor _52, Tensor _53, Tensor _54, Tensor _55, Tensor _56, Tensor _57, Tensor _58, Tensor _59, Tensor _60, Tensor _61)"
  7829. },
  7830. {
  7831. "name": "neuron::forward_v2_63(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47, Tensor _48, Tensor _49, Tensor _50, Tensor _51, Tensor _52, Tensor _53, Tensor _54, Tensor _55, Tensor _56, Tensor _57, Tensor _58, Tensor _59, Tensor _60, Tensor _61, Tensor _62)"
  7832. },
  7833. {
  7834. "name": "neuron::forward_v2_64(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47, Tensor _48, Tensor _49, Tensor _50, Tensor _51, Tensor _52, Tensor _53, Tensor _54, Tensor _55, Tensor _56, Tensor _57, Tensor _58, Tensor _59, Tensor _60, Tensor _61, Tensor _62, Tensor _63)"
  7835. },
  7836. {
  7837. "name": "neuron::forward_v2_7(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6)"
  7838. },
  7839. {
  7840. "name": "neuron::forward_v2_8(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7)"
  7841. },
  7842. {
  7843. "name": "neuron::forward_v2_9(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8)"
  7844. },
  {
    "name": "neuron::rnn(Tensor _0, Tensor[] _1, __torch__.torch.classes.neuron.RnnBinding _2, int _3) -> (Tensor _0, Tensor[] _1)"
  },
  {
    "name": "neuron::rnn_v2(Tensor _0, Tensor _1, Tensor _2, int _3, __torch__.torch.classes.neuron.RnnBinding_v2[] _4) -> (Tensor _0, Tensor _1, Tensor _2)"
  },
  {
    "name": "prepacked::conv2d_clamp_prepack(Tensor W, Tensor? B, int[2] stride, int[2] padding, int[2] dilation, int groups, Scalar? output_min=None, Scalar? output_max=None) -> __torch__.torch.classes.xnnpack.Conv2dOpContext"
  },
  {
    "name": "prepacked::conv2d_clamp_run(Tensor X, __torch__.torch.classes.xnnpack.Conv2dOpContext W_prepack) -> Tensor Y",
    "category": "Layer"
  },
  {
    "name": "prepacked::conv2d_transpose_clamp_prepack(Tensor W, Tensor? B, int[2] stride, int[2] padding, int[2] output_padding, int[2] dilation, int groups, Scalar? output_min=None, Scalar? output_max=None) -> __torch__.torch.classes.xnnpack.TransposeConv2dOpContext"
  },
  {
    "name": "prepacked::conv2d_transpose_clamp_run(Tensor X, __torch__.torch.classes.xnnpack.TransposeConv2dOpContext W_prepack) -> Tensor Y",
    "category": "Layer"
  },
  {
    "name": "prepacked::linear_clamp_prepack(Tensor W, Tensor? B=None, Scalar? output_min=None, Scalar? output_max=None) -> __torch__.torch.classes.xnnpack.LinearOpContext"
  },
  {
    "name": "prepacked::linear_clamp_run(Tensor X, __torch__.torch.classes.xnnpack.LinearOpContext W_prepack) -> Tensor Y",
    "category": "Layer"
  },
  {
    "name": "prim::AutogradAdd(Any a, Any b) -> Any"
  },
  {
    "name": "prim::AutogradAllNonZero(...) -> bool"
  },
  {
    "name": "prim::AutogradAllZero(...) -> bool"
  },
  {
    "name": "prim::AutogradAnyNonZero(...) -> bool"
  },
  {
    "name": "prim::AutogradZero() -> Tensor"
  },
  {
    "name": "prim::BroadcastSizes(...) -> int[]"
  },
  {
    "name": "prim::ConstantChunk(...) -> ..."
  },
  {
    "name": "prim::ConstantMKLDNNTensor(...) -> ..."
  },
  {
    "name": "prim::EnumName(AnyEnumType enum) -> str"
  },
  {
    "name": "prim::EnumValue.int(AnyEnumType enum) -> int"
  },
  {
    "name": "prim::EnumValue.float(AnyEnumType enum) -> float"
  },
  {
    "name": "prim::EnumValue.str(AnyEnumType enum) -> str"
  },
  {
    "name": "prim::IfThenElse(bool cond, Any(a) x, Any(b) y) -> Any(a|b)"
  },
  {
    "name": "prim::ModuleContainerIndex.list(Any self, int ind) -> Any"
  },
  {
    "name": "prim::ModuleContainerIndex.dict(Any self, str ind) -> Any"
  },
  {
    "name": "prim::NumToTensor.Scalar(Scalar a) -> Tensor"
  },
  {
    "name": "prim::NumToTensor.bool(bool a) -> Tensor"
  },
  {
    "name": "prim::Print(...) -> ()"
  },
  {
    "name": "prim::RaiseException(str msg, str? cls=None) -> ()"
  },
  {
    "name": "prim::ReductionSizes(int[] size, int[] red_axes, bool keepdim=False) -> int[]"
  },
  {
    "name": "prim::StringIndex(str string, int index) -> str"
  },
  {
    "name": "prim::TupleIndex(Any tup, int i) -> Any"
  },
  {
    "name": "prim::TupleUnpack(Any tup) -> ..."
  },
  {
    "name": "prim::Uninitialized() -> Any"
  },
  {
    "name": "prim::VarConcat(...) -> Tensor"
  },
  {
    "name": "prim::VarStack(...) -> Tensor"
  },
  {
    "name": "prim::abs.int(int a) -> int"
  },
  {
    "name": "prim::abs.float(float a) -> float"
  },
  {
    "name": "prim::abs.complex(complex a) -> float"
  },
  {
    "name": "prim::abs.Scalar(Scalar a) -> Scalar"
  },
  {
    "name": "prim::abs(Tensor x) -> Tensor"
  },
  {
    "name": "prim::data(Tensor(a) a) -> Tensor(a)"
  },
  {
    "name": "prim::device(Tensor a) -> Device"
  },
  {
    "name": "prim::dtype(Tensor a) -> int"
  },
  {
    "name": "prim::grad(Tensor a) -> Tensor(*)"
  },
  {
    "name": "prim::id(AnyClassType? x) -> int"
  },
  {
    "name": "prim::index(Device self) -> int?"
  },
  {
    "name": "prim::is_cpu(Tensor a) -> bool"
  },
  {
    "name": "prim::is_cuda(Tensor a) -> bool"
  },
  {
    "name": "prim::is_ipu(Tensor a) -> bool"
  },
  {
    "name": "prim::is_maia(Tensor a) -> bool"
  },
  {
    "name": "prim::is_meta(Tensor a) -> bool"
  },
  {
    "name": "prim::is_mkldnn(Tensor a) -> bool"
  },
  {
    "name": "prim::is_mps(Tensor a) -> bool"
  },
  {
    "name": "prim::is_mtia(Tensor a) -> bool"
  },
  {
    "name": "prim::is_nested(Tensor a) -> bool"
  },
  {
    "name": "prim::is_quantized(Tensor a) -> bool"
  },
  {
    "name": "prim::is_sparse(Tensor a) -> bool"
  },
  {
    "name": "prim::is_sparse_csr(Tensor a) -> bool"
  },
  {
    "name": "prim::is_vulkan(Tensor a) -> bool"
  },
  {
    "name": "prim::is_xla(Tensor a) -> bool"
  },
  {
    "name": "prim::is_xpu(Tensor a) -> bool"
  },
  {
    "name": "prim::isinstance(Any to_check) -> bool"
  },
  {
    "name": "prim::itemsize(Tensor a) -> int"
  },
  {
    "name": "prim::layout(Tensor a) -> Layout"
  },
  {
    "name": "prim::max.int(int a, int b) -> int"
  },
  {
    "name": "prim::max.float(float a, float b) -> float"
  },
  {
    "name": "prim::max.int_float(int a, float b) -> float"
  },
  {
    "name": "prim::max.float_int(float a, int b) -> float"
  },
  {
    "name": "prim::max(Scalar a, Scalar b) -> Scalar"
  },
  {
    "name": "prim::max.int_list(int[] l, int[] r) -> int[]"
  },
  {
    "name": "prim::max.self_int(int[] self) -> int"
  },
  {
    "name": "prim::max.float_list(float[] l, float[] r) -> float[]"
  },
  {
    "name": "prim::max.self_float(float[] self) -> float"
  },
  {
    "name": "prim::max.bool_list(bool[] l, bool[] r) -> bool[]"
  },
  {
    "name": "prim::max.self_bool(bool[] self) -> bool"
  },
  {
    "name": "prim::min.int(int a, int b) -> int"
  },
  {
    "name": "prim::min.float(float a, float b) -> float"
  },
  {
    "name": "prim::min.int_float(int a, float b) -> float"
  },
  {
    "name": "prim::min.float_int(float a, int b) -> float"
  },
  {
    "name": "prim::min(Scalar a, Scalar b) -> Scalar"
  },
  {
    "name": "prim::min.int_list(int[] l, int[] r) -> int[]"
  },
  {
    "name": "prim::min.self_int(int[] self) -> int"
  },
  {
    "name": "prim::min.float_list(float[] l, float[] r) -> float[]"
  },
  {
    "name": "prim::min.self_float(float[] self) -> float"
  },
  {
    "name": "prim::min.bool_list(bool[] l, bool[] r) -> bool[]"
  },
  {
    "name": "prim::min.self_bool(bool[] self) -> bool"
  },
  {
    "name": "prim::mkldnn_convolution(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups) -> Tensor"
  },
  {
    "name": "prim::name(Tensor a) -> str?"
  },
  {
    "name": "prim::nbytes(Tensor a) -> int"
  },
  {
    "name": "prim::rangelist(int n) -> int[]"
  },
  {
    "name": "prim::requires_grad(Tensor a) -> bool"
  },
  {
    "name": "prim::shape(Tensor self) -> int[]"
  },
  {
    "name": "prim::tolist(...) -> ..."
  },
  {
    "name": "prim::type(Device self) -> str"
  },
  {
    "name": "prim::unchecked_cast(t x) -> t"
  },
  {
    "name": "prim::unchecked_unwrap_optional(t(a)? optional) -> t(a)"
  },
  {
    "name": "prims::collapse(Tensor a, int start, int end) -> Tensor"
  },
  {
    "name": "profiler::_record_function_enter_new(str name, str? args=None) -> __torch__.torch.classes.profiler._RecordFunction"
  },
  {
    "name": "quantized::add(Tensor qa, Tensor qb, float scale, int zero_point) -> Tensor qc"
  },
  {
    "name": "quantized::add.out(Tensor qa, Tensor qb, Tensor(a!) out) -> Tensor(a!) out"
  },
  {
    "name": "quantized::add.Scalar(Tensor qa, Scalar b) -> Tensor qc"
  },
  {
    "name": "quantized::add.Scalar2(Scalar b, Tensor qa) -> Tensor qc"
  },
  {
    "name": "quantized::add.Scalar_out(Tensor qa, Scalar b, Tensor(a!) out) -> Tensor(a!) out"
  },
  {
    "name": "quantized::add_out(Tensor qa, Tensor qb, Tensor(a!) out) -> Tensor(a!) out"
  },
  {
    "name": "quantized::add_relu(Tensor qa, Tensor qb, float scale, int zero_point) -> Tensor qc"
  },
  {
    "name": "quantized::add_relu.out(Tensor qa, Tensor qb, Tensor(a!) out) -> Tensor(a!) out"
  },
  {
    "name": "quantized::add_relu.Scalar(Tensor qa, Scalar b) -> Tensor qc"
  },
  {
    "name": "quantized::add_relu.Scalar2(Scalar b, Tensor qa) -> Tensor qc"
  },
  {
    "name": "quantized::add_relu.Scalar_out(Tensor qa, Scalar b, Tensor(a!) out) -> Tensor(a!) out"
  },
  {
    "name": "quantized::add_relu_out(Tensor qa, Tensor qb, Tensor(a!) out) -> Tensor(a!) out"
  },
  {
    "name": "quantized::add_scalar(Tensor qa, Scalar b) -> Tensor qc"
  },
  {
    "name": "quantized::add_scalar.Tensor(Tensor qa, Tensor b) -> Tensor qc"
  },
  {
    "name": "quantized::add_scalar_out(Tensor qa, Scalar b, Tensor(a!) out) -> Tensor(a!) out"
  },
  {
    "name": "quantized::add_scalar_out.Tensor(Tensor qa, Tensor b, Tensor(a!) out) -> Tensor(a!) out"
  },
  {
    "name": "quantized::add_scalar_relu(Tensor qa, Scalar b) -> Tensor qc"
  },
  {
    "name": "quantized::add_scalar_relu.Tensor(Tensor qa, Tensor b) -> Tensor qc"
  },
  {
    "name": "quantized::add_scalar_relu_out(Tensor qa, Scalar b, Tensor(a!) out) -> Tensor(a!) out"
  },
  {
    "name": "quantized::add_scalar_relu_out.Tensor(Tensor qa, Tensor b, Tensor(a!) out) -> Tensor(a!) out"
  },
  {
    "name": "quantized::batch_norm(Tensor qx, Tensor? weight, Tensor? bias, Tensor mean, Tensor var, float eps, float output_scale, int output_zero_point) -> Tensor"
  },
  {
    "name": "quantized::batch_norm1d(Tensor qx, Tensor? weight, Tensor? bias, Tensor mean, Tensor var, float eps, float output_scale, int output_zero_point) -> Tensor",
    "category": "Normalization"
  },
  {
    "name": "quantized::batch_norm1d_relu(Tensor qx, Tensor? weight, Tensor? bias, Tensor mean, Tensor var, float eps, float output_scale, int output_zero_point) -> Tensor",
    "category": "Normalization"
  },
  {
    "name": "quantized::batch_norm2d(Tensor qx, Tensor? weight, Tensor? bias, Tensor mean, Tensor var, float eps, float output_scale, int output_zero_point) -> Tensor",
    "category": "Normalization"
  },
  {
    "name": "quantized::batch_norm2d_relu(Tensor qx, Tensor? weight, Tensor? bias, Tensor mean, Tensor var, float eps, float output_scale, int output_zero_point) -> Tensor",
    "category": "Normalization"
  },
  {
    "name": "quantized::batch_norm3d(Tensor qx, Tensor? weight, Tensor? bias, Tensor mean, Tensor var, float eps, float output_scale, int output_zero_point) -> Tensor",
    "category": "Normalization"
  },
  {
    "name": "quantized::batch_norm3d_relu(Tensor qx, Tensor? weight, Tensor? bias, Tensor mean, Tensor var, float eps, float output_scale, int output_zero_point) -> Tensor",
    "category": "Normalization"
  },
  {
    "name": "quantized::batch_norm_relu(Tensor qx, Tensor? weight, Tensor? bias, Tensor mean, Tensor var, float eps, float output_scale, int output_zero_point) -> Tensor",
    "category": "Normalization"
  },
  {
    "name": "quantized::cat(Tensor[] qx, int dim, float? scale, int? zero_point) -> Tensor",
    "category": "Tensor"
  },
  {
    "name": "quantized::cat_relu(Tensor[] qx, int dim, float? scale, int? zero_point) -> Tensor",
    "category": "Tensor"
  },
  {
    "name": "quantized::celu(Tensor self, float output_scale, int output_zero_point, Scalar alpha=1) -> Tensor",
    "category": "Activation"
  },
  {
    "name": "quantized::conv1d(Tensor qx, __torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weight, float output_scale, int output_zero_point) -> Tensor",
    "category": "Layer"
  },
  {
    "name": "quantized::conv1d_prepack(Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups) -> __torch__.torch.classes.quantized.Conv2dPackedParamsBase"
  },
  {
    "name": "quantized::conv1d_relu(Tensor qx, __torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weight, float output_scale, int output_zero_point) -> Tensor",
    "category": "Layer"
  },
  {
    "name": "quantized::conv1d_unpack(__torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weights) -> (Tensor unpacked_weights, Tensor? B_origin)"
  },
  {
    "name": "quantized::conv2d.new(Tensor qx, __torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weight, float output_scale, int output_zero_point) -> Tensor",
    "category": "Layer"
  },
  {
    "name": "quantized::conv2d(Tensor qx, __torch__.torch.classes.quantized.Conv2dPackedParamsBase weight, int[] stride, int[] padding, int[] dilation, int groups, float output_scale, int output_zero_point) -> Tensor",
    "category": "Layer"
  },
  {
    "name": "quantized::conv2d_dilation(__torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weights) -> int[]"
  },
  {
    "name": "quantized::conv2d_dynamic(Tensor qx, __torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weight, bool reduce_range=False) -> Tensor"
  },
  {
    "name": "quantized::conv2d_groups(__torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weights) -> int"
  },
  {
    "name": "quantized::conv2d_output_padding(__torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weights) -> int[]"
  },
  {
    "name": "quantized::conv2d_padding(__torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weights) -> int[]"
  },
  {
    "name": "quantized::conv2d_prepack(Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups) -> __torch__.torch.classes.quantized.Conv2dPackedParamsBase"
  },
  {
    "name": "quantized::conv2d_relu.new(Tensor qx, __torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weight, float output_scale, int output_zero_point) -> Tensor",
    "category": "Layer"
  },
  {
    "name": "quantized::conv2d_relu(Tensor qx, __torch__.torch.classes.quantized.Conv2dPackedParamsBase weight, int[] stride, int[] padding, int[] dilation, int groups, float output_scale, int output_zero_point) -> Tensor",
    "category": "Layer"
  },
  {
    "name": "quantized::conv2d_stride(__torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weights) -> int[]"
  },
  {
    "name": "quantized::conv2d_transpose(__torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weights) -> int"
  },
  {
    "name": "quantized::conv2d_unpack(__torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weights) -> (Tensor unpacked_weights, Tensor? B_origin)"
  },
  {
    "name": "quantized::conv2d_unpack_sizes(Any packed_weights) -> Any"
  },
  {
    "name": "quantized::conv3d.new(Tensor qx, __torch__.torch.classes.quantized.Conv3dPackedParamsBase packed_weight, float output_scale, int output_zero_point) -> Tensor",
    "category": "Layer"
  },
  {
    "name": "quantized::conv3d(Tensor qx, __torch__.torch.classes.quantized.Conv3dPackedParamsBase weight, int[] stride, int[] padding, int[] dilation, int groups, float output_scale, int output_zero_point) -> Tensor",
    "category": "Layer"
  },
  {
    "name": "quantized::conv3d_prepack(Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups) -> __torch__.torch.classes.quantized.Conv3dPackedParamsBase"
  },
  {
    "name": "quantized::conv3d_relu.new(Tensor qx, __torch__.torch.classes.quantized.Conv3dPackedParamsBase packed_weight, float output_scale, int output_zero_point) -> Tensor",
    "category": "Layer"
  },
  {
    "name": "quantized::conv3d_relu(Tensor qx, __torch__.torch.classes.quantized.Conv3dPackedParamsBase weight, int[] stride, int[] padding, int[] dilation, int groups, float output_scale, int output_zero_point) -> Tensor",
    "category": "Layer"
  },
  {
    "name": "quantized::conv_prepack(Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups) -> __torch__.torch.classes.quantized.Conv2dPackedParamsBase"
  },
  {
    "name": "quantized::conv_transpose1d_prepack(Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] output_padding, int[] dilation, int groups) -> __torch__.torch.classes.quantized.Conv2dPackedParamsBase"
  },
  {
    "name": "quantized::conv_transpose2d(Tensor qx, __torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weight, float output_scale, int output_zero_point) -> Tensor"
  },
  {
    "name": "quantized::conv_transpose2d_dilation(__torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weights) -> int[]"
  },
  {
    "name": "quantized::conv_transpose2d_dynamic(Tensor qx, __torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weight, bool reduce_range=False) -> Tensor"
  },
  {
    "name": "quantized::conv_transpose2d_groups(__torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weights) -> int"
  },
  {
    "name": "quantized::conv_transpose2d_output_padding(__torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weights) -> int[]"
  },
  {
    "name": "quantized::conv_transpose2d_padding(__torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weights) -> int[]"
  },
  {
    "name": "quantized::conv_transpose2d_prepack(Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] output_padding, int[] dilation, int groups) -> __torch__.torch.classes.quantized.Conv2dPackedParamsBase"
  },
  {
    "name": "quantized::conv_transpose2d_stride(__torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weights) -> int[]"
  },
  {
    "name": "quantized::conv_transpose2d_transpose(__torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weights) -> int"
  },
  {
    "name": "quantized::conv_transpose2d_unpack(__torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weights) -> (Tensor unpacked_weights, Tensor? B_origin)"
  },
  {
    "name": "quantized::conv_transpose3d_prepack(Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] output_padding, int[] dilation, int groups) -> __torch__.torch.classes.quantized.Conv3dPackedParamsBase"
  },
  {
    "name": "quantized::dropout(Tensor self, float output_scale, int output_zero_point, Scalar p=0.5, bool training=False) -> Tensor"
  },
  {
    "name": "quantized::embedding_bag_4bit_rowwise_offsets(Tensor weight, Tensor indices, Tensor? offsets=None, bool scale_grad_by_freq=False, int mode=0, bool pruned_weights=False, Tensor? per_sample_weights=None, Tensor? compressed_indices_mapping=None, bool include_last_offset=False) -> Tensor",
    "category": "Transform"
  },
  {
    "name": "quantized::embedding_bag_byte_rowwise_offsets(Tensor weight, Tensor indices, Tensor? offsets=None, bool scale_grad_by_freq=False, int mode=0, bool pruned_weights=False, Tensor? per_sample_weights=None, Tensor? compressed_indices_mapping=None, bool include_last_offset=False) -> Tensor",
    "category": "Transform"
  },
  {
    "name": "quantized::embedding_bag_prepack(Tensor weight) -> __torch__.torch.classes.quantized.EmbeddingPackedParamsBase W_prepack"
  },
  {
    "name": "quantized::embedding_bag_unpack(__torch__.torch.classes.quantized.EmbeddingPackedParamsBase W_prepack) -> Tensor W_origin"
  },
  {
    "name": "quantized::embedding_byte(__torch__.torch.classes.quantized.EmbeddingPackedParamsBase weight, Tensor indices, bool pruned_weights=False) -> Tensor",
    "category": "Transform"
  },
  {
    "name": "quantized::hardswish(Tensor input, float output_scale, int output_zero_point) -> Tensor",
    "category": "Activation"
  },
  {
    "name": "quantized::instance_norm(Tensor input, Tensor? weight, Tensor? bias, float eps, float output_scale, int output_zero_point) -> Tensor"
  },
  {
    "name": "quantized::layer_norm(Tensor input, int[] normalized_shape, Tensor? weight, Tensor? bias, float eps, float output_scale, int output_zero_point) -> Tensor",
    "category": "Normalization"
  },
  {
    "name": "quantized::leaky_relu(Tensor qx, Scalar negative_slope, bool inplace, float output_scale, int output_zero_point) -> Tensor",
    "category": "Activation"
  },
  {
    "name": "quantized::linear(Tensor X, __torch__.torch.classes.quantized.LinearPackedParamsBase W_prepack, float Y_scale_i, int Y_zero_point_i) -> Tensor Y",
    "category": "Layer"
  },
  {
    "name": "quantized::linear_dynamic(Tensor X, __torch__.torch.classes.quantized.LinearPackedParamsBase W_prepack, bool reduce_range=False) -> Tensor Y",
    "category": "Layer"
  },
  {
    "name": "quantized::linear_dynamic_fp16(Tensor X, __torch__.torch.classes.quantized.LinearPackedParamsBase W_prepack) -> Tensor Y"
  },
  {
    "name": "quantized::linear_prepack(Tensor W, Tensor? B=None) -> __torch__.torch.classes.quantized.LinearPackedParamsBase W_prepack"
  },
  {
    "name": "quantized::linear_prepack_fp16(Tensor W, Tensor? B=None) -> __torch__.torch.classes.quantized.LinearPackedParamsBase W_prepack"
  },
  {
    "name": "quantized::linear_prepack_fp16_legacy(Tensor W, Tensor? B=None) -> Tensor W_prepack"
  },
  {
    "name": "quantized::linear_relu(Tensor X, __torch__.torch.classes.quantized.LinearPackedParamsBase W_prepack, float Y_scale_i, int Y_zero_point_i) -> Tensor Y",
    "category": "Layer"
  },
  {
    "name": "quantized::linear_relu_dynamic(Tensor X, __torch__.torch.classes.quantized.LinearPackedParamsBase W_prepack, bool reduce_range=False) -> Tensor Y",
    "category": "Layer"
  },
  {
    "name": "quantized::linear_unpack(__torch__.torch.classes.quantized.LinearPackedParamsBase W_prepack) -> (Tensor W_origin, Tensor? B_origin)"
  },
  {
    "name": "quantized::linear_unpack.legacy(Tensor W_prepack) -> (Tensor W_origin, Tensor? B_origin)"
  },
  {
    "name": "quantized::linear_unpack_fp16(__torch__.torch.classes.quantized.LinearPackedParamsBase W_prepack) -> (Tensor W_origin, Tensor? B_origin)"
  },
  {
    "name": "quantized::linear_unpack_fp16.legacy(Tensor W_prepack) -> (Tensor W_origin, Tensor? B_origin)"
  },
  {
    "name": "quantized::make_quantized_cell_params(Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh) -> __torch__.torch.classes.rnn.CellParamsBase"
  },
  {
    "name": "quantized::make_quantized_cell_params_dynamic(__torch__.torch.classes.quantized.LinearPackedParamsBase w_ih, __torch__.torch.classes.quantized.LinearPackedParamsBase w_hh, Tensor bias_ih, Tensor bias_hh, bool reduce_range=False) -> __torch__.torch.classes.rnn.CellParamsBase"
  },
  {
    "name": "quantized::make_quantized_cell_params_fp16(__torch__.torch.classes.quantized.LinearPackedParamsBase w_ih, __torch__.torch.classes.quantized.LinearPackedParamsBase w_hh) -> __torch__.torch.classes.rnn.CellParamsBase"
  },
  {
    "name": "quantized::matmul(Tensor qa, Tensor qb, float scale, int zero_point) -> Tensor qc"
  },
  {
    "name": "quantized::mul(Tensor qa, Tensor qb, float scale, int zero_point) -> Tensor qc"
  },
  {
    "name": "quantized::mul.out(Tensor qa, Tensor qb, Tensor(a!) out) -> Tensor(a!) out"
  },
  {
    "name": "quantized::mul.Scalar(Tensor qa, Scalar b) -> Tensor qc"
  },
  {
    "name": "quantized::mul.Scalar2(Scalar b, Tensor qa) -> Tensor qc"
  },
  {
    "name": "quantized::mul.Scalar_out(Tensor qa, Scalar b, Tensor(a!) out) -> Tensor(a!) out"
  },
  {
    "name": "quantized::mul_out(Tensor qa, Tensor qb, Tensor(a!) out) -> Tensor(a!) out"
  },
  {
    "name": "quantized::mul_relu(Tensor qa, Tensor qb, float scale, int zero_point) -> Tensor qc"
  },
  {
    "name": "quantized::mul_relu.out(Tensor qa, Tensor qb, Tensor(a!) out) -> Tensor(a!) out"
  },
  {
    "name": "quantized::mul_relu.Scalar(Tensor qa, Scalar b) -> Tensor qc"
  },
  {
    "name": "quantized::mul_relu.Scalar2(Scalar b, Tensor qa) -> Tensor qc"
  },
  {
    "name": "quantized::mul_relu.Scalar_out(Tensor qa, Scalar b, Tensor(a!) out) -> Tensor(a!) out"
  },
  {
    "name": "quantized::mul_relu_out(Tensor qa, Tensor qb, Tensor(a!) out) -> Tensor(a!) out"
  },
  {
    "name": "quantized::mul_scalar(Tensor qa, Scalar b) -> Tensor qc"
  },
  {
    "name": "quantized::mul_scalar.Tensor(Tensor qa, Tensor b) -> Tensor qc"
  },
  {
    "name": "quantized::mul_scalar_out(Tensor qa, Scalar b, Tensor(a!) out) -> Tensor(a!) out"
  },
  {
    "name": "quantized::mul_scalar_out.Tensor(Tensor qa, Tensor b, Tensor(a!) out) -> Tensor(a!) out"
  },
  {
    "name": "quantized::mul_scalar_relu(Tensor qa, Scalar b) -> Tensor qc"
  },
  {
    "name": "quantized::mul_scalar_relu.Tensor(Tensor qa, Tensor b) -> Tensor qc"
  },
  {
    "name": "quantized::mul_scalar_relu_out(Tensor qa, Scalar b, Tensor(a!) out) -> Tensor(a!) out"
  },
  {
    "name": "quantized::mul_scalar_relu_out.Tensor(Tensor qa, Tensor b, Tensor(a!) out) -> Tensor(a!) out"
  },
  {
    "name": "quantized::prelu(Tensor qx, Tensor weight, float output_scale, int output_zero_point) -> Tensor"
  },
  {
    "name": "quantized::quantized_gru_cell_dynamic(Tensor input, Tensor hx, __torch__.torch.classes.quantized.LinearPackedParamsBase w_ih, __torch__.torch.classes.quantized.LinearPackedParamsBase w_hh, Tensor b_ih, Tensor b_hh) -> Tensor"
  },
  {
    "name": "quantized::quantized_lstm_cell_dynamic(Tensor input, Tensor[] hx, __torch__.torch.classes.quantized.LinearPackedParamsBase w_ih, __torch__.torch.classes.quantized.LinearPackedParamsBase w_hh, Tensor bias_ih, Tensor bias_hh) -> (Tensor, Tensor)"
  },
  {
    "name": "quantized::quantized_rnn_relu_cell_dynamic(Tensor input, Tensor hx, __torch__.torch.classes.quantized.LinearPackedParamsBase w_ih, __torch__.torch.classes.quantized.LinearPackedParamsBase w_hh, Tensor b_ih, Tensor b_hh) -> Tensor"
  },
  {
    "name": "quantized::quantized_rnn_tanh_cell_dynamic(Tensor input, Tensor hx, __torch__.torch.classes.quantized.LinearPackedParamsBase w_ih, __torch__.torch.classes.quantized.LinearPackedParamsBase w_hh, Tensor b_ih, Tensor b_hh) -> Tensor"
  },
  {
    "name": "quantized::relu6(Tensor qx, bool inplace=False) -> Tensor",
    "category": "Activation"
  },
  {
    "name": "quantized::sigmoid(Tensor qx, float output_scale, int output_zero_point) -> Tensor",
    "category": "Activation"
  },
  {
    "name": "quantized::softmax(Tensor qx, int dim, float output_scale, int output_zero_point) -> Tensor"
  },
  {
    "name": "quantized_decomposed::_choose_qparams_per_token_asymmetric_impl(Tensor input, ScalarType dtype) -> (Tensor, Tensor)"
  },
  {
    "name": "quantized_decomposed::add(Tensor a, float a_scale, int a_zero_point, int a_quant_min, int a_quant_max, Tensor b, float b_scale, int b_zero_point, int b_quant_min, int b_quant_max, float out_scale, int out_zero_point, int out_quant_min, int out_quant_max) -> Tensor qc"
  },
  {
    "name": "quantized_decomposed::add.scalar(Tensor qa, float a_scale, int a_zero_point, int a_quant_min, int a_quant_max, ScalarType a_dtype, Scalar b, float out_scale, int out_zero_point, int out_quant_min, int out_quant_max, ScalarType out_dtype) -> Tensor"
  },
  {
    "name": "quantized_decomposed::add_relu(Tensor a, float a_scale, int a_zero_point, int a_quant_min, int a_quant_max, Tensor b, float b_scale, int b_zero_point, int b_quant_min, int b_quant_max, float out_scale, int out_zero_point, int out_quant_min, int out_quant_max) -> Tensor qc"
  },
  {
    "name": "quantized_decomposed::choose_qparams.tensor(Tensor input, int quant_min, int quant_max, float eps, ScalarType dtype) -> (Tensor, Tensor)"
  },
  {
    "name": "quantized_decomposed::choose_qparams_per_token(Tensor input, ScalarType dtype) -> (Tensor, Tensor)"
  },
  {
    "name": "quantized_decomposed::choose_qparams_per_token_asymmetric(Tensor input, ScalarType dtype) -> (Tensor, Tensor)"
  },
  {
    "name": "quantized_decomposed::choose_qparams_per_token_asymmetric.out(Tensor input, ScalarType dtype, *, Tensor(a!) scale_out, Tensor(b!) zero_point_out) -> (Tensor(a!), Tensor(b!))"
  },
  {
    "name": "quantized_decomposed::choose_qparams_symmetric.tensor(Tensor input, int quant_min, int quant_max, float eps, ScalarType dtype) -> (Tensor, Tensor)"
  },
  {
    "name": "quantized_decomposed::dequantize_per_channel.out(Tensor input, Tensor scales, Tensor? zero_points, int axis, int quant_min, int quant_max, ScalarType dtype, *, ScalarType? out_dtype=None, Tensor(a!) out) -> Tensor(a!)"
  },
  {
    "name": "quantized_decomposed::dequantize_per_channel(Tensor input, Tensor scales, Tensor? zero_points, int axis, int quant_min, int quant_max, ScalarType dtype, *, ScalarType? out_dtype=None) -> Tensor"
  },
  {
    "name": "quantized_decomposed::dequantize_per_channel_group(Tensor input, Tensor scales, Tensor? zero_points, int quant_min, int quant_max, ScalarType dtype, int group_size, ScalarType output_dtype) -> Tensor"
  },
  {
    "name": "quantized_decomposed::dequantize_per_tensor.Tensor_out(Tensor input, Tensor scale, Tensor zero_point, int quant_min, int quant_max, ScalarType dtype, *, ScalarType? out_dtype=None, Tensor(a!) out) -> Tensor(a!)"
  },
  {
    "name": "quantized_decomposed::dequantize_per_tensor.out(Tensor input, float scale, int zero_point, int quant_min, int quant_max, ScalarType dtype, *, ScalarType? out_dtype=None, Tensor(a!) out) -> Tensor(a!)"
  },
  {
    "name": "quantized_decomposed::dequantize_per_tensor(Tensor input, float scale, int zero_point, int quant_min, int quant_max, ScalarType dtype, *, ScalarType? out_dtype=None) -> Tensor"
  },
  {
    "name": "quantized_decomposed::dequantize_per_tensor.tensor(Tensor input, Tensor scale, Tensor zero_point, int quant_min, int quant_max, ScalarType dtype, *, ScalarType? out_dtype=None) -> Tensor"
  },
  {
    "name": "quantized_decomposed::dequantize_per_tensor.tensor2(Tensor input, Tensor scale, Tensor zero_point, Tensor quant_min, Tensor quant_max, ScalarType dtype, *, ScalarType? out_dtype=None) -> Tensor"
  },
  {
    "name": "quantized_decomposed::dequantize_per_token(Tensor input, Tensor scales, Tensor zero_points, int quant_min, int quant_max, ScalarType dtype, ScalarType output_dtype) -> Tensor"
  },
  {
    "name": "quantized_decomposed::embedding_4bit(Tensor weight, Tensor weight_scales, Tensor? weight_zero_points, int weight_quant_min, int weight_quant_max, Tensor indices) -> Tensor",
    "category": "Transform"
  },
  {
    "name": "quantized_decomposed::embedding_4bit.dtype(Tensor weight, Tensor weight_scales, Tensor? weight_zero_points, int weight_quant_min, int weight_quant_max, Tensor indices, *, ScalarType? dtype=None) -> Tensor",
    "category": "Transform"
  },
  {
    "name": "quantized_decomposed::embedding_4bit.out(Tensor weight, Tensor weight_scales, Tensor? weight_zero_points, int weight_quant_min, int weight_quant_max, Tensor indices, *, Tensor(a!) out) -> Tensor(a!)",
    "category": "Transform"
  },
  {
    "name": "quantized_decomposed::embedding_4bit.dtype_out(Tensor weight, Tensor weight_scales, Tensor? weight_zero_points, int weight_quant_min, int weight_quant_max, Tensor indices, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)",
    "category": "Transform"
  },
  {
    "name": "quantized_decomposed::embedding_byte.dtype_out(Tensor weight, Tensor weight_scales, Tensor? weight_zero_points, int weight_quant_min, int weight_quant_max, Tensor indices, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)",
    "category": "Transform"
  },
  {
    "name": "quantized_decomposed::embedding_byte(Tensor weight, Tensor weight_scales, Tensor? weight_zero_points, int weight_quant_min, int weight_quant_max, Tensor indices) -> Tensor",
    "category": "Transform"
  },
  {
    "name": "quantized_decomposed::embedding_byte.dtype(Tensor weight, Tensor weight_scales, Tensor? weight_zero_points, int weight_quant_min, int weight_quant_max, Tensor indices, *, ScalarType? dtype=None) -> Tensor",
    "category": "Transform"
  },
  {
    "name": "quantized_decomposed::embedding_byte.out(Tensor weight, Tensor weight_scales, Tensor? weight_zero_points, int weight_quant_min, int weight_quant_max, Tensor indices, *, Tensor(a!) out) -> Tensor(a!)",
    "category": "Transform"
  },
  {
    "name": "quantized_decomposed::fake_quant_per_channel(Tensor input, Tensor scales, Tensor zero_points, int axis, int quant_min, int quant_max) -> Tensor"
  },
  {
    "name": "quantized_decomposed::mixed_linear(Tensor input, Tensor weight, Tensor weight_scales, Tensor? weight_zero_points, ScalarType? dtype=None) -> Tensor"
  },
  {
    "name": "quantized_decomposed::mixed_mm(Tensor input, Tensor weight, Tensor weight_scales, Tensor? weight_zero_points) -> Tensor"
  },
  {
    "name": "quantized_decomposed::quantize_per_channel(Tensor input, Tensor scales, Tensor zero_points, int axis, int quant_min, int quant_max, ScalarType dtype) -> Tensor"
  },
  {
    "name": "quantized_decomposed::quantize_per_channel_group(Tensor input, Tensor scales, Tensor zero_points, int quant_min, int quant_max, ScalarType dtype, int group_size) -> Tensor"
  },
  {
    "name": "quantized_decomposed::quantize_per_tensor.out(Tensor input, float scale, int zero_point, int quant_min, int quant_max, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!)"
  },
  {
    "name": "quantized_decomposed::quantize_per_tensor(Tensor input, float scale, int zero_point, int quant_min, int quant_max, ScalarType dtype) -> Tensor"
  },
  {
    "name": "quantized_decomposed::quantize_per_tensor.tensor(Tensor input, Tensor scale, Tensor zero_point, int quant_min, int quant_max, ScalarType dtype) -> Tensor"
  },
  {
    "name": "quantized_decomposed::quantize_per_tensor.tensor2(Tensor input, Tensor scale, Tensor zero_point, Tensor quant_min, Tensor quant_max, ScalarType dtype) -> Tensor"
  },
  {
    "name": "quantized_decomposed::quantize_per_token(Tensor input, Tensor scales, Tensor zero_points, int quant_min, int quant_max, ScalarType dtype) -> Tensor"
  },
  {
    "name": "tensorrt::execute_engine(Tensor[] inputs, __torch__.torch.classes.tensorrt.Engine engine) -> Tensor[]"
  },
  {
    "name": "torch_scatter::cuda_version() -> int _0"
  },
  {
    "name": "torch_scatter::gather_coo(Tensor _0, Tensor _1, Tensor? _2) -> Tensor _0"
  },
  {
    "name": "torch_scatter::gather_csr(Tensor _0, Tensor _1, Tensor? _2) -> Tensor _0"
  },
  {
    "name": "torch_scatter::scatter_max(Tensor _0, Tensor _1, int _2, Tensor? _3, int? _4) -> (Tensor _0, Tensor _1)"
  },
  {
    "name": "torch_scatter::scatter_mean(Tensor _0, Tensor _1, int _2, Tensor? _3, int? _4) -> Tensor _0"
  },
  {
    "name": "torch_scatter::scatter_min(Tensor _0, Tensor _1, int _2, Tensor? _3, int? _4) -> (Tensor _0, Tensor _1)"
  },
  {
    "name": "torch_scatter::scatter_mul(Tensor _0, Tensor _1, int _2, Tensor? _3, int? _4) -> Tensor _0"
  },
  {
    "name": "torch_scatter::scatter_sum(Tensor _0, Tensor _1, int _2, Tensor? _3, int? _4) -> Tensor _0"
  },
  {
    "name": "torch_scatter::segment_max_coo(Tensor _0, Tensor _1, Tensor? _2, int? _3) -> (Tensor _0, Tensor _1)"
  },
  {
    "name": "torch_scatter::segment_max_csr(Tensor _0, Tensor _1, Tensor? _2) -> (Tensor _0, Tensor _1)"
  },
  {
    "name": "torch_scatter::segment_mean_coo(Tensor _0, Tensor _1, Tensor? _2, int? _3) -> Tensor _0"
  },
  {
    "name": "torch_scatter::segment_mean_csr(Tensor _0, Tensor _1, Tensor? _2) -> Tensor _0"
  },
  {
    "name": "torch_scatter::segment_min_coo(Tensor _0, Tensor _1, Tensor? _2, int? _3) -> (Tensor _0, Tensor _1)"
  },
  {
    "name": "torch_scatter::segment_min_csr(Tensor _0, Tensor _1, Tensor? _2) -> (Tensor _0, Tensor _1)"
  },
  {
    "name": "torch_scatter::segment_sum_coo(Tensor _0, Tensor _1, Tensor? _2, int? _3) -> Tensor _0"
  },
  {
    "name": "torch_scatter::segment_sum_csr(Tensor _0, Tensor _1, Tensor? _2) -> Tensor _0"
  },
  {
    "name": "torch_sparse::cuda_version() -> int _0"
  },
  {
    "name": "torch_sparse::ego_k_hop_sample_adj(Tensor _0, Tensor _1, Tensor _2, int _3, int _4, bool _5) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5)"
  },
  {
    "name": "torch_sparse::hetero_neighbor_sample(str[] _0, (str, str, str)[] _1, Dict(str, Tensor) _2, Dict(str, Tensor) _3, Dict(str, Tensor) _4, Dict(str, int[]) _5, int _6, bool _7, bool _8) -> (Dict(str, Tensor) _0, Dict(str, Tensor) _1, Dict(str, Tensor) _2, Dict(str, Tensor) _3)"
  },
  {
    "name": "torch_sparse::hetero_temporal_neighbor_sample(str[] _0, (str, str, str)[] _1, Dict(str, Tensor) _2, Dict(str, Tensor) _3, Dict(str, Tensor) _4, Dict(str, int[]) _5, Dict(str, Tensor) _6, int _7, bool _8, bool _9) -> (Dict(str, Tensor) _0, Dict(str, Tensor) _1, Dict(str, Tensor) _2, Dict(str, Tensor) _3)"
  },
  {
    "name": "torch_sparse::hgt_sample(Dict(str, Tensor) _0, Dict(str, Tensor) _1, Dict(str, Tensor) _2, Dict(str, int[]) _3, int _4) -> (Dict(str, Tensor) _0, Dict(str, Tensor) _1, Dict(str, Tensor) _2, Dict(str, Tensor) _3)"
  },
  {
    "name": "torch_sparse::ind2ptr(Tensor _0, int _1) -> Tensor _0"
  },
  {
    "name": "torch_sparse::mt_partition(Tensor _0, Tensor _1, Tensor? _2, Tensor? _3, int _4, bool _5, int _6) -> Tensor _0"
  },
  {
    "name": "torch_sparse::neighbor_sample(Tensor _0, Tensor _1, Tensor _2, int[] _3, bool _4, bool _5) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3)"
  },
  {
    "name": "torch_sparse::non_diag_mask(Tensor _0, Tensor _1, int _2, int _3, int _4) -> Tensor _0"
  },
  {
    "name": "torch_sparse::partition(Tensor _0, Tensor _1, Tensor? _2, int _3, bool _4) -> Tensor _0"
  },
  {
    "name": "torch_sparse::partition2(Tensor _0, Tensor _1, Tensor? _2, Tensor? _3, int _4, bool _5) -> Tensor _0"
  },
  {
    "name": "torch_sparse::ptr2ind(Tensor _0, int _1) -> Tensor _0"
  },
  {
    "name": "torch_sparse::random_walk(Tensor _0, Tensor _1, Tensor _2, int _3) -> Tensor _0"
  },
  {
    "name": "torch_sparse::relabel(Tensor _0, Tensor _1) -> (Tensor _0, Tensor _1)"
  },
  {
    "name": "torch_sparse::relabel_one_hop(Tensor _0, Tensor _1, Tensor? _2, Tensor _3, bool _4) -> (Tensor _0, Tensor _1, Tensor? _2, Tensor _3)"
  },
  {
    "name": "torch_sparse::saint_subgraph(Tensor _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2)"
  },
  {
    "name": "torch_sparse::sample_adj(Tensor _0, Tensor _1, Tensor _2, int _3, bool _4) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3)"
  },
  {
    "name": "torch_sparse::spmm_max(Tensor _0, Tensor _1, Tensor? _2, Tensor _3) -> (Tensor _0, Tensor _1)"
  },
  {
    "name": "torch_sparse::spmm_mean(Tensor? _0, Tensor _1, Tensor _2, Tensor? _3, Tensor? _4, Tensor? _5, Tensor? _6, Tensor _7) -> Tensor _0"
  },
  {
    "name": "torch_sparse::spmm_min(Tensor _0, Tensor _1, Tensor? _2, Tensor _3) -> (Tensor _0, Tensor _1)"
  },
  {
    "name": "torch_sparse::spmm_sum(Tensor? _0, Tensor _1, Tensor _2, Tensor? _3, Tensor? _4, Tensor? _5, Tensor _6) -> Tensor _0"
  },
  {
    "name": "torchao::choose_qparams_affine(Tensor? input, str mapping_type, SymInt[] block_size, ScalarType target_dtype, Scalar? quant_min=None, Scalar? quant_max=None, float? eps=None, ScalarType? scale_dtype=None, ScalarType? zero_point_dtype=None, bool keepdim=False) -> (Tensor, Tensor)"
  },
  {
    "name": "torchao::dequantize_affine(Tensor input, SymInt[] block_size, Tensor scale, Tensor? zero_point, ScalarType input_dtype, Scalar? quant_min=None, Scalar? quant_max=None, ScalarType output_dtype=6) -> Tensor"
  },
  {
    "name": "torchao::quantize_affine(Tensor input, SymInt[] block_size, Tensor scale, Tensor? zero_point, ScalarType output_dtype, Scalar? quant_min=None, Scalar? quant_max=None) -> Tensor"
  },
  {
    "name": "torchaudio::sox_effects_apply_effects_tensor(Tensor tensor, int sample_rate, str[][] effects, bool channels_first=True) -> (Tensor, int)"
  },
  {
    "name": "torchvision::_interpolate_bilinear2d_aa(Tensor input, int[] size, bool align_corners) -> Tensor"
  },
  {
    "name": "torchvision::deform_conv2d(Tensor input, Tensor weight, Tensor offset, Tensor mask, Tensor bias, SymInt stride_h, SymInt stride_w, SymInt pad_h, SymInt pad_w, SymInt dilation_h, SymInt dilation_w, SymInt groups, SymInt offset_groups, bool use_mask) -> Tensor"
  },
  {
    "name": "torchvision::deform_conv2d.out(Tensor input, Tensor weight, Tensor offset, Tensor mask, Tensor bias, SymInt stride_h, SymInt stride_w, SymInt pad_h, SymInt pad_w, SymInt dilation_h, SymInt dilation_w, SymInt groups, SymInt offset_groups, bool use_mask, *, Tensor(a!) out) -> Tensor(a!)"
  },
  {
    "name": "torchvision::nms(Tensor dets, Tensor scores, float iou_threshold) -> Tensor"
  },
  {
    "name": "torchvision::roi_align(Tensor input, Tensor rois, float spatial_scale, SymInt pooled_height, SymInt pooled_width, int sampling_ratio, bool aligned) -> Tensor"
  },
  {
    "name": "torchvision::roi_pool(Tensor input, Tensor rois, float spatial_scale, SymInt pooled_height, SymInt pooled_width) -> (Tensor, Tensor)"
  },
  {
    "name": "vai::fix_neuron(Tensor input, int valmin, int valmax, float valamp, int zero_point, int method, int device_id, int inplace) -> Tensor"
  }
]