pytorch-metadata.json 318 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
73278327932803281328232833284328532863287328832893290329132923293329432953296329732983299330033013302330333043305330633073308330933103311331233133314331533163317331833193320332133223323332433253326332733283329333033313332333333343335333633373338333933403341334233433344334533463347334833493350335133523353335433553356335733583359336033613362336333643365336633673368336933703371337233733374337533763377337833793380338133823383338433853386338733883389339033913392339333943395339633973398339934003401340234033404340534063407340834093410341134123413341434153416341734183419342034213422342334243425342634273428342934303431343234333434343534363437343834393440344134423443344434453446344734483449345034513452345334543455345634573458345934603461346234633464346534663467346834693470347134723473347434753476347734783479348034813482348334843485348634873488348934903491349234933494349534963497349834993500350135023503350435053506350735083509351035113512351335143515351635173518351935203521352235233524352535263527352835293530353135323533353435353536353735383539354035413542354335443545354635473548354935503551355235533554355535563557355835593560356135623563356435653566356735683569357035713572357335743575357635773578357935803581358235833584358535863587358835893590359135923593359435953596359735983599360036013602360336043605360636073608360936103611361236133614361536163617361836193620362136223623362436253626362736283629363036313632363336343635363636373638363936403641364236433644364536463647364836493650365136523653365436553656365736583659366036613662366336643665366636673668366936703671367236733674367536763677367836793680368136823683368436853686368736883689369036913692369336943695369636973698369937003701370237033704370537063707370837093710371137123713371437153716371737183719372037213722372337243725372637273728372937303731373237333734373537363737373837393740374137423743374437453746374737483749375037513752375337543755375637573758375937603761376237633764376537663767376837693770377137723773377437753776377
73778377937803781378237833784378537863787378837893790379137923793379437953796379737983799380038013802380338043805380638073808380938103811381238133814381538163817381838193820382138223823382438253826382738283829383038313832383338343835383638373838383938403841384238433844384538463847384838493850385138523853385438553856385738583859386038613862386338643865386638673868386938703871387238733874387538763877387838793880388138823883388438853886388738883889389038913892389338943895389638973898389939003901390239033904390539063907390839093910391139123913391439153916391739183919392039213922392339243925392639273928392939303931393239333934393539363937393839393940394139423943394439453946394739483949395039513952395339543955395639573958395939603961396239633964396539663967396839693970397139723973397439753976397739783979398039813982398339843985398639873988398939903991399239933994399539963997399839994000400140024003400440054006400740084009401040114012401340144015401640174018401940204021402240234024402540264027402840294030403140324033403440354036403740384039404040414042404340444045404640474048404940504051405240534054405540564057405840594060406140624063406440654066406740684069407040714072407340744075407640774078407940804081408240834084408540864087408840894090409140924093409440954096409740984099410041014102410341044105410641074108410941104111411241134114411541164117411841194120412141224123412441254126412741284129413041314132413341344135413641374138413941404141414241434144414541464147414841494150415141524153415441554156415741584159416041614162416341644165416641674168416941704171417241734174417541764177417841794180418141824183418441854186418741884189419041914192419341944195419641974198419942004201420242034204420542064207420842094210421142124213421442154216421742184219422042214222422342244225422642274228422942304231423242334234423542364237423842394240424142424243424442454246424742484249425042514252425342544255425642574258425942604261426242634264426542664267426842694270427142724273427442754276427
74278427942804281428242834284428542864287428842894290429142924293429442954296429742984299430043014302430343044305430643074308430943104311431243134314431543164317431843194320432143224323432443254326432743284329433043314332433343344335433643374338433943404341434243434344434543464347434843494350435143524353435443554356435743584359436043614362436343644365436643674368436943704371437243734374437543764377437843794380438143824383438443854386438743884389439043914392439343944395439643974398439944004401440244034404440544064407440844094410441144124413441444154416441744184419442044214422442344244425442644274428442944304431443244334434443544364437443844394440444144424443444444454446444744484449445044514452445344544455445644574458445944604461446244634464446544664467446844694470447144724473447444754476447744784479448044814482448344844485448644874488448944904491449244934494449544964497449844994500450145024503450445054506450745084509451045114512451345144515451645174518451945204521452245234524452545264527452845294530453145324533453445354536453745384539454045414542454345444545454645474548454945504551455245534554455545564557455845594560456145624563456445654566456745684569457045714572457345744575457645774578457945804581458245834584458545864587458845894590459145924593459445954596459745984599460046014602460346044605460646074608460946104611461246134614461546164617461846194620462146224623462446254626462746284629463046314632463346344635463646374638463946404641464246434644464546464647464846494650465146524653465446554656465746584659466046614662466346644665466646674668466946704671467246734674467546764677467846794680468146824683468446854686468746884689469046914692469346944695469646974698469947004701470247034704470547064707470847094710471147124713471447154716471747184719472047214722472347244725472647274728472947304731473247334734473547364737473847394740474147424743474447454746474747484749475047514752475347544755475647574758475947604761476247634764476547664767476847694770477147724773477447754776477
74778477947804781478247834784478547864787478847894790479147924793479447954796479747984799480048014802480348044805480648074808480948104811481248134814481548164817481848194820482148224823482448254826482748284829483048314832483348344835483648374838483948404841484248434844484548464847484848494850485148524853485448554856485748584859486048614862486348644865486648674868486948704871487248734874487548764877487848794880488148824883488448854886488748884889489048914892489348944895489648974898489949004901490249034904490549064907490849094910491149124913491449154916491749184919492049214922492349244925492649274928492949304931493249334934493549364937493849394940494149424943494449454946494749484949495049514952495349544955495649574958495949604961496249634964496549664967496849694970497149724973497449754976497749784979498049814982498349844985498649874988498949904991499249934994499549964997499849995000500150025003500450055006500750085009501050115012501350145015501650175018501950205021502250235024502550265027502850295030503150325033503450355036503750385039504050415042504350445045504650475048504950505051505250535054505550565057505850595060506150625063506450655066506750685069507050715072507350745075507650775078507950805081508250835084508550865087508850895090509150925093509450955096509750985099510051015102510351045105510651075108510951105111511251135114511551165117511851195120512151225123512451255126512751285129513051315132513351345135513651375138513951405141514251435144514551465147514851495150515151525153515451555156515751585159516051615162516351645165516651675168516951705171517251735174517551765177517851795180518151825183518451855186518751885189519051915192519351945195519651975198519952005201520252035204520552065207520852095210521152125213521452155216521752185219522052215222522352245225522652275228522952305231523252335234523552365237523852395240524152425243524452455246524752485249525052515252525352545255525652575258525952605261526252635264526552665267526852695270527152725273527452755276527
75278527952805281528252835284528552865287528852895290529152925293529452955296529752985299530053015302530353045305530653075308530953105311531253135314531553165317531853195320532153225323532453255326532753285329533053315332533353345335533653375338533953405341534253435344534553465347534853495350535153525353535453555356535753585359536053615362536353645365536653675368536953705371537253735374537553765377537853795380538153825383538453855386538753885389539053915392539353945395539653975398539954005401540254035404540554065407540854095410541154125413541454155416541754185419542054215422542354245425542654275428542954305431543254335434543554365437543854395440544154425443544454455446544754485449545054515452545354545455545654575458545954605461546254635464546554665467546854695470547154725473547454755476547754785479548054815482548354845485548654875488548954905491549254935494549554965497549854995500550155025503550455055506550755085509551055115512551355145515551655175518551955205521552255235524552555265527552855295530553155325533553455355536553755385539554055415542554355445545554655475548554955505551555255535554555555565557555855595560556155625563556455655566556755685569557055715572557355745575557655775578557955805581558255835584558555865587558855895590559155925593559455955596559755985599560056015602560356045605560656075608560956105611561256135614561556165617561856195620562156225623562456255626562756285629563056315632563356345635563656375638563956405641564256435644564556465647564856495650565156525653565456555656565756585659566056615662566356645665566656675668566956705671567256735674567556765677567856795680568156825683568456855686568756885689569056915692569356945695569656975698569957005701570257035704570557065707570857095710571157125713571457155716571757185719572057215722572357245725572657275728572957305731573257335734573557365737573857395740574157425743574457455746574757485749575057515752575357545755575657575758575957605761576257635764576557665767576857695770577157725773577457755776577
75778577957805781578257835784578557865787578857895790579157925793579457955796579757985799580058015802580358045805580658075808580958105811581258135814581558165817581858195820582158225823582458255826582758285829583058315832583358345835583658375838583958405841584258435844584558465847584858495850585158525853585458555856585758585859586058615862586358645865586658675868586958705871587258735874587558765877587858795880588158825883588458855886588758885889589058915892589358945895589658975898589959005901590259035904590559065907590859095910591159125913591459155916591759185919592059215922592359245925592659275928592959305931593259335934593559365937593859395940594159425943594459455946594759485949595059515952595359545955595659575958595959605961596259635964596559665967596859695970597159725973597459755976597759785979598059815982598359845985598659875988598959905991599259935994599559965997599859996000600160026003600460056006600760086009601060116012601360146015601660176018601960206021602260236024602560266027602860296030603160326033603460356036603760386039604060416042604360446045604660476048604960506051605260536054605560566057605860596060606160626063606460656066606760686069607060716072607360746075607660776078607960806081608260836084608560866087608860896090609160926093609460956096609760986099610061016102610361046105610661076108610961106111611261136114611561166117611861196120612161226123612461256126612761286129613061316132613361346135613661376138613961406141614261436144614561466147614861496150615161526153615461556156615761586159616061616162616361646165616661676168616961706171617261736174617561766177617861796180618161826183618461856186618761886189619061916192619361946195619661976198619962006201620262036204620562066207620862096210621162126213621462156216621762186219622062216222622362246225622662276228622962306231623262336234623562366237623862396240624162426243624462456246624762486249625062516252625362546255625662576258625962606261626262636264626562666267626862696270627162726273627462756276627
76278627962806281628262836284628562866287628862896290629162926293629462956296629762986299630063016302630363046305630663076308630963106311631263136314631563166317631863196320632163226323632463256326632763286329633063316332633363346335633663376338633963406341634263436344634563466347634863496350635163526353635463556356635763586359636063616362636363646365636663676368636963706371637263736374637563766377637863796380638163826383638463856386638763886389639063916392639363946395639663976398639964006401640264036404640564066407640864096410641164126413641464156416641764186419642064216422642364246425642664276428642964306431643264336434643564366437643864396440644164426443644464456446644764486449645064516452645364546455645664576458645964606461646264636464646564666467646864696470647164726473647464756476647764786479648064816482648364846485648664876488648964906491649264936494649564966497649864996500650165026503650465056506650765086509651065116512651365146515651665176518651965206521652265236524652565266527652865296530653165326533653465356536653765386539654065416542654365446545654665476548654965506551655265536554655565566557655865596560656165626563656465656566656765686569657065716572657365746575657665776578657965806581658265836584658565866587658865896590659165926593659465956596659765986599660066016602660366046605660666076608660966106611661266136614661566166617661866196620662166226623662466256626662766286629663066316632663366346635663666376638663966406641664266436644664566466647664866496650665166526653665466556656665766586659666066616662666366646665666666676668666966706671667266736674667566766677667866796680668166826683668466856686668766886689669066916692669366946695669666976698669967006701670267036704670567066707670867096710671167126713671467156716671767186719672067216722672367246725672667276728672967306731673267336734673567366737673867396740674167426743674467456746674767486749675067516752675367546755675667576758675967606761676267636764676567666767676867696770677167726773677467756776677
76778677967806781678267836784678567866787678867896790679167926793679467956796679767986799680068016802680368046805680668076808680968106811681268136814681568166817681868196820682168226823682468256826682768286829683068316832683368346835683668376838683968406841684268436844684568466847684868496850685168526853685468556856685768586859686068616862686368646865686668676868686968706871687268736874687568766877687868796880688168826883688468856886688768886889689068916892689368946895689668976898689969006901690269036904690569066907690869096910691169126913691469156916691769186919692069216922692369246925692669276928692969306931693269336934693569366937693869396940694169426943694469456946694769486949695069516952695369546955695669576958695969606961696269636964696569666967696869696970697169726973697469756976697769786979698069816982698369846985698669876988698969906991699269936994699569966997699869997000700170027003700470057006700770087009701070117012701370147015701670177018701970207021702270237024702570267027702870297030703170327033703470357036703770387039704070417042704370447045704670477048704970507051705270537054705570567057705870597060706170627063706470657066706770687069707070717072707370747075707670777078707970807081708270837084708570867087708870897090709170927093709470957096709770987099710071017102710371047105710671077108710971107111711271137114711571167117711871197120712171227123712471257126712771287129713071317132713371347135713671377138713971407141714271437144714571467147714871497150715171527153715471557156715771587159716071617162716371647165716671677168716971707171717271737174717571767177717871797180718171827183718471857186718771887189719071917192719371947195719671977198719972007201720272037204720572067207720872097210721172127213721472157216721772187219722072217222722372247225722672277228722972307231723272337234723572367237723872397240724172427243724472457246724772487249725072517252725372547255725672577258725972607261726272637264726572667267726872697270727172727273727472757276727
77278727972807281728272837284728572867287728872897290729172927293729472957296729772987299730073017302730373047305730673077308730973107311731273137314731573167317731873197320732173227323732473257326732773287329733073317332733373347335733673377338733973407341734273437344734573467347734873497350735173527353735473557356735773587359736073617362736373647365736673677368736973707371737273737374737573767377737873797380738173827383738473857386738773887389739073917392739373947395739673977398739974007401740274037404740574067407740874097410741174127413741474157416741774187419742074217422742374247425742674277428742974307431743274337434743574367437743874397440744174427443744474457446744774487449745074517452745374547455745674577458745974607461746274637464746574667467746874697470747174727473747474757476747774787479748074817482748374847485748674877488748974907491749274937494749574967497749874997500750175027503750475057506750775087509751075117512751375147515751675177518751975207521752275237524752575267527752875297530753175327533753475357536753775387539754075417542754375447545754675477548754975507551755275537554755575567557755875597560756175627563756475657566756775687569757075717572757375747575757675777578757975807581758275837584758575867587758875897590759175927593759475957596759775987599760076017602760376047605760676077608760976107611761276137614761576167617761876197620762176227623762476257626762776287629763076317632763376347635763676377638763976407641764276437644764576467647764876497650765176527653765476557656765776587659766076617662766376647665766676677668766976707671767276737674767576767677767876797680768176827683768476857686768776887689769076917692769376947695769676977698769977007701770277037704770577067707770877097710771177127713771477157716771777187719772077217722772377247725772677277728772977307731773277337734773577367737773877397740774177427743774477457746774777487749775077517752775377547755775677577758775977607761
  1. [
  2. {
  3. "name": "torchvision::roi_align(Tensor input, Tensor rois, float spatial_scale, SymInt pooled_height, SymInt pooled_width, int sampling_ratio, bool aligned) -> Tensor"
  4. },
  5. {
  6. "name": "torchvision::nms(Tensor dets, Tensor scores, float iou_threshold) -> Tensor"
  7. },
  8. {
  9. "name": "aten::set_grad_enabled(bool val) -> ()"
  10. },
  11. {
  12. "name": "aten::is_grad_enabled() -> bool"
  13. },
  14. {
  15. "name": "aten::is_scripting() -> bool"
  16. },
  17. {
  18. "name": "aten::as_tensor.bool(bool t, *, ScalarType? dtype=None, Device? device=None) -> Tensor"
  19. },
  20. {
  21. "name": "aten::as_tensor.float(float t, *, ScalarType? dtype=None, Device? device=None) -> Tensor"
  22. },
  23. {
  24. "name": "aten::as_tensor.int(int t, *, ScalarType? dtype=None, Device? device=None) -> Tensor"
  25. },
  26. {
  27. "name": "aten::as_tensor.complex(complex t, *, ScalarType? dtype=None, Device? device=None) -> Tensor"
  28. },
  29. {
  30. "name": "aten::as_tensor(Tensor(a) data, *, ScalarType? dtype=None, Device? device=None) -> Tensor(a|b)"
  31. },
  32. {
  33. "name": "aten::as_tensor.list(t[] data, *, ScalarType? dtype=None, Device? device=None) -> Tensor"
  34. },
  35. {
  36. "name": "aten::tensor.bool(bool t, *, ScalarType? dtype=None, Device? device=None, bool requires_grad=False) -> Tensor"
  37. },
  38. {
  39. "name": "aten::tensor.float(float t, *, ScalarType? dtype=None, Device? device=None, bool requires_grad=False) -> Tensor"
  40. },
  41. {
  42. "name": "aten::tensor.int(int t, *, ScalarType? dtype=None, Device? device=None, bool requires_grad=False) -> Tensor"
  43. },
  44. {
  45. "name": "aten::tensor.complex(complex t, *, ScalarType? dtype=None, Device? device=None, bool requires_grad=False) -> Tensor"
  46. },
  47. {
  48. "name": "aten::tensor(t[] data, *, ScalarType? dtype=None, Device? device=None, bool requires_grad=False) -> Tensor"
  49. },
  50. {
  51. "name": "aten::__upsample_bilinear(Tensor input, int? size=None, int? scale_factor=None) -> Tensor"
  52. },
  53. {
  54. "name": "aten::__upsample_bilinear.size_list(Tensor input, int[]? size=None, int? scale_factor=None) -> Tensor"
  55. },
  56. {
  57. "name": "aten::__upsample_bilinear.scale_list(Tensor input, int? size=None, int[]? scale_factor=None) -> Tensor"
  58. },
  59. {
  60. "name": "aten::__upsample_bilinear.size_list_scale_list(Tensor input, int[]? size=None, int[]? scale_factor=None) -> Tensor"
  61. },
  62. {
  63. "name": "aten::__upsample(Tensor input, int? size=None, int? scale_factor=None, str mode=\"nearest\", bool? align_corners=None) -> Tensor",
  64. "category": "Layer"
  65. },
  66. {
  67. "name": "aten::__upsample.size_list(Tensor input, int[]? size=None, int? scale_factor=None, str mode=\"nearest\", bool? align_corners=None) -> Tensor",
  68. "category": "Layer"
  69. },
  70. {
  71. "name": "aten::__interpolate.scale_list(Tensor input, int? size=None, float[]? scale_factor=None, str mode=\"nearest\", bool? align_corners=None, bool? recompute_scale_factor=None, bool antialias=False) -> Tensor"
  72. },
  73. {
  74. "name": "aten::__interpolate.size_list_scale_list(Tensor input, int[]? size=None, float[]? scale_factor=None, str mode=\"nearest\", bool? align_corners=None, bool? recompute_scale_factor=None, bool antialias=False) -> Tensor"
  75. },
  76. {
  77. "name": "aten::__interpolate(Tensor input, int? size=None, float? scale_factor=None, str mode=\"nearest\", bool? align_corners=None, bool? recompute_scale_factor=None, bool antialias=False) -> Tensor"
  78. },
  79. {
  80. "name": "aten::__interpolate.size_list(Tensor input, int[]? size=None, float? scale_factor=None, str mode=\"nearest\", bool? align_corners=None, bool? recompute_scale_factor=None, bool antialias=False) -> Tensor"
  81. },
  82. {
  83. "name": "aten::wait(Future(t) self) -> t"
  84. },
  85. {
  86. "name": "prim::ModuleContainerIndex.list(Any self, int ind) -> Any"
  87. },
  88. {
  89. "name": "prim::ModuleContainerIndex.dict(Any self, str ind) -> Any"
  90. },
  91. {
  92. "name": "prim::id(AnyClassType? x) -> int"
  93. },
  94. {
  95. "name": "aten::divmod.int(int x, int y) -> (int, int)"
  96. },
  97. {
  98. "name": "aten::divmod.float(float x, float y) -> (float, float)"
  99. },
  100. {
  101. "name": "aten::divmod.int_float(int x, float y) -> (float, float)"
  102. },
  103. {
  104. "name": "aten::divmod.float_int(float x, int y) -> (float, float)"
  105. },
  106. {
  107. "name": "prim::abs.int(int a) -> int"
  108. },
  109. {
  110. "name": "prim::abs.float(float a) -> float"
  111. },
  112. {
  113. "name": "prim::abs.complex(complex a) -> float"
  114. },
  115. {
  116. "name": "prim::abs.Scalar(Scalar a) -> Scalar"
  117. },
  118. {
  119. "name": "prim::abs(Tensor x) -> Tensor"
  120. },
  121. {
  122. "name": "prim::StringIndex(str string, int index) -> str"
  123. },
  124. {
  125. "name": "aten::bin(int i) -> str"
  126. },
  127. {
  128. "name": "aten::_unwrap_optional(t(a)? optional) -> t(a)"
  129. },
  130. {
  131. "name": "prim::AutogradAdd(Any a, Any b) -> Any"
  132. },
  133. {
  134. "name": "prim::AutogradAllNonZero(...) -> bool"
  135. },
  136. {
  137. "name": "prim::AutogradAllZero(...) -> bool"
  138. },
  139. {
  140. "name": "prim::AutogradAnyNonZero(...) -> bool"
  141. },
  142. {
  143. "name": "aten::warn(str message, int stacklevel=2) -> ()"
  144. },
  145. {
  146. "name": "prim::BroadcastSizes(...) -> int[]"
  147. },
  148. {
  149. "name": "prim::ReductionSizes(int[] size, int[] red_axes, bool keepdim=False) -> int[]"
  150. },
  151. {
  152. "name": "prim::AutogradZero() -> Tensor"
  153. },
  154. {
  155. "name": "aten::cuda(Tensor(a) self) -> Tensor(a|b)"
  156. },
  157. {
  158. "name": "aten::manual_seed(int seed) -> ()"
  159. },
  160. {
  161. "name": "aten::manual_seed.generator(Generator(a!) self, int seed) -> Generator(a!)"
  162. },
  163. {
  164. "name": "prim::index(Device self) -> int?"
  165. },
  166. {
  167. "name": "prim::itemsize(Tensor a) -> int"
  168. },
  169. {
  170. "name": "prim::nbytes(Tensor a) -> int"
  171. },
  172. {
  173. "name": "prim::name(Tensor a) -> str?"
  174. },
  175. {
  176. "name": "prim::is_nested(Tensor a) -> bool"
  177. },
  178. {
  179. "name": "prim::is_maia(Tensor a) -> bool"
  180. },
  181. {
  182. "name": "prim::is_meta(Tensor a) -> bool"
  183. },
  184. {
  185. "name": "prim::is_quantized(Tensor a) -> bool"
  186. },
  187. {
  188. "name": "prim::is_ipu(Tensor a) -> bool"
  189. },
  190. {
  191. "name": "prim::is_vulkan(Tensor a) -> bool"
  192. },
  193. {
  194. "name": "prim::is_mps(Tensor a) -> bool"
  195. },
  196. {
  197. "name": "prim::is_mkldnn(Tensor a) -> bool"
  198. },
  199. {
  200. "name": "prim::is_sparse_csr(Tensor a) -> bool"
  201. },
  202. {
  203. "name": "prim::is_sparse(Tensor a) -> bool"
  204. },
  205. {
  206. "name": "prim::grad(Tensor a) -> Tensor(*)"
  207. },
  208. {
  209. "name": "prim::requires_grad(Tensor a) -> bool"
  210. },
  211. {
  212. "name": "aten::device(str a) -> Device"
  213. },
  214. {
  215. "name": "aten::device.with_index(str type, int index) -> Device"
  216. },
  217. {
  218. "name": "prim::rangelist(int n) -> int[]"
  219. },
  220. {
  221. "name": "aten::join(str self, str[] values) -> str"
  222. },
  223. {
  224. "name": "aten::replace(str self, str old, str new, int max=-1) -> str"
  225. },
  226. {
  227. "name": "aten::rstrip(str self, str chars=\" \\n\\t\\f\\v\") -> str"
  228. },
  229. {
  230. "name": "aten::lstrip(str self, str chars=\" \\n\\t\\f\\v\") -> str"
  231. },
  232. {
  233. "name": "aten::find(str self, str substr, int start=0, int end=-1) -> int"
  234. },
  235. {
  236. "name": "aten::count(str self, str substr, int start=0, int end=-1) -> int"
  237. },
  238. {
  239. "name": "aten::count.int(int[] self, int el) -> int"
  240. },
  241. {
  242. "name": "aten::count.float(float[] self, float el) -> int"
  243. },
  244. {
  245. "name": "aten::count.bool(bool[] self, bool el) -> int"
  246. },
  247. {
  248. "name": "aten::count.Tensor(Tensor[] self, Tensor el) -> int"
  249. },
  250. {
  251. "name": "aten::count.str(str[] self, str el) -> int"
  252. },
  253. {
  254. "name": "aten::splitlines(str self, bool keepends=False) -> str[]"
  255. },
  256. {
  257. "name": "aten::strip(str self, str chars=\" \\n\\t\\f\\v\") -> str"
  258. },
  259. {
  260. "name": "aten::items.str(Dict(str, t) self) -> ((str, t)[])"
  261. },
  262. {
  263. "name": "aten::items.int(Dict(int, t) self) -> ((int, t)[])"
  264. },
  265. {
  266. "name": "aten::items.bool(Dict(bool, t) self) -> ((bool, t)[])"
  267. },
  268. {
  269. "name": "aten::items.float(Dict(float, t) self) -> ((float, t)[])"
  270. },
  271. {
  272. "name": "aten::items.complex(Dict(complex, t) self) -> ((complex, t)[])"
  273. },
  274. {
  275. "name": "aten::items.Tensor(Dict(Tensor, t) self) -> ((Tensor, t)[])"
  276. },
  277. {
  278. "name": "aten::update.str(Dict(str, t)(a!) self, Dict(str, t)(a!) to_add) -> ()"
  279. },
  280. {
  281. "name": "aten::update.int(Dict(int, t)(a!) self, Dict(int, t)(a!) to_add) -> ()"
  282. },
  283. {
  284. "name": "aten::update.bool(Dict(bool, t)(a!) self, Dict(bool, t)(a!) to_add) -> ()"
  285. },
  286. {
  287. "name": "aten::update.float(Dict(float, t)(a!) self, Dict(float, t)(a!) to_add) -> ()"
  288. },
  289. {
  290. "name": "aten::update.complex(Dict(complex, t)(a!) self, Dict(complex, t)(a!) to_add) -> ()"
  291. },
  292. {
  293. "name": "aten::update.Tensor(Dict(Tensor, t)(a!) self, Dict(Tensor, t)(a!) to_add) -> ()"
  294. },
  295. {
  296. "name": "aten::get.str(Dict(str, t) self, str key) -> t(*)?"
  297. },
  298. {
  299. "name": "aten::get.default_str(Dict(str, t) self, str key, t default_value) -> t(*)"
  300. },
  301. {
  302. "name": "aten::get.int(Dict(int, t) self, int key) -> t(*)?"
  303. },
  304. {
  305. "name": "aten::get.default_int(Dict(int, t) self, int key, t default_value) -> t(*)"
  306. },
  307. {
  308. "name": "aten::get.bool(Dict(bool, t) self, bool key) -> t(*)?"
  309. },
  310. {
  311. "name": "aten::get.default_bool(Dict(bool, t) self, bool key, t default_value) -> t(*)"
  312. },
  313. {
  314. "name": "aten::get.float(Dict(float, t) self, float key) -> t(*)?"
  315. },
  316. {
  317. "name": "aten::get.default_float(Dict(float, t) self, float key, t default_value) -> t(*)"
  318. },
  319. {
  320. "name": "aten::get.complex(Dict(complex, t) self, complex key) -> t(*)?"
  321. },
  322. {
  323. "name": "aten::get.default_complex(Dict(complex, t) self, complex key, t default_value) -> t(*)"
  324. },
  325. {
  326. "name": "aten::get.Tensor(Dict(Tensor, t) self, Tensor key) -> t(*)?"
  327. },
  328. {
  329. "name": "aten::get.default_Tensor(Dict(Tensor, t) self, Tensor key, t default_value) -> t(*)"
  330. },
  331. {
  332. "name": "aten::keys.str(Dict(str, t) self) -> str[](*)"
  333. },
  334. {
  335. "name": "aten::keys.int(Dict(int, t) self) -> int[](*)"
  336. },
  337. {
  338. "name": "aten::keys.bool(Dict(bool, t) self) -> bool[](*)"
  339. },
  340. {
  341. "name": "aten::keys.float(Dict(float, t) self) -> float[](*)"
  342. },
  343. {
  344. "name": "aten::keys.complex(Dict(complex, t) self) -> complex[](*)"
  345. },
  346. {
  347. "name": "aten::keys.Tensor(Dict(Tensor, t) self) -> Tensor[](*)"
  348. },
  349. {
  350. "name": "prim::tolist(...) -> ..."
  351. },
  352. {
  353. "name": "prim::data(Tensor(a) a) -> Tensor(a)"
  354. },
  355. {
  356. "name": "prim::is_xpu(Tensor a) -> bool"
  357. },
  358. {
  359. "name": "prim::is_mtia(Tensor a) -> bool"
  360. },
  361. {
  362. "name": "prim::is_xla(Tensor a) -> bool"
  363. },
  364. {
  365. "name": "prim::is_cpu(Tensor a) -> bool"
  366. },
  367. {
  368. "name": "prim::is_cuda(Tensor a) -> bool"
  369. },
  370. {
  371. "name": "aten::dict() -> Dict(str, Tensor)"
  372. },
  373. {
  374. "name": "aten::dict.str((str, tVal)[] inputs) -> Dict(str, tVal)"
  375. },
  376. {
  377. "name": "aten::dict.Dict_str(Dict(str, t)(a) self) -> Dict(str, t)"
  378. },
  379. {
  380. "name": "aten::dict.int((int, tVal)[] inputs) -> Dict(int, tVal)"
  381. },
  382. {
  383. "name": "aten::dict.Dict_int(Dict(int, t)(a) self) -> Dict(int, t)"
  384. },
  385. {
  386. "name": "aten::dict.bool((bool, tVal)[] inputs) -> Dict(bool, tVal)"
  387. },
  388. {
  389. "name": "aten::dict.Dict_bool(Dict(bool, t)(a) self) -> Dict(bool, t)"
  390. },
  391. {
  392. "name": "aten::dict.float((float, tVal)[] inputs) -> Dict(float, tVal)"
  393. },
  394. {
  395. "name": "aten::dict.Dict_float(Dict(float, t)(a) self) -> Dict(float, t)"
  396. },
  397. {
  398. "name": "aten::dict.complex((complex, tVal)[] inputs) -> Dict(complex, tVal)"
  399. },
  400. {
  401. "name": "aten::dict.Dict_complex(Dict(complex, t)(a) self) -> Dict(complex, t)"
  402. },
  403. {
  404. "name": "aten::dict.Tensor((Tensor, tVal)[] inputs) -> Dict(Tensor, tVal)"
  405. },
  406. {
  407. "name": "aten::dict.Dict_Tensor(Dict(Tensor, t)(a) self) -> Dict(Tensor, t)"
  408. },
  409. {
  410. "name": "aten::__contains__.int_list(int[] l, int item) -> bool"
  411. },
  412. {
  413. "name": "aten::__contains__.str_list(str[] l, str item) -> bool"
  414. },
  415. {
  416. "name": "aten::__contains__.str(Dict(str, t) dict, str key) -> bool"
  417. },
  418. {
  419. "name": "aten::__contains__.int(Dict(int, t) dict, int key) -> bool"
  420. },
  421. {
  422. "name": "aten::__contains__.bool(Dict(bool, t) dict, bool key) -> bool"
  423. },
  424. {
  425. "name": "aten::__contains__.float(Dict(float, t) dict, float key) -> bool"
  426. },
  427. {
  428. "name": "aten::__contains__.complex(Dict(complex, t) dict, complex key) -> bool"
  429. },
  430. {
  431. "name": "aten::__contains__.Tensor(Dict(Tensor, t) dict, Tensor key) -> bool"
  432. },
  433. {
  434. "name": "aten::__contains__.float_list(float[] l, float item) -> bool"
  435. },
  436. {
  437. "name": "aten::lower(str self) -> str"
  438. },
  439. {
  440. "name": "prim::type(Device self) -> str"
  441. },
  442. {
  443. "name": "prim::max.int(int a, int b) -> int"
  444. },
  445. {
  446. "name": "prim::max.float(float a, float b) -> float"
  447. },
  448. {
  449. "name": "prim::max.int_float(int a, float b) -> float"
  450. },
  451. {
  452. "name": "prim::max.float_int(float a, int b) -> float"
  453. },
  454. {
  455. "name": "prim::max(Scalar a, Scalar b) -> Scalar"
  456. },
  457. {
  458. "name": "prim::max.int_list(int[] l, int[] r) -> int[]"
  459. },
  460. {
  461. "name": "prim::max.self_int(int[] self) -> int"
  462. },
  463. {
  464. "name": "prim::max.float_list(float[] l, float[] r) -> float[]"
  465. },
  466. {
  467. "name": "prim::max.self_float(float[] self) -> float"
  468. },
  469. {
  470. "name": "prim::max.bool_list(bool[] l, bool[] r) -> bool[]"
  471. },
  472. {
  473. "name": "prim::max.self_bool(bool[] self) -> bool"
  474. },
  475. {
  476. "name": "prim::min.int(int a, int b) -> int"
  477. },
  478. {
  479. "name": "prim::min.float(float a, float b) -> float"
  480. },
  481. {
  482. "name": "prim::min.int_float(int a, float b) -> float"
  483. },
  484. {
  485. "name": "prim::min.float_int(float a, int b) -> float"
  486. },
  487. {
  488. "name": "prim::min(Scalar a, Scalar b) -> Scalar"
  489. },
  490. {
  491. "name": "prim::min.int_list(int[] l, int[] r) -> int[]"
  492. },
  493. {
  494. "name": "prim::min.self_int(int[] self) -> int"
  495. },
  496. {
  497. "name": "prim::min.float_list(float[] l, float[] r) -> float[]"
  498. },
  499. {
  500. "name": "prim::min.self_float(float[] self) -> float"
  501. },
  502. {
  503. "name": "prim::min.bool_list(bool[] l, bool[] r) -> bool[]"
  504. },
  505. {
  506. "name": "prim::min.self_bool(bool[] self) -> bool"
  507. },
  508. {
  509. "name": "aten::floordiv.int(int a, int b) -> int"
  510. },
  511. {
  512. "name": "aten::floordiv.float(float a, float b) -> float"
  513. },
  514. {
  515. "name": "aten::floordiv.int_float(int a, float b) -> float"
  516. },
  517. {
  518. "name": "aten::floordiv.float_int(float a, int b) -> float"
  519. },
  520. {
  521. "name": "aten::floordiv(Scalar a, Scalar b) -> Scalar"
  522. },
  523. {
  524. "name": "prim::IfThenElse(bool cond, Any(a) x, Any(b) y) -> Any(a|b)"
  525. },
  526. {
  527. "name": "prim::VarStack(...) -> Tensor"
  528. },
  529. {
  530. "name": "prim::VarConcat(...) -> Tensor"
  531. },
  532. {
  533. "name": "prim::Print(...) -> ()"
  534. },
  535. {
  536. "name": "prim::Uninitialized() -> Any"
  537. },
  538. {
  539. "name": "aten::len.t(t[] a) -> int"
  540. },
  541. {
  542. "name": "aten::len.Tensor(Tensor t) -> int"
  543. },
  544. {
  545. "name": "aten::len.str(str s) -> int"
  546. },
  547. {
  548. "name": "aten::len.Dict_str(Dict(str, t) self) -> int"
  549. },
  550. {
  551. "name": "aten::len.Dict_int(Dict(int, t) self) -> int"
  552. },
  553. {
  554. "name": "aten::len.Dict_bool(Dict(bool, t) self) -> int"
  555. },
  556. {
  557. "name": "aten::len.Dict_float(Dict(float, t) self) -> int"
  558. },
  559. {
  560. "name": "aten::len.Dict_complex(Dict(complex, t) self) -> int"
  561. },
  562. {
  563. "name": "aten::len.Dict_Tensor(Dict(Tensor, t) self) -> int"
  564. },
  565. {
  566. "name": "aten::len.any(Any[] a) -> int"
  567. },
  568. {
  569. "name": "aten::pop.t(t[](a!) self, int idx=-1) -> t(*)"
  570. },
  571. {
  572. "name": "aten::pop.Dict_str(Dict(str, t)(a!) self, str key) -> t(*)"
  573. },
  574. {
  575. "name": "aten::pop.Dict_default_str(Dict(str, t)(a!) self, str key, t default_value) -> t(*)"
  576. },
  577. {
  578. "name": "aten::pop.Dict_int(Dict(int, t)(a!) self, int key) -> t(*)"
  579. },
  580. {
  581. "name": "aten::pop.Dict_default_int(Dict(int, t)(a!) self, int key, t default_value) -> t(*)"
  582. },
  583. {
  584. "name": "aten::pop.Dict_bool(Dict(bool, t)(a!) self, bool key) -> t(*)"
  585. },
  586. {
  587. "name": "aten::pop.Dict_default_bool(Dict(bool, t)(a!) self, bool key, t default_value) -> t(*)"
  588. },
  589. {
  590. "name": "aten::pop.Dict_float(Dict(float, t)(a!) self, float key) -> t(*)"
  591. },
  592. {
  593. "name": "aten::pop.Dict_default_float(Dict(float, t)(a!) self, float key, t default_value) -> t(*)"
  594. },
  595. {
  596. "name": "aten::pop.Dict_complex(Dict(complex, t)(a!) self, complex key) -> t(*)"
  597. },
  598. {
  599. "name": "aten::pop.Dict_default_complex(Dict(complex, t)(a!) self, complex key, t default_value) -> t(*)"
  600. },
  601. {
  602. "name": "aten::pop.Dict_Tensor(Dict(Tensor, t)(a!) self, Tensor key) -> t(*)"
  603. },
  604. {
  605. "name": "aten::pop.Dict_default_Tensor(Dict(Tensor, t)(a!) self, Tensor key, t default_value) -> t(*)"
  606. },
  607. {
  608. "name": "aten::insert.t(t[](a!) self, int idx, t(b -> *) el) -> ()"
  609. },
  610. {
  611. "name": "aten::clear.t(t[](a!) self) -> ()"
  612. },
  613. {
  614. "name": "aten::clear.str(Dict(str, t)(a!) self) -> ()"
  615. },
  616. {
  617. "name": "aten::clear.int(Dict(int, t)(a!) self) -> ()"
  618. },
  619. {
  620. "name": "aten::clear.bool(Dict(bool, t)(a!) self) -> ()"
  621. },
  622. {
  623. "name": "aten::clear.float(Dict(float, t)(a!) self) -> ()"
  624. },
  625. {
  626. "name": "aten::clear.complex(Dict(complex, t)(a!) self) -> ()"
  627. },
  628. {
  629. "name": "aten::clear.Tensor(Dict(Tensor, t)(a!) self) -> ()"
  630. },
  631. {
  632. "name": "aten::_set_item.t(t[](a!) l, int idx, t(b -> *) el) -> t[](a!)"
  633. },
  634. {
  635. "name": "aten::_set_item.str(Dict(str, t)(a!) l, str(b -> *) idx, t(c -> *) v) -> ()"
  636. },
  637. {
  638. "name": "aten::_set_item.int(Dict(int, t)(a!) l, int(b -> *) idx, t(c -> *) v) -> ()"
  639. },
  640. {
  641. "name": "aten::_set_item.bool(Dict(bool, t)(a!) l, bool(b -> *) idx, t(c -> *) v) -> ()"
  642. },
  643. {
  644. "name": "aten::_set_item.float(Dict(float, t)(a!) l, float(b -> *) idx, t(c -> *) v) -> ()"
  645. },
  646. {
  647. "name": "aten::_set_item.complex(Dict(complex, t)(a!) l, complex(b -> *) idx, t(c -> *) v) -> ()"
  648. },
  649. {
  650. "name": "aten::_set_item.Tensor(Dict(Tensor, t)(a!) l, Tensor(b -> *) idx, t(c -> *) v) -> ()"
  651. },
  652. {
  653. "name": "aten::extend.t(t[](a!) self, t[] other) -> ()"
  654. },
  655. {
  656. "name": "aten::reverse.t(t[](a!) self) -> ()"
  657. },
  658. {
  659. "name": "aten::append.t(t[](a!) self, t(c -> *) el) -> t[](a!)"
  660. },
  661. {
  662. "name": "aten::__getitem__.t(t[](a) list, int idx) -> t(*)"
  663. },
  664. {
  665. "name": "aten::__getitem__.str(str s, int index) -> str"
  666. },
  667. {
  668. "name": "aten::__getitem__.Dict_str(Dict(str, t) self, str key) -> t(*)"
  669. },
  670. {
  671. "name": "aten::__getitem__.Dict_int(Dict(int, t) self, int key) -> t(*)"
  672. },
  673. {
  674. "name": "aten::__getitem__.Dict_bool(Dict(bool, t) self, bool key) -> t(*)"
  675. },
  676. {
  677. "name": "aten::__getitem__.Dict_float(Dict(float, t) self, float key) -> t(*)"
  678. },
  679. {
  680. "name": "aten::__getitem__.Dict_complex(Dict(complex, t) self, complex key) -> t(*)"
  681. },
  682. {
  683. "name": "aten::__getitem__.Dict_Tensor(Dict(Tensor, t) self, Tensor key) -> t(*)"
  684. },
  685. {
  686. "name": "aten::is_contiguous(Tensor self) -> bool"
  687. },
  688. {
  689. "name": "aten::is_contiguous.memory_format(Tensor self, MemoryFormat memory_format) -> bool"
  690. },
  691. {
  692. "name": "aten::get_device(Tensor self) -> int"
  693. },
  694. {
  695. "name": "aten::dim(Tensor self) -> int"
  696. },
  697. {
  698. "name": "aten::numel(Tensor self) -> int"
  699. },
  700. {
  701. "name": "aten::__isnot__(t1 self, t2 obj) -> bool"
  702. },
  703. {
  704. "name": "aten::__is__(t1 self, t2 obj) -> bool"
  705. },
  706. {
  707. "name": "aten::__not__(bool self) -> bool"
  708. },
  709. {
  710. "name": "prim::layout(Tensor a) -> Layout"
  711. },
  712. {
  713. "name": "prim::dtype(Tensor a) -> int"
  714. },
  715. {
  716. "name": "prim::device(Tensor a) -> Device"
  717. },
  718. {
  719. "name": "prim::unchecked_unwrap_optional(t(a)? optional) -> t(a)"
  720. },
  721. {
  722. "name": "prim::TupleIndex(Any tup, int i) -> Any"
  723. },
  724. {
  725. "name": "prim::EnumValue.int(AnyEnumType enum) -> int"
  726. },
  727. {
  728. "name": "prim::EnumValue.float(AnyEnumType enum) -> float"
  729. },
  730. {
  731. "name": "prim::EnumValue.str(AnyEnumType enum) -> str"
  732. },
  733. {
  734. "name": "prim::EnumName(AnyEnumType enum) -> str"
  735. },
  736. {
  737. "name": "prim::RaiseException(str msg, str? cls=None) -> ()"
  738. },
  739. {
  740. "name": "prim::NumToTensor.Scalar(Scalar a) -> Tensor"
  741. },
  742. {
  743. "name": "prim::NumToTensor.bool(bool a) -> Tensor"
  744. },
  745. {
  746. "name": "aten::format(str self, ...) -> str",
  747. "is_vararg": true
  748. },
  749. {
  750. "name": "aten::Complex.Scalar(Scalar a) -> complex"
  751. },
  752. {
  753. "name": "aten::Complex.Tensor_Tensor(Tensor a, Tensor b) -> complex"
  754. },
  755. {
  756. "name": "aten::Complex.int_bool(int x, bool y) -> complex"
  757. },
  758. {
  759. "name": "aten::Complex.bool_int(bool x, int y) -> complex"
  760. },
  761. {
  762. "name": "aten::Complex.float_bool(float x, bool y) -> complex"
  763. },
  764. {
  765. "name": "aten::Complex.bool_float(bool x, float y) -> complex"
  766. },
  767. {
  768. "name": "aten::Complex.float_int(float x, int y) -> complex"
  769. },
  770. {
  771. "name": "aten::Complex.int_float(int x, float y) -> complex"
  772. },
  773. {
  774. "name": "aten::Complex.int_int(int x, int y) -> complex"
  775. },
  776. {
  777. "name": "aten::Complex.bool_bool(bool x, bool y) -> complex"
  778. },
  779. {
  780. "name": "aten::Complex.float_float(float x, float y) -> complex"
  781. },
  782. {
  783. "name": "aten::Complex.Tensor_float(Tensor x, float y) -> complex"
  784. },
  785. {
  786. "name": "aten::Complex.float_Tensor(float x, Tensor y) -> complex"
  787. },
  788. {
  789. "name": "aten::Complex.Tensor_int(Tensor x, int y) -> complex"
  790. },
  791. {
  792. "name": "aten::Complex.int_Tensor(int x, Tensor y) -> complex"
  793. },
  794. {
  795. "name": "aten::Complex.Tensor_bool(Tensor x, bool y) -> complex"
  796. },
  797. {
  798. "name": "aten::Complex.bool_Tensor(bool x, Tensor y) -> complex"
  799. },
  800. {
  801. "name": "aten::Float.Tensor(Tensor a) -> float"
  802. },
  803. {
  804. "name": "aten::Float.Scalar(Scalar a) -> float"
  805. },
  806. {
  807. "name": "aten::Float.int(int a) -> float"
  808. },
  809. {
  810. "name": "aten::Float.bool(bool a) -> float"
  811. },
  812. {
  813. "name": "aten::Float.str(str a) -> float"
  814. },
  815. {
  816. "name": "aten::Int.Tensor(Tensor a) -> int"
  817. },
  818. {
  819. "name": "aten::Int.bool(bool a) -> int"
  820. },
  821. {
  822. "name": "aten::Int.float(float a) -> int"
  823. },
  824. {
  825. "name": "aten::Int.Scalar(Scalar a) -> int"
  826. },
  827. {
  828. "name": "aten::Int.str(str a) -> int"
  829. },
  830. {
  831. "name": "aten::Bool.Tensor(Tensor a) -> bool"
  832. },
  833. {
  834. "name": "aten::Bool.int(int a) -> bool"
  835. },
  836. {
  837. "name": "aten::Bool.float(float a) -> bool"
  838. },
  839. {
  840. "name": "aten::ScalarImplicit(Tensor a) -> Scalar"
  841. },
  842. {
  843. "name": "aten::FloatImplicit(Tensor a) -> float"
  844. },
  845. {
  846. "name": "aten::ComplexImplicit(Tensor a) -> complex"
  847. },
  848. {
  849. "name": "aten::IntImplicit(Tensor a) -> int"
  850. },
  851. {
  852. "name": "prim::unchecked_cast(t x) -> t"
  853. },
  854. {
  855. "name": "prim::TupleUnpack(Any tup) -> ..."
  856. },
  857. {
  858. "name": "aten::__derive_index(int index, int start, int step) -> int"
  859. },
  860. {
  861. "name": "aten::__range_length(int lo, int hi, int step) -> int"
  862. },
  863. {
  864. "name": "aten::cpu(Tensor(a) self) -> Tensor(a|b)"
  865. },
  866. {
  867. "name": "aten::list(str t) -> str[]"
  868. },
  869. {
  870. "name": "aten::list.t(t[] l) -> t[]"
  871. },
  872. {
  873. "name": "aten::str(t elem) -> str"
  874. },
  875. {
  876. "name": "aten::_indices(Tensor(a) self) -> Tensor(a)"
  877. },
  878. {
  879. "name": "aten::pad_sequence(Tensor[] sequences, bool batch_first=False, float padding_value=0., str padding_side=\"right\") -> Tensor"
  880. },
  881. {
  882. "name": "aten::_test_serialization_subcmul(Tensor self, Tensor other, Scalar alpha=1) -> Tensor"
  883. },
  884. {
  885. "name": "aten::nested_to_padded_tensor(Tensor self, float padding, int[]? output_size=None) -> Tensor"
  886. },
  887. {
  888. "name": "aten::fft_ihfftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor"
  889. },
  890. {
  891. "name": "aten::fft_ihfftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)"
  892. },
  893. {
  894. "name": "aten::fft_ihfft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2, -1], str? norm=None) -> Tensor"
  895. },
  896. {
  897. "name": "aten::fft_ihfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2, -1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)"
  898. },
  899. {
  900. "name": "aten::column_stack(Tensor[] tensors) -> Tensor"
  901. },
  902. {
  903. "name": "aten::column_stack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)"
  904. },
  905. {
  906. "name": "aten::argwhere(Tensor self) -> Tensor"
  907. },
  908. {
  909. "name": "aten::nonzero_numpy(Tensor self) -> Tensor[]"
  910. },
  911. {
  912. "name": "aten::greater_equal_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)"
  913. },
  914. {
  915. "name": "aten::greater_equal_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)"
  916. },
  917. {
  918. "name": "aten::_pad_packed_sequence(Tensor data, Tensor batch_sizes, bool batch_first, Scalar padding_value, int total_length) -> (Tensor, Tensor)"
  919. },
  920. {
  921. "name": "aten::quantized_lstm_cell(Tensor input, Tensor[] hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> (Tensor, Tensor)"
  922. },
  923. {
  924. "name": "aten::rnn_relu.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor)",
  925. "category": "Layer"
  926. },
  927. {
  928. "name": "aten::rnn_relu.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor)"
  929. },
  930. {
  931. "name": "aten::rnn_tanh.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor)",
  932. "category": "Layer"
  933. },
  934. {
  935. "name": "aten::rnn_tanh.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor)"
  936. },
  937. {
  938. "name": "aten::gru.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor)",
  939. "category": "Layer"
  940. },
  941. {
  942. "name": "aten::gru.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor)",
  943. "category": "Layer"
  944. },
  945. {
  946. "name": "aten::lstm.input(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor, Tensor)",
  947. "category": "Layer"
  948. },
  949. {
  950. "name": "aten::lstm.data(Tensor data, Tensor batch_sizes, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor, Tensor)",
  951. "category": "Layer"
  952. },
  953. {
  954. "name": "aten::_autocast_to_reduced_precision(Tensor(a) self, bool cuda_enabled, bool cpu_enabled, ScalarType cuda_dtype, ScalarType cpu_dtype) -> Tensor(a)"
  955. },
  956. {
  957. "name": "aten::fused_moving_avg_obs_fake_quant(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor(a!) running_min, Tensor(b!) running_max, Tensor(c!) scale, Tensor(d!) zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False) -> Tensor"
  958. },
  959. {
  960. "name": "aten::fake_quantize_per_channel_affine(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max) -> Tensor",
  961. "category": "Quantization"
  962. },
  963. {
  964. "name": "aten::fake_quantize_per_tensor_affine_cachemask_backward(Tensor grad, Tensor mask) -> Tensor",
  965. "category": "Quantization"
  966. },
  967. {
  968. "name": "aten::to_mkldnn_backward(Tensor grad, Tensor input) -> Tensor"
  969. },
  970. {
  971. "name": "aten::to_sparse_bsc(Tensor self, int[2] blocksize, int? dense_dim=None) -> Tensor"
  972. },
  973. {
  974. "name": "aten::to_sparse_bsr(Tensor self, int[2] blocksize, int? dense_dim=None) -> Tensor"
  975. },
  976. {
  977. "name": "aten::to_sparse_csc(Tensor self, int? dense_dim=None) -> Tensor"
  978. },
  979. {
  980. "name": "aten::to_sparse_csr(Tensor self, int? dense_dim=None) -> Tensor"
  981. },
  982. {
  983. "name": "aten::to_sparse.sparse_dim(Tensor self, int sparse_dim) -> Tensor"
  984. },
  985. {
  986. "name": "aten::to_sparse(Tensor self, *, Layout? layout=None, int[2]? blocksize=None, int? dense_dim=None) -> Tensor"
  987. },
  988. {
  989. "name": "aten::coalesce(Tensor(a) self) -> Tensor(a)"
  990. },
  991. {
  992. "name": "aten::to_dense_backward(Tensor grad, Tensor input, bool? masked_grad=None) -> Tensor"
  993. },
  994. {
  995. "name": "aten::to_dense(Tensor self, ScalarType? dtype=None, *, bool? masked_grad=None) -> Tensor"
  996. },
  997. {
  998. "name": "aten::_weight_norm_differentiable_backward(Tensor grad_w, Tensor saved_v, Tensor saved_g, Tensor saved_norms, int dim) -> (Tensor, Tensor)"
  999. },
  1000. {
  1001. "name": "aten::_weight_norm(Tensor v, Tensor g, int dim=0) -> Tensor"
  1002. },
  1003. {
  1004. "name": "aten::istft(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool center=True, bool normalized=False, bool? onesided=None, int? length=None, bool return_complex=False) -> Tensor"
  1005. },
  1006. {
  1007. "name": "aten::sym_size.int(Tensor self, int dim) -> SymInt"
  1008. },
  1009. {
  1010. "name": "aten::sym_size(Tensor self) -> SymInt[]"
  1011. },
  1012. {
  1013. "name": "aten::pin_memory(Tensor(a) self, Device? device=None) -> Tensor(a)"
  1014. },
  1015. {
  1016. "name": "aten::embedding_bag(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False) -> (Tensor, Tensor, Tensor, Tensor)",
  1017. "category": "Transform"
  1018. },
  1019. {
  1020. "name": "aten::embedding_bag.padding_idx(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq, int mode, bool sparse, Tensor? per_sample_weights, bool include_last_offset, int? padding_idx) -> (Tensor, Tensor, Tensor, Tensor)"
  1021. },
  1022. {
  1023. "name": "aten::diagflat(Tensor self, int offset=0) -> Tensor"
  1024. },
  1025. {
  1026. "name": "aten::cummaxmin_backward(Tensor grad, Tensor input, Tensor indices, int dim) -> Tensor"
  1027. },
  1028. {
  1029. "name": "aten::clip_(Tensor(a!) self, Scalar? min=None, Scalar? max=None) -> Tensor(a!)"
  1030. },
  1031. {
  1032. "name": "aten::clip_.Tensor(Tensor(a!) self, Tensor? min=None, Tensor? max=None) -> Tensor(a!)"
  1033. },
  1034. {
  1035. "name": "aten::_dim_arange(Tensor like, int dim) -> Tensor"
  1036. },
  1037. {
  1038. "name": "aten::_shape_as_tensor(Tensor self) -> Tensor"
  1039. },
  1040. {
  1041. "name": "aten::_cast_Half(Tensor self, bool non_blocking=False) -> Tensor"
  1042. },
  1043. {
  1044. "name": "aten::_cast_Short(Tensor self, bool non_blocking=False) -> Tensor"
  1045. },
  1046. {
  1047. "name": "aten::_cast_Long(Tensor self, bool non_blocking=False) -> Tensor"
  1048. },
  1049. {
  1050. "name": "aten::_cast_Int(Tensor self, bool non_blocking=False) -> Tensor"
  1051. },
  1052. {
  1053. "name": "aten::_cast_Float(Tensor self, bool non_blocking=False) -> Tensor"
  1054. },
  1055. {
  1056. "name": "aten::_cast_Double(Tensor self, bool non_blocking=False) -> Tensor"
  1057. },
  1058. {
  1059. "name": "aten::_cast_Char(Tensor self, bool non_blocking=False) -> Tensor"
  1060. },
  1061. {
  1062. "name": "aten::_cast_Byte(Tensor self, bool non_blocking=False) -> Tensor"
  1063. },
  1064. {
  1065. "name": "aten::to_padded_tensor.out(Tensor self, float padding, SymInt[]? output_size=None, *, Tensor(a!) out) -> Tensor(a!)"
  1066. },
  1067. {
  1068. "name": "aten::to_padded_tensor(Tensor self, float padding, SymInt[]? output_size=None) -> Tensor"
  1069. },
  1070. {
  1071. "name": "aten::masked_scatter(Tensor self, Tensor mask, Tensor source) -> Tensor"
  1072. },
  1073. {
  1074. "name": "aten::masked_scatter.out(Tensor self, Tensor mask, Tensor source, *, Tensor(a!) out) -> Tensor(a!)"
  1075. },
  1076. {
  1077. "name": "aten::_pack_padded_sequence(Tensor input, Tensor lengths, bool batch_first) -> (Tensor, Tensor)"
  1078. },
  1079. {
  1080. "name": "aten::_pack_padded_sequence.out(Tensor input, Tensor lengths, bool batch_first, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))"
  1081. },
  1082. {
  1083. "name": "aten::mkldnn_reorder_conv2d_weight.out(Tensor self, SymInt[2] padding=[0, 0], SymInt[2] stride=[1, 1], SymInt[2] dilation=[1, 1], SymInt groups=1, SymInt[]? input_size=None, *, Tensor(a!) out) -> Tensor(a!)"
  1084. },
  1085. {
  1086. "name": "aten::mkldnn_reorder_conv2d_weight(Tensor self, SymInt[2] padding=[0, 0], SymInt[2] stride=[1, 1], SymInt[2] dilation=[1, 1], SymInt groups=1, SymInt[]? input_size=None) -> Tensor"
  1087. },
  1088. {
  1089. "name": "aten::col_indices(Tensor(a) self) -> Tensor(a)"
  1090. },
  1091. {
  1092. "name": "aten::crow_indices(Tensor(a) self) -> Tensor(a)"
  1093. },
  1094. {
  1095. "name": "aten::values(Tensor(a) self) -> Tensor(a)"
  1096. },
  1097. {
  1098. "name": "aten::values.str(Dict(str, t) self) -> t[](*)"
  1099. },
  1100. {
  1101. "name": "aten::values.int(Dict(int, t) self) -> t[](*)"
  1102. },
  1103. {
  1104. "name": "aten::values.bool(Dict(bool, t) self) -> t[](*)"
  1105. },
  1106. {
  1107. "name": "aten::values.float(Dict(float, t) self) -> t[](*)"
  1108. },
  1109. {
  1110. "name": "aten::values.complex(Dict(complex, t) self) -> t[](*)"
  1111. },
  1112. {
  1113. "name": "aten::values.Tensor(Dict(Tensor, t) self) -> t[](*)"
  1114. },
  1115. {
  1116. "name": "aten::_coalesce.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  1117. },
  1118. {
  1119. "name": "aten::_coalesce(Tensor self) -> Tensor"
  1120. },
  1121. {
  1122. "name": "aten::_native_batch_norm_legit_no_training(Tensor input, Tensor? weight, Tensor? bias, Tensor running_mean, Tensor running_var, float momentum, float eps) -> (Tensor, Tensor, Tensor)",
  1123. "category": "Normalization"
  1124. },
  1125. {
  1126. "name": "aten::_native_batch_norm_legit_no_training.out(Tensor input, Tensor? weight, Tensor? bias, Tensor running_mean, Tensor running_var, float momentum, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))",
  1127. "category": "Normalization"
  1128. },
  1129. {
  1130. "name": "aten::_native_batch_norm_legit_functional(Tensor input, Tensor? weight, Tensor? bias, Tensor running_mean, Tensor running_var, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor, Tensor running_mean_out, Tensor running_var_out)",
  1131. "category": "Normalization"
  1132. },
  1133. {
  1134. "name": "aten::linear_backward.out(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))"
  1135. },
  1136. {
  1137. "name": "aten::linear_backward(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask) -> (Tensor, Tensor, Tensor)"
  1138. },
  1139. {
  1140. "name": "aten::cudnn_convolution_add_relu.out(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups, *, Tensor(a!) out) -> Tensor(a!)"
  1141. },
  1142. {
  1143. "name": "aten::cudnn_convolution_add_relu(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups) -> Tensor"
  1144. },
  1145. {
  1146. "name": "aten::cudnn_convolution_relu.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups, *, Tensor(a!) out) -> Tensor(a!)"
  1147. },
  1148. {
  1149. "name": "aten::cudnn_convolution_relu(Tensor self, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups) -> Tensor"
  1150. },
  1151. {
  1152. "name": "aten::convolution_backward_overrideable(Tensor grad_output, Tensor input, Tensor weight, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias)"
  1153. },
  1154. {
  1155. "name": "aten::convolution_backward_overrideable.out(Tensor grad_output, Tensor input, Tensor weight, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))"
  1156. },
  1157. {
  1158. "name": "aten::convolution_overrideable(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups) -> Tensor"
  1159. },
  1160. {
  1161. "name": "aten::convolution_overrideable.out(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, *, Tensor(a!) out) -> Tensor(a!)"
  1162. },
  1163. {
  1164. "name": "aten::affine_grid_generator(Tensor theta, SymInt[] size, bool align_corners) -> Tensor"
  1165. },
  1166. {
  1167. "name": "aten::affine_grid_generator.out(Tensor theta, SymInt[] size, bool align_corners, *, Tensor(a!) out) -> Tensor(a!)"
  1168. },
  1169. {
  1170. "name": "aten::unsqueeze_copy(Tensor self, int dim) -> Tensor"
  1171. },
  1172. {
  1173. "name": "aten::unsqueeze_copy.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)"
  1174. },
  1175. {
  1176. "name": "aten::split_copy.Tensor(Tensor self, SymInt split_size, int dim=0) -> Tensor[]"
  1177. },
  1178. {
  1179. "name": "aten::split_copy.Tensor_out(Tensor self, SymInt split_size, int dim=0, *, Tensor(a!)[] out) -> ()"
  1180. },
  1181. {
  1182. "name": "aten::slice_copy.Tensor(Tensor self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor"
  1183. },
  1184. {
  1185. "name": "aten::slice_copy.Tensor_out(Tensor self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1, *, Tensor(a!) out) -> Tensor(a!)"
  1186. },
  1187. {
  1188. "name": "aten::detach_copy(Tensor self) -> Tensor"
  1189. },
  1190. {
  1191. "name": "aten::detach_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  1192. },
  1193. {
  1194. "name": "aten::select_copy.int(Tensor self, int dim, SymInt index) -> Tensor"
  1195. },
  1196. {
  1197. "name": "aten::select_copy.int_out(Tensor self, int dim, SymInt index, *, Tensor(a!) out) -> Tensor(a!)"
  1198. },
  1199. {
  1200. "name": "aten::permute_copy(Tensor self, int[] dims) -> Tensor"
  1201. },
  1202. {
  1203. "name": "aten::permute_copy.out(Tensor self, int[] dims, *, Tensor(a!) out) -> Tensor(a!)"
  1204. },
  1205. {
  1206. "name": "aten::view_as_complex_copy(Tensor self) -> Tensor"
  1207. },
  1208. {
  1209. "name": "aten::view_as_complex_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  1210. },
  1211. {
  1212. "name": "aten::view_as_real_copy(Tensor self) -> Tensor"
  1213. },
  1214. {
  1215. "name": "aten::view_as_real_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  1216. },
  1217. {
  1218. "name": "aten::smooth_l1_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, float beta, *, Tensor(a!) grad_input) -> Tensor(a!)"
  1219. },
  1220. {
  1221. "name": "aten::smooth_l1_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, float beta) -> Tensor"
  1222. },
  1223. {
  1224. "name": "aten::mse_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction) -> Tensor"
  1225. },
  1226. {
  1227. "name": "aten::mse_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, *, Tensor(a!) grad_input) -> Tensor(a!)"
  1228. },
  1229. {
  1230. "name": "aten::take(Tensor self, Tensor index) -> Tensor",
  1231. "category": "Activation"
  1232. },
  1233. {
  1234. "name": "aten::take.out(Tensor self, Tensor index, *, Tensor(a!) out) -> Tensor(a!)"
  1235. },
  1236. {
  1237. "name": "aten::scatter_add_(Tensor(a!) self, int dim, Tensor index, Tensor src) -> Tensor(a!)"
  1238. },
  1239. {
  1240. "name": "aten::scatter_.src(Tensor(a!) self, int dim, Tensor index, Tensor src) -> Tensor(a!)"
  1241. },
  1242. {
  1243. "name": "aten::scatter_.value(Tensor(a!) self, int dim, Tensor index, Scalar value) -> Tensor(a!)"
  1244. },
  1245. {
  1246. "name": "aten::scatter_.reduce(Tensor(a!) self, int dim, Tensor index, Tensor src, *, str reduce) -> Tensor(a!)"
  1247. },
  1248. {
  1249. "name": "aten::scatter_.value_reduce(Tensor(a!) self, int dim, Tensor index, Scalar value, *, str reduce) -> Tensor(a!)"
  1250. },
  1251. {
  1252. "name": "aten::put_(Tensor(a!) self, Tensor index, Tensor source, bool accumulate=False) -> Tensor(a!)"
  1253. },
  1254. {
  1255. "name": "aten::_fake_quantize_learnable_per_tensor_affine_backward(Tensor grad, Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max, float grad_factor=1.) -> (Tensor, Tensor, Tensor)",
  1256. "category": "Quantization"
  1257. },
  1258. {
  1259. "name": "aten::_fake_quantize_learnable_per_tensor_affine(Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max, float grad_factor=1.) -> Tensor",
  1260. "category": "Quantization"
  1261. },
  1262. {
  1263. "name": "aten::_fake_quantize_learnable_per_tensor_affine.out(Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max, float grad_factor=1., *, Tensor(a!) out) -> Tensor(a!)",
  1264. "category": "Quantization"
  1265. },
  1266. {
  1267. "name": "aten::fake_quantize_per_tensor_affine_cachemask(Tensor self, float scale, int zero_point, int quant_min, int quant_max) -> (Tensor output, Tensor mask)",
  1268. "category": "Quantization"
  1269. },
  1270. {
  1271. "name": "aten::fake_quantize_per_tensor_affine_cachemask.out(Tensor self, float scale, int zero_point, int quant_min, int quant_max, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))",
  1272. "category": "Quantization"
  1273. },
  1274. {
  1275. "name": "aten::quantize_per_channel(Tensor self, Tensor scales, Tensor zero_points, int axis, ScalarType dtype) -> Tensor",
  1276. "category": "Quantization"
  1277. },
  1278. {
  1279. "name": "aten::quantize_per_channel.out(Tensor self, Tensor scales, Tensor zero_points, int axis, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!)"
  1280. },
  1281. {
  1282. "name": "aten::quantize_per_tensor(Tensor self, float scale, int zero_point, ScalarType dtype) -> Tensor",
  1283. "category": "Quantization"
  1284. },
  1285. {
  1286. "name": "aten::quantize_per_tensor.tensor_qparams(Tensor self, Tensor scale, Tensor zero_point, ScalarType dtype) -> Tensor",
  1287. "category": "Quantization"
  1288. },
  1289. {
  1290. "name": "aten::quantize_per_tensor.tensors(Tensor[] tensors, Tensor scales, Tensor zero_points, ScalarType dtype) -> Tensor[]",
  1291. "category": "Quantization"
  1292. },
  1293. {
  1294. "name": "aten::quantize_per_tensor.out(Tensor self, float scale, int zero_point, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!)"
  1295. },
  1296. {
  1297. "name": "aten::quantize_per_tensor.tensor_qparams_out(Tensor self, Tensor scale, Tensor zero_point, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!)"
  1298. },
  1299. {
  1300. "name": "aten::quantize_per_tensor.tensors_out(Tensor[] tensors, Tensor scales, Tensor zero_points, ScalarType dtype, *, Tensor(a!)[] out) -> ()"
  1301. },
  1302. {
  1303. "name": "aten::quantize_per_tensor_dynamic(Tensor self, ScalarType dtype, bool reduce_range) -> Tensor",
  1304. "category": "Quantization"
  1305. },
  1306. {
  1307. "name": "aten::quantize_per_tensor_dynamic.out(Tensor self, ScalarType dtype, bool reduce_range, *, Tensor(a!) out) -> Tensor(a!)"
  1308. },
  1309. {
  1310. "name": "aten::_weight_norm_interface(Tensor v, Tensor g, int dim=0) -> (Tensor, Tensor)"
  1311. },
  1312. {
  1313. "name": "aten::_weight_norm_interface.out(Tensor v, Tensor g, int dim=0, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))"
  1314. },
  1315. {
  1316. "name": "aten::_ctc_loss(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank=0, bool zero_infinity=False) -> (Tensor, Tensor)"
  1317. },
  1318. {
  1319. "name": "aten::_ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank=0, bool zero_infinity=False) -> (Tensor, Tensor)"
  1320. },
  1321. {
  1322. "name": "aten::_ctc_loss.out(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank=0, bool zero_infinity=False, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))"
  1323. },
  1324. {
  1325. "name": "aten::_ctc_loss.Tensor_out(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank=0, bool zero_infinity=False, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))"
  1326. },
  1327. {
  1328. "name": "aten::cumsum_(Tensor(a!) self, int dim, *, ScalarType? dtype=None) -> Tensor(a!)"
  1329. },
  1330. {
  1331. "name": "aten::cumsum_.dimname(Tensor(a!) self, str dim, *, ScalarType? dtype=None) -> Tensor(a!)"
  1332. },
  1333. {
  1334. "name": "aten::_transformer_encoder_layer_fwd(Tensor src, int embed_dim, int num_heads, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, bool use_gelu, bool norm_first, float eps, Tensor norm_weight_1, Tensor norm_bias_1, Tensor norm_weight_2, Tensor norm_bias_2, Tensor ffn_weight_1, Tensor ffn_bias_1, Tensor ffn_weight_2, Tensor ffn_bias_2, Tensor? mask=None, int? mask_type=None) -> Tensor"
  1335. },
  1336. {
  1337. "name": "aten::_transformer_encoder_layer_fwd.out(Tensor src, int embed_dim, int num_heads, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, bool use_gelu, bool norm_first, float eps, Tensor norm_weight_1, Tensor norm_bias_1, Tensor norm_weight_2, Tensor norm_bias_2, Tensor ffn_weight_1, Tensor ffn_bias_1, Tensor ffn_weight_2, Tensor ffn_bias_2, Tensor? mask=None, int? mask_type=None, *, Tensor(a!) out) -> Tensor(a!)"
  1338. },
  1339. {
  1340. "name": "aten::_make_per_tensor_quantized_tensor(Tensor self, float scale, int zero_point) -> Tensor"
  1341. },
  1342. {
  1343. "name": "aten::_make_per_tensor_quantized_tensor.out(Tensor self, float scale, int zero_point, *, Tensor(a!) out) -> Tensor(a!)"
  1344. },
  1345. {
  1346. "name": "aten::to_mkldnn(Tensor self, ScalarType? dtype=None) -> Tensor"
  1347. },
  1348. {
  1349. "name": "aten::to_mkldnn.out(Tensor self, ScalarType? dtype=None, *, Tensor(a!) out) -> Tensor(a!)"
  1350. },
  1351. {
  1352. "name": "aten::_weight_norm_interface_backward(Tensor grad_w, Tensor saved_v, Tensor saved_g, Tensor saved_norms, int dim) -> (Tensor, Tensor)"
  1353. },
  1354. {
  1355. "name": "aten::_weight_norm_interface_backward.out(Tensor grad_w, Tensor saved_v, Tensor saved_g, Tensor saved_norms, int dim, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))"
  1356. },
  1357. {
  1358. "name": "aten::_aminmax(Tensor self) -> (Tensor, Tensor)"
  1359. },
  1360. {
  1361. "name": "aten::_aminmax.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor, Tensor)"
  1362. },
  1363. {
  1364. "name": "aten::_aminmax.out(Tensor self, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))"
  1365. },
  1366. {
  1367. "name": "aten::_aminmax.dim_out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))"
  1368. },
  1369. {
  1370. "name": "aten::triu_(Tensor(a!) self, int diagonal=0) -> Tensor(a!)"
  1371. },
  1372. {
  1373. "name": "aten::tril_(Tensor(a!) self, int diagonal=0) -> Tensor(a!)"
  1374. },
  1375. {
  1376. "name": "aten::masked_scatter_(Tensor(a!) self, Tensor mask, Tensor source) -> Tensor(a!)"
  1377. },
  1378. {
  1379. "name": "aten::embedding_renorm_(Tensor(a!) self, Tensor indices, float max_norm, float norm_type) -> Tensor(a!)"
  1380. },
  1381. {
  1382. "name": "aten::bincount(Tensor self, Tensor? weights=None, int minlength=0) -> Tensor"
  1383. },
  1384. {
  1385. "name": "aten::bincount.out(Tensor self, Tensor? weights=None, int minlength=0, *, Tensor(a!) out) -> Tensor(a!)"
  1386. },
  1387. {
  1388. "name": "aten::_scaled_dot_product_flash_attention_for_cpu(Tensor query, Tensor key, Tensor value, float dropout_p=0., bool is_causal=False, *, Tensor? attn_mask=None, float? scale=None) -> (Tensor output, Tensor logsumexp)"
  1389. },
  1390. {
  1391. "name": "aten::remainder_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)"
  1392. },
  1393. {
  1394. "name": "aten::remainder_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)"
  1395. },
  1396. {
  1397. "name": "aten::dequantize.self(Tensor self) -> Tensor",
  1398. "category": "Quantization"
  1399. },
  1400. {
  1401. "name": "aten::dequantize.self_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  1402. },
  1403. {
  1404. "name": "aten::dequantize.tensors_out(Tensor[] tensors, *, Tensor(a!)[] out) -> ()"
  1405. },
  1406. {
  1407. "name": "aten::dequantize.tensors(Tensor[] tensors) -> Tensor[]",
  1408. "category": "Quantization"
  1409. },
  1410. {
  1411. "name": "aten::dequantize.tensor(Tensor qtensor) -> Tensor",
  1412. "category": "Quantization"
  1413. },
  1414. {
  1415. "name": "aten::dequantize.list(Tensor[] qtensors) -> Tensor[]",
  1416. "category": "Quantization"
  1417. },
  1418. {
  1419. "name": "aten::dequantize.any(Any tensors) -> Any",
  1420. "category": "Quantization"
  1421. },
  1422. {
  1423. "name": "aten::_nested_tensor_from_mask(Tensor t, Tensor mask, bool mask_check=True) -> Tensor"
  1424. },
  1425. {
  1426. "name": "aten::_nested_tensor_from_mask.out(Tensor t, Tensor mask, bool mask_check=True, *, Tensor(a!) out) -> Tensor(a!)"
  1427. },
  1428. {
  1429. "name": "aten::triu_indices(int row, int col, int offset=0, *, ScalarType? dtype=4, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  1430. },
  1431. {
  1432. "name": "aten::triu_indices.out(int row, int col, int offset=0, *, Tensor(a!) out) -> Tensor(a!)"
  1433. },
  1434. {
  1435. "name": "aten::tril_indices(int row, int col, int offset=0, *, ScalarType? dtype=4, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor",
  1436. "category": "Layer"
  1437. },
  1438. {
  1439. "name": "aten::tril_indices.out(int row, int col, int offset=0, *, Tensor(a!) out) -> Tensor(a!)"
  1440. },
  1441. {
  1442. "name": "aten::_sparse_coo_tensor_unsafe(Tensor indices, Tensor values, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool? is_coalesced=None) -> Tensor",
  1443. "category": "Tensor"
  1444. },
  1445. {
  1446. "name": "aten::sparse_coo_tensor.size(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor"
  1447. },
  1448. {
  1449. "name": "aten::sparse_coo_tensor.indices(Tensor indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool? is_coalesced=None) -> Tensor"
  1450. },
  1451. {
  1452. "name": "aten::sparse_coo_tensor.indices_size(Tensor indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool? is_coalesced=None) -> Tensor"
  1453. },
  1454. {
  1455. "name": "aten::sparse_coo_tensor.size_out(int[] size, *, Tensor(a!) out) -> Tensor(a!)"
  1456. },
  1457. {
  1458. "name": "aten::sparse_csr_tensor.crow_col_value_size(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor"
  1459. },
  1460. {
  1461. "name": "aten::sparse_csr_tensor.crow_col_value(Tensor crow_indices, Tensor col_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor"
  1462. },
  1463. {
  1464. "name": "aten::zeros.names(int[] size, *, str[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  1465. },
  1466. {
  1467. "name": "aten::zeros(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  1468. },
  1469. {
  1470. "name": "aten::zeros.names_out(int[] size, *, str[]? names, Tensor(a!) out) -> Tensor(a!)"
  1471. },
  1472. {
  1473. "name": "aten::zeros.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)"
  1474. },
  1475. {
  1476. "name": "aten::scalar_tensor(Scalar s, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  1477. },
  1478. {
  1479. "name": "aten::scalar_tensor.out(Scalar s, *, Tensor(a!) out) -> Tensor(a!)"
  1480. },
  1481. {
  1482. "name": "aten::ones.names(int[] size, *, str[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  1483. },
  1484. {
  1485. "name": "aten::ones(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  1486. },
  1487. {
  1488. "name": "aten::ones.names_out(int[] size, *, str[]? names, Tensor(a!) out) -> Tensor(a!)"
  1489. },
  1490. {
  1491. "name": "aten::ones.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)"
  1492. },
  1493. {
  1494. "name": "aten::eye(SymInt n, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  1495. },
  1496. {
  1497. "name": "aten::eye.m(SymInt n, SymInt m, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  1498. },
  1499. {
  1500. "name": "aten::eye.out(SymInt n, *, Tensor(a!) out) -> Tensor(a!)"
  1501. },
  1502. {
  1503. "name": "aten::eye.m_out(SymInt n, SymInt m, *, Tensor(a!) out) -> Tensor(a!)"
  1504. },
  1505. {
  1506. "name": "aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  1507. },
  1508. {
  1509. "name": "aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  1510. },
  1511. {
  1512. "name": "aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  1513. },
  1514. {
  1515. "name": "aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)"
  1516. },
  1517. {
  1518. "name": "aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)"
  1519. },
  1520. {
  1521. "name": "quantized::conv_transpose2d_dilation(__torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weights) -> int[]"
  1522. },
  1523. {
  1524. "name": "quantized::conv_transpose2d_padding(__torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weights) -> int[]"
  1525. },
  1526. {
  1527. "name": "quantized::sigmoid(Tensor qx, float output_scale, int output_zero_point) -> Tensor",
  1528. "category": "Activation"
  1529. },
  1530. {
  1531. "name": "quantized::leaky_relu(Tensor qx, Scalar negative_slope, bool inplace, float output_scale, int output_zero_point) -> Tensor",
  1532. "category": "Activation"
  1533. },
  1534. {
  1535. "name": "quantized::mul_scalar_relu_out(Tensor qa, Scalar b, Tensor(a!) out) -> Tensor(a!) out"
  1536. },
  1537. {
  1538. "name": "quantized::mul_scalar_relu_out.Tensor(Tensor qa, Tensor b, Tensor(a!) out) -> Tensor(a!) out"
  1539. },
  1540. {
  1541. "name": "quantized::mul_scalar_relu(Tensor qa, Scalar b) -> Tensor qc"
  1542. },
  1543. {
  1544. "name": "quantized::mul_scalar_relu.Tensor(Tensor qa, Tensor b) -> Tensor qc"
  1545. },
  1546. {
  1547. "name": "aten::int_repr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  1548. },
  1549. {
  1550. "name": "aten::int_repr(Tensor self) -> Tensor"
  1551. },
  1552. {
  1553. "name": "quantized::linear_relu(Tensor X, __torch__.torch.classes.quantized.LinearPackedParamsBase W_prepack, float Y_scale_i, int Y_zero_point_i) -> Tensor Y",
  1554. "category": "Layer"
  1555. },
  1556. {
  1557. "name": "aten::index_reduce_(Tensor(a!) self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True) -> Tensor(a!)"
  1558. },
  1559. {
  1560. "name": "aten::index_reduce(Tensor self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True) -> Tensor"
  1561. },
  1562. {
  1563. "name": "aten::index_reduce.out(Tensor self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True, Tensor(a!) out) -> Tensor(a!)"
  1564. },
  1565. {
  1566. "name": "aten::index_copy_(Tensor(a!) self, int dim, Tensor index, Tensor source) -> Tensor(a!)"
  1567. },
  1568. {
  1569. "name": "aten::index_copy_.dimname(Tensor(a!) self, str dim, Tensor index, Tensor source) -> Tensor(a!)"
  1570. },
  1571. {
  1572. "name": "aten::huber_loss_backward.out(Tensor grad_output, Tensor self, Tensor target, int reduction, float delta, *, Tensor(a!) grad_input) -> Tensor(a!)"
  1573. },
  1574. {
  1575. "name": "aten::huber_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, float delta) -> Tensor"
  1576. },
  1577. {
  1578. "name": "quantized::conv_transpose1d_prepack(Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] output_padding, int[] dilation, int groups) -> __torch__.torch.classes.quantized.Conv2dPackedParamsBase"
  1579. },
  1580. {
  1581. "name": "quantized::conv1d_prepack(Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups) -> __torch__.torch.classes.quantized.Conv2dPackedParamsBase"
  1582. },
  1583. {
  1584. "name": "quantized::conv_transpose2d_dynamic(Tensor qx, __torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weight, bool reduce_range=False) -> Tensor"
  1585. },
  1586. {
  1587. "name": "aten::histc(Tensor self, int bins=100, Scalar min=0, Scalar max=0) -> Tensor"
  1588. },
  1589. {
  1590. "name": "aten::histc.out(Tensor self, int bins=100, Scalar min=0, Scalar max=0, *, Tensor(a!) out) -> Tensor(a!)"
  1591. },
  1592. {
  1593. "name": "quantized::conv2d_dynamic(Tensor qx, __torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weight, bool reduce_range=False) -> Tensor"
  1594. },
  1595. {
  1596. "name": "aten::hann_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  1597. },
  1598. {
  1599. "name": "aten::hann_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  1600. },
  1601. {
  1602. "name": "aten::hann_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!)"
  1603. },
  1604. {
  1605. "name": "aten::hann_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!)"
  1606. },
  1607. {
  1608. "name": "quantized::conv3d_relu.new(Tensor qx, __torch__.torch.classes.quantized.Conv3dPackedParamsBase packed_weight, float output_scale, int output_zero_point) -> Tensor",
  1609. "category": "Layer"
  1610. },
  1611. {
  1612. "name": "quantized::conv3d_relu(Tensor qx, __torch__.torch.classes.quantized.Conv3dPackedParamsBase weight, int[] stride, int[] padding, int[] dilation, int groups, float output_scale, int output_zero_point) -> Tensor",
  1613. "category": "Layer"
  1614. },
  1615. {
  1616. "name": "aten::hamming_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  1617. },
  1618. {
  1619. "name": "aten::hamming_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  1620. },
  1621. {
  1622. "name": "aten::hamming_window.periodic_alpha(int window_length, bool periodic, float alpha, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  1623. },
  1624. {
  1625. "name": "aten::hamming_window.periodic_alpha_beta(int window_length, bool periodic, float alpha, float beta, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  1626. },
  1627. {
  1628. "name": "aten::hamming_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!)"
  1629. },
  1630. {
  1631. "name": "aten::hamming_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!)"
  1632. },
  1633. {
  1634. "name": "aten::hamming_window.periodic_alpha_out(int window_length, bool periodic, float alpha, *, Tensor(a!) out) -> Tensor(a!)"
  1635. },
  1636. {
  1637. "name": "aten::hamming_window.periodic_alpha_beta_out(int window_length, bool periodic, float alpha, float beta, *, Tensor(a!) out) -> Tensor(a!)"
  1638. },
  1639. {
  1640. "name": "quantized::conv3d.new(Tensor qx, __torch__.torch.classes.quantized.Conv3dPackedParamsBase packed_weight, float output_scale, int output_zero_point) -> Tensor",
  1641. "category": "Layer"
  1642. },
  1643. {
  1644. "name": "quantized::conv3d(Tensor qx, __torch__.torch.classes.quantized.Conv3dPackedParamsBase weight, int[] stride, int[] padding, int[] dilation, int groups, float output_scale, int output_zero_point) -> Tensor",
  1645. "category": "Layer"
  1646. },
  1647. {
  1648. "name": "quantized::conv2d.new(Tensor qx, __torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weight, float output_scale, int output_zero_point) -> Tensor",
  1649. "category": "Layer"
  1650. },
  1651. {
  1652. "name": "quantized::conv2d(Tensor qx, __torch__.torch.classes.quantized.Conv2dPackedParamsBase weight, int[] stride, int[] padding, int[] dilation, int groups, float output_scale, int output_zero_point) -> Tensor",
  1653. "category": "Layer"
  1654. },
  1655. {
  1656. "name": "quantized::cat(Tensor[] qx, int dim, float? scale, int? zero_point) -> Tensor",
  1657. "category": "Tensor"
  1658. },
  1659. {
  1660. "name": "quantized::batch_norm2d_relu(Tensor qx, Tensor? weight, Tensor? bias, Tensor mean, Tensor var, float eps, float output_scale, int output_zero_point) -> Tensor",
  1661. "category": "Normalization"
  1662. },
  1663. {
  1664. "name": "quantized::batch_norm2d(Tensor qx, Tensor? weight, Tensor? bias, Tensor mean, Tensor var, float eps, float output_scale, int output_zero_point) -> Tensor",
  1665. "category": "Normalization"
  1666. },
  1667. {
  1668. "name": "aten::gelu_(Tensor(a!) self, *, str approximate=\"none\") -> Tensor(a!)",
  1669. "category": "Activation"
  1670. },
  1671. {
  1672. "name": "quantized::add_scalar_out(Tensor qa, Scalar b, Tensor(a!) out) -> Tensor(a!) out"
  1673. },
  1674. {
  1675. "name": "quantized::add_scalar_out.Tensor(Tensor qa, Tensor b, Tensor(a!) out) -> Tensor(a!) out"
  1676. },
  1677. {
  1678. "name": "aten::ge_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)"
  1679. },
  1680. {
  1681. "name": "aten::ge_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)"
  1682. },
  1683. {
  1684. "name": "quantized::add_scalar(Tensor qa, Scalar b) -> Tensor qc"
  1685. },
  1686. {
  1687. "name": "quantized::add_scalar.Tensor(Tensor qa, Tensor b) -> Tensor qc"
  1688. },
  1689. {
  1690. "name": "aten::full.names(int[] size, Scalar fill_value, *, str[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  1691. },
  1692. {
  1693. "name": "aten::full(SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  1694. },
  1695. {
  1696. "name": "aten::full.names_out(int[] size, Scalar fill_value, *, str[]? names, Tensor(a!) out) -> Tensor(a!)"
  1697. },
  1698. {
  1699. "name": "aten::full.out(SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!)"
  1700. },
  1701. {
  1702. "name": "quantized::quantized_rnn_tanh_cell_dynamic(Tensor input, Tensor hx, __torch__.torch.classes.quantized.LinearPackedParamsBase w_ih, __torch__.torch.classes.quantized.LinearPackedParamsBase w_hh, Tensor b_ih, Tensor b_hh) -> Tensor"
  1703. },
  1704. {
  1705. "name": "quantized::quantized_gru_cell_dynamic(Tensor input, Tensor hx, __torch__.torch.classes.quantized.LinearPackedParamsBase w_ih, __torch__.torch.classes.quantized.LinearPackedParamsBase w_hh, Tensor b_ih, Tensor b_hh) -> Tensor"
  1706. },
  1707. {
  1708. "name": "quantized::make_quantized_cell_params_dynamic(__torch__.torch.classes.quantized.LinearPackedParamsBase w_ih, __torch__.torch.classes.quantized.LinearPackedParamsBase w_hh, Tensor bias_ih, Tensor bias_hh, bool reduce_range=False) -> __torch__.torch.classes.rnn.CellParamsBase"
  1709. },
  1710. {
  1711. "name": "quantized::linear_relu_dynamic(Tensor X, __torch__.torch.classes.quantized.LinearPackedParamsBase W_prepack, bool reduce_range=False) -> Tensor Y",
  1712. "category": "Layer"
  1713. },
  1714. {
  1715. "name": "aten::square_(Tensor(a!) self) -> Tensor(a!)"
  1716. },
  1717. {
  1718. "name": "aten::is_pinned(Tensor self, Device? device=None) -> bool"
  1719. },
  1720. {
  1721. "name": "quantized::linear_dynamic(Tensor X, __torch__.torch.classes.quantized.LinearPackedParamsBase W_prepack, bool reduce_range=False) -> Tensor Y",
  1722. "category": "Layer"
  1723. },
  1724. {
  1725. "name": "aten::equal(Tensor self, Tensor other) -> bool"
  1726. },
  1727. {
  1728. "name": "aten::clamp_(Tensor(a!) self, Scalar? min=None, Scalar? max=None) -> Tensor(a!)"
  1729. },
  1730. {
  1731. "name": "aten::clamp_.Tensor(Tensor(a!) self, Tensor? min=None, Tensor? max=None) -> Tensor(a!)"
  1732. },
  1733. {
  1734. "name": "aten::addmv_(Tensor(a!) self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)"
  1735. },
  1736. {
  1737. "name": "aten::addcmul_(Tensor(a!) self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor(a!)"
  1738. },
  1739. {
  1740. "name": "aten::_add_relu_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)"
  1741. },
  1742. {
  1743. "name": "aten::_add_relu_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)"
  1744. },
  1745. {
  1746. "name": "aten::_add_relu.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor"
  1747. },
  1748. {
  1749. "name": "aten::_add_relu.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)"
  1750. },
  1751. {
  1752. "name": "aten::_add_relu.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor"
  1753. },
  1754. {
  1755. "name": "aten::_add_relu.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!)"
  1756. },
  1757. {
  1758. "name": "aten::feature_alpha_dropout(Tensor input, float p, bool train) -> Tensor",
  1759. "category": "Dropout"
  1760. },
  1761. {
  1762. "name": "aten::alpha_dropout(Tensor input, float p, bool train) -> Tensor",
  1763. "category": "Dropout"
  1764. },
  1765. {
  1766. "name": "aten::feature_dropout(Tensor input, float p, bool train) -> Tensor",
  1767. "category": "Dropout"
  1768. },
  1769. {
  1770. "name": "aten::block_diag(Tensor[] tensors) -> Tensor"
  1771. },
  1772. {
  1773. "name": "aten::block_diag.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)"
  1774. },
  1775. {
  1776. "name": "aten::split_with_sizes_copy(Tensor self, SymInt[] split_sizes, int dim=0) -> Tensor[]"
  1777. },
  1778. {
  1779. "name": "aten::split_with_sizes_copy.out(Tensor self, SymInt[] split_sizes, int dim=0, *, Tensor(a!)[] out) -> ()"
  1780. },
  1781. {
  1782. "name": "aten::unsafe_split.Tensor(Tensor self, SymInt split_size, int dim=0) -> Tensor[]",
  1783. "category": "Tensor"
  1784. },
  1785. {
  1786. "name": "aten::unsafe_split.Tensor_out(Tensor self, SymInt split_size, int dim=0, *, Tensor(a!)[] out) -> ()"
  1787. },
  1788. {
  1789. "name": "aten::narrow_copy(Tensor self, int dim, SymInt start, SymInt length) -> Tensor"
  1790. },
  1791. {
  1792. "name": "aten::narrow_copy.out(Tensor self, int dim, SymInt start, SymInt length, *, Tensor(a!) out) -> Tensor(a!)"
  1793. },
  1794. {
  1795. "name": "aten::diag_embed(Tensor self, int offset=0, int dim1=-2, int dim2=-1) -> Tensor"
  1796. },
  1797. {
  1798. "name": "aten::diag_embed.out(Tensor self, int offset=0, int dim1=-2, int dim2=-1, *, Tensor(a!) out) -> Tensor(a!)"
  1799. },
  1800. {
  1801. "name": "aten::view_copy(Tensor self, SymInt[] size) -> Tensor"
  1802. },
  1803. {
  1804. "name": "aten::view_copy.dtype(Tensor self, ScalarType dtype) -> Tensor"
  1805. },
  1806. {
  1807. "name": "aten::view_copy.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)"
  1808. },
  1809. {
  1810. "name": "aten::view_copy.dtype_out(Tensor self, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!)"
  1811. },
  1812. {
  1813. "name": "aten::roll(Tensor self, SymInt[1] shifts, int[1] dims=[]) -> Tensor",
  1814. "category": "Layer"
  1815. },
  1816. {
  1817. "name": "aten::roll.out(Tensor self, SymInt[1] shifts, int[1] dims=[], *, Tensor(a!) out) -> Tensor(a!)"
  1818. },
  1819. {
  1820. "name": "aten::repeat(Tensor self, SymInt[] repeats) -> Tensor"
  1821. },
  1822. {
  1823. "name": "aten::repeat.out(Tensor self, SymInt[] repeats, *, Tensor(a!) out) -> Tensor(a!)"
  1824. },
  1825. {
  1826. "name": "aten::triu(Tensor self, int diagonal=0) -> Tensor"
  1827. },
  1828. {
  1829. "name": "aten::triu.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!)"
  1830. },
  1831. {
  1832. "name": "aten::tril(Tensor self, int diagonal=0) -> Tensor",
  1833. "category": "Layer"
  1834. },
  1835. {
  1836. "name": "aten::tril.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!)"
  1837. },
  1838. {
  1839. "name": "aten::flip(Tensor self, int[] dims) -> Tensor"
  1840. },
  1841. {
  1842. "name": "aten::flip.out(Tensor self, int[] dims, *, Tensor(a!) out) -> Tensor(a!)"
  1843. },
  1844. {
  1845. "name": "aten::tanh_(Tensor(a!) self) -> Tensor(a!)",
  1846. "category": "Activation"
  1847. },
  1848. {
  1849. "name": "aten::softshrink(Tensor self, Scalar lambd=0.5) -> Tensor"
  1850. },
  1851. {
  1852. "name": "aten::softshrink.out(Tensor self, Scalar lambd=0.5, *, Tensor(a!) out) -> Tensor(a!)"
  1853. },
  1854. {
  1855. "name": "aten::silu(Tensor self) -> Tensor",
  1856. "category": "Activation"
  1857. },
  1858. {
  1859. "name": "aten::silu.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  1860. },
  1861. {
  1862. "name": "aten::silu_(Tensor(a!) self) -> Tensor(a!)",
  1863. "category": "Activation"
  1864. },
  1865. {
  1866. "name": "aten::sigmoid_(Tensor(a!) self) -> Tensor(a!)",
  1867. "category": "Activation"
  1868. },
  1869. {
  1870. "name": "quantized::add_scalar_relu(Tensor qa, Scalar b) -> Tensor qc"
  1871. },
  1872. {
  1873. "name": "quantized::add_scalar_relu.Tensor(Tensor qa, Tensor b) -> Tensor qc"
  1874. },
  1875. {
  1876. "name": "aten::gelu(Tensor self, *, str approximate=\"none\") -> Tensor",
  1877. "category": "Activation"
  1878. },
  1879. {
  1880. "name": "aten::gelu.out(Tensor self, *, str approximate=\"none\", Tensor(a!) out) -> Tensor(a!)"
  1881. },
  1882. {
  1883. "name": "aten::celu(Tensor self, Scalar alpha=1.) -> Tensor",
  1884. "category": "Activation"
  1885. },
  1886. {
  1887. "name": "aten::celu.out(Tensor self, Scalar alpha=1., *, Tensor(a!) out) -> Tensor(a!)"
  1888. },
  1889. {
  1890. "name": "aten::celu_(Tensor(a!) self, Scalar alpha=1.) -> Tensor(a!)"
  1891. },
  1892. {
  1893. "name": "aten::relu_(Tensor(a!) self) -> Tensor(a!)",
  1894. "category": "Activation"
  1895. },
  1896. {
  1897. "name": "quantized::layer_norm(Tensor input, int[] normalized_shape, Tensor? weight, Tensor? bias, float eps, float output_scale, int output_zero_point) -> Tensor",
  1898. "category": "Normalization"
  1899. },
  1900. {
  1901. "name": "aten::leaky_relu(Tensor self, Scalar negative_slope=0.01) -> Tensor",
  1902. "category": "Activation"
  1903. },
  1904. {
  1905. "name": "aten::leaky_relu.out(Tensor self, Scalar negative_slope=0.01, *, Tensor(a!) out) -> Tensor(a!)"
  1906. },
  1907. {
  1908. "name": "aten::leaky_relu_(Tensor(a!) self, Scalar negative_slope=0.01) -> Tensor(a!)",
  1909. "category": "Activation"
  1910. },
  1911. {
  1912. "name": "aten::hardswish(Tensor self) -> Tensor",
  1913. "category": "Activation"
  1914. },
  1915. {
  1916. "name": "aten::hardswish.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  1917. },
  1918. {
  1919. "name": "aten::hardswish_(Tensor(a!) self) -> Tensor(a!)",
  1920. "category": "Activation"
  1921. },
  1922. {
  1923. "name": "aten::hardtanh(Tensor self, Scalar min_val=-1, Scalar max_val=1) -> Tensor",
  1924. "category": "Activation"
  1925. },
  1926. {
  1927. "name": "aten::hardtanh.out(Tensor self, Scalar min_val=-1, Scalar max_val=1, *, Tensor(a!) out) -> Tensor(a!)",
  1928. "category": "Activation"
  1929. },
  1930. {
  1931. "name": "aten::hardtanh_(Tensor(a!) self, Scalar min_val=-1, Scalar max_val=1) -> Tensor(a!)",
  1932. "category": "Activation"
  1933. },
  1934. {
  1935. "name": "aten::hardsigmoid(Tensor self) -> Tensor",
  1936. "category": "Activation"
  1937. },
  1938. {
  1939. "name": "aten::hardsigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  1940. },
  1941. {
  1942. "name": "aten::hardsigmoid_(Tensor(a!) self) -> Tensor(a!)",
  1943. "category": "Activation"
  1944. },
  1945. {
  1946. "name": "aten::hardshrink(Tensor self, Scalar lambd=0.5) -> Tensor"
  1947. },
  1948. {
  1949. "name": "aten::hardshrink.out(Tensor self, Scalar lambd=0.5, *, Tensor(a!) out) -> Tensor(a!)"
  1950. },
  1951. {
  1952. "name": "aten::elu(Tensor self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1) -> Tensor",
  1953. "category": "Activation"
  1954. },
  1955. {
  1956. "name": "aten::elu.out(Tensor self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1, *, Tensor(a!) out) -> Tensor(a!)"
  1957. },
  1958. {
  1959. "name": "aten::elu_(Tensor(a!) self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1) -> Tensor(a!)",
  1960. "category": "Activation"
  1961. },
  1962. {
  1963. "name": "aten::erfc(Tensor self) -> Tensor"
  1964. },
  1965. {
  1966. "name": "aten::erfc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  1967. },
  1968. {
  1969. "name": "aten::erfc.int(int a) -> float"
  1970. },
  1971. {
  1972. "name": "aten::erfc.float(float a) -> float"
  1973. },
  1974. {
  1975. "name": "aten::erfc.Scalar(Scalar a) -> Scalar"
  1976. },
  1977. {
  1978. "name": "aten::threshold(Tensor self, Scalar threshold, Scalar value) -> Tensor",
  1979. "category": "Activation"
  1980. },
  1981. {
  1982. "name": "aten::threshold.out(Tensor self, Scalar threshold, Scalar value, *, Tensor(a!) out) -> Tensor(a!)"
  1983. },
  1984. {
  1985. "name": "aten::threshold_(Tensor(a!) self, Scalar threshold, Scalar value) -> Tensor(a!)",
  1986. "category": "Activation"
  1987. },
  1988. {
  1989. "name": "aten::tan_(Tensor(a!) self) -> Tensor(a!)"
  1990. },
  1991. {
  1992. "name": "aten::sqrt_(Tensor(a!) self) -> Tensor(a!)"
  1993. },
  1994. {
  1995. "name": "aten::nan_to_num(Tensor self, float? nan=None, float? posinf=None, float? neginf=None) -> Tensor"
  1996. },
  1997. {
  1998. "name": "aten::nan_to_num.out(Tensor self, float? nan=None, float? posinf=None, float? neginf=None, *, Tensor(a!) out) -> Tensor(a!)"
  1999. },
  2000. {
  2001. "name": "aten::mvlgamma(Tensor self, int p) -> Tensor"
  2002. },
  2003. {
  2004. "name": "aten::mvlgamma.out(Tensor self, int p, *, Tensor(a!) out) -> Tensor(a!)"
  2005. },
  2006. {
  2007. "name": "aten::mish(Tensor self) -> Tensor",
  2008. "category": "Activation"
  2009. },
  2010. {
  2011. "name": "aten::mish.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  2012. },
  2013. {
  2014. "name": "aten::mish_(Tensor(a!) self) -> Tensor(a!)",
  2015. "category": "Activation"
  2016. },
  2017. {
  2018. "name": "aten::logit_(Tensor(a!) self, float? eps=None) -> Tensor(a!)"
  2019. },
  2020. {
  2021. "name": "aten::log2_(Tensor(a!) self) -> Tensor(a!)"
  2022. },
  2023. {
  2024. "name": "aten::log1p_(Tensor(a!) self) -> Tensor(a!)"
  2025. },
  2026. {
  2027. "name": "aten::log10_(Tensor(a!) self) -> Tensor(a!)"
  2028. },
  2029. {
  2030. "name": "aten::log_(Tensor(a!) self) -> Tensor(a!)"
  2031. },
  2032. {
  2033. "name": "quantized::linear_prepack_fp16(Tensor W, Tensor? B=None) -> __torch__.torch.classes.quantized.LinearPackedParamsBase W_prepack"
  2034. },
  2035. {
  2036. "name": "aten::isinf(Tensor self) -> Tensor"
  2037. },
  2038. {
  2039. "name": "aten::isinf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  2040. },
  2041. {
  2042. "name": "aten::isinf.float(float a) -> bool"
  2043. },
  2044. {
  2045. "name": "aten::isinf.complex(complex a) -> bool"
  2046. },
  2047. {
  2048. "name": "quantized::linear_prepack_fp16_legacy(Tensor W, Tensor? B=None) -> Tensor W_prepack"
  2049. },
  2050. {
  2051. "name": "aten::isnan(Tensor self) -> Tensor"
  2052. },
  2053. {
  2054. "name": "aten::isnan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  2055. },
  2056. {
  2057. "name": "aten::isnan.float(float a) -> bool"
  2058. },
  2059. {
  2060. "name": "aten::isnan.complex(complex a) -> bool"
  2061. },
  2062. {
  2063. "name": "aten::erf(Tensor self) -> Tensor"
  2064. },
  2065. {
  2066. "name": "aten::erf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  2067. },
  2068. {
  2069. "name": "aten::erf.int(int a) -> float"
  2070. },
  2071. {
  2072. "name": "aten::erf.float(float a) -> float"
  2073. },
  2074. {
  2075. "name": "aten::erf.Scalar(Scalar a) -> Scalar"
  2076. },
  2077. {
  2078. "name": "aten::bitwise_not(Tensor self) -> Tensor"
  2079. },
  2080. {
  2081. "name": "aten::bitwise_not.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  2082. },
  2083. {
  2084. "name": "aten::bitwise_not_(Tensor(a!) self) -> Tensor(a!)"
  2085. },
  2086. {
  2087. "name": "aten::atanh_(Tensor(a!) self) -> Tensor(a!)"
  2088. },
  2089. {
  2090. "name": "aten::atan_(Tensor(a!) self) -> Tensor(a!)"
  2091. },
  2092. {
  2093. "name": "aten::asinh(Tensor self) -> Tensor"
  2094. },
  2095. {
  2096. "name": "aten::asinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  2097. },
  2098. {
  2099. "name": "aten::asinh.int(int a) -> float"
  2100. },
  2101. {
  2102. "name": "aten::asinh.float(float a) -> float"
  2103. },
  2104. {
  2105. "name": "aten::asinh.complex(complex a) -> complex"
  2106. },
  2107. {
  2108. "name": "aten::asinh.Scalar(Scalar a) -> Scalar"
  2109. },
  2110. {
  2111. "name": "aten::sign_(Tensor(a!) self) -> Tensor(a!)"
  2112. },
  2113. {
  2114. "name": "aten::as_strided_scatter(Tensor self, Tensor src, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor"
  2115. },
  2116. {
  2117. "name": "aten::as_strided_scatter.out(Tensor self, Tensor src, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None, *, Tensor(a!) out) -> Tensor(a!)"
  2118. },
  2119. {
  2120. "name": "aten::gather(Tensor self, int dim, Tensor index, *, bool sparse_grad=False) -> Tensor",
  2121. "category": "Transform"
  2122. },
  2123. {
  2124. "name": "aten::gather.out(Tensor self, int dim, Tensor index, *, bool sparse_grad=False, Tensor(a!) out) -> Tensor(a!)",
  2125. "category": "Transform"
  2126. },
  2127. {
  2128. "name": "aten::gather.dimname(Tensor self, str dim, Tensor index, *, bool sparse_grad=False) -> Tensor",
  2129. "category": "Transform"
  2130. },
  2131. {
  2132. "name": "aten::gather.dimname_out(Tensor self, str dim, Tensor index, *, bool sparse_grad=False, Tensor(a!) out) -> Tensor(a!)",
  2133. "category": "Transform"
  2134. },
  2135. {
  2136. "name": "aten::index_add(Tensor self, int dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor"
  2137. },
  2138. {
  2139. "name": "aten::index_add.out(Tensor self, int dim, Tensor index, Tensor source, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)"
  2140. },
  2141. {
  2142. "name": "aten::index_add.dimname(Tensor self, str dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor"
  2143. },
  2144. {
  2145. "name": "aten::index_add_(Tensor(a!) self, int dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor(a!)"
  2146. },
  2147. {
  2148. "name": "quantized::embedding_bag_byte_rowwise_offsets(Tensor weight, Tensor indices, Tensor? offsets=None, bool scale_grad_by_freq=False, int mode=0, bool pruned_weights=False, Tensor? per_sample_weights=None, Tensor? compressed_indices_mapping=None, bool include_last_offset=False) -> Tensor"
  2149. },
  2150. {
  2151. "name": "aten::index_fill.int_Tensor(Tensor self, int dim, Tensor index, Tensor value) -> Tensor"
  2152. },
  2153. {
  2154. "name": "aten::index_fill.int_Scalar(Tensor self, int dim, Tensor index, Scalar value) -> Tensor"
  2155. },
  2156. {
  2157. "name": "aten::index_fill.Dimname_Scalar(Tensor self, str dim, Tensor index, Scalar value) -> Tensor"
  2158. },
  2159. {
  2160. "name": "aten::index_fill.Dimname_Tensor(Tensor self, str dim, Tensor index, Tensor value) -> Tensor"
  2161. },
  2162. {
  2163. "name": "aten::index_fill.int_Scalar_out(Tensor self, int dim, Tensor index, Scalar value, *, Tensor(a!) out) -> Tensor(a!)"
  2164. },
  2165. {
  2166. "name": "aten::index_fill.int_Tensor_out(Tensor self, int dim, Tensor index, Tensor value, *, Tensor(a!) out) -> Tensor(a!)"
  2167. },
  2168. {
  2169. "name": "quantized::embedding_bag_4bit_rowwise_offsets(Tensor weight, Tensor indices, Tensor? offsets=None, bool scale_grad_by_freq=False, int mode=0, bool pruned_weights=False, Tensor? per_sample_weights=None, Tensor? compressed_indices_mapping=None, bool include_last_offset=False) -> Tensor"
  2170. },
  2171. {
  2172. "name": "aten::index_fill_.int_Tensor(Tensor(a!) self, int dim, Tensor index, Tensor value) -> Tensor(a!)"
  2173. },
  2174. {
  2175. "name": "aten::index_fill_.int_Scalar(Tensor(a!) self, int dim, Tensor index, Scalar value) -> Tensor(a!)"
  2176. },
  2177. {
  2178. "name": "aten::index_fill_.Dimname_Scalar(Tensor(a!) self, str dim, Tensor index, Scalar value) -> Tensor(a!)"
  2179. },
  2180. {
  2181. "name": "aten::index_fill_.Dimname_Tensor(Tensor(a!) self, str dim, Tensor index, Tensor value) -> Tensor(a!)"
  2182. },
  2183. {
  2184. "name": "aten::index_select(Tensor self, int dim, Tensor index) -> Tensor"
  2185. },
  2186. {
  2187. "name": "aten::index_select.out(Tensor self, int dim, Tensor index, *, Tensor(a!) out) -> Tensor(a!)"
  2188. },
  2189. {
  2190. "name": "aten::index_select.dimname(Tensor self, str dim, Tensor index) -> Tensor"
  2191. },
  2192. {
  2193. "name": "aten::index_select.dimname_out(Tensor self, str dim, Tensor index, *, Tensor(a!) out) -> Tensor(a!)"
  2194. },
  2195. {
  2196. "name": "aten::logit(Tensor self, float? eps=None) -> Tensor"
  2197. },
  2198. {
  2199. "name": "aten::logit.out(Tensor self, float? eps=None, *, Tensor(a!) out) -> Tensor(a!)"
  2200. },
  2201. {
  2202. "name": "aten::index_put_(Tensor(a!) self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor(a!)"
  2203. },
  2204. {
  2205. "name": "aten::index_put_.hacked_twin(Tensor(a!) self, Tensor[] indices, Tensor values, bool accumulate=False) -> Tensor(a!)"
  2206. },
  2207. {
  2208. "name": "quantized::celu(Tensor self, float output_scale, int output_zero_point, Scalar alpha=1) -> Tensor",
  2209. "category": "Activation"
  2210. },
  2211. {
  2212. "name": "aten::index.Tensor(Tensor self, Tensor?[] indices) -> Tensor"
  2213. },
  2214. {
  2215. "name": "aten::index.Tensor_out(Tensor self, Tensor?[] indices, *, Tensor(a!) out) -> Tensor(a!)"
  2216. },
  2217. {
  2218. "name": "aten::index.Tensor_hacked_twin(Tensor self, Tensor[] indices) -> Tensor"
  2219. },
  2220. {
  2221. "name": "aten::index.str(str self, str substr, int start=0, int end=-1) -> int"
  2222. },
  2223. {
  2224. "name": "aten::index.list_int(int[] self, int el) -> int"
  2225. },
  2226. {
  2227. "name": "aten::index.list_float(float[] self, float el) -> int"
  2228. },
  2229. {
  2230. "name": "aten::index.list_bool(bool[] self, bool el) -> int"
  2231. },
  2232. {
  2233. "name": "aten::index.list_Tensor(Tensor[] self, Tensor el) -> int"
  2234. },
  2235. {
  2236. "name": "aten::index.list_str(str[] self, str el) -> int"
  2237. },
  2238. {
  2239. "name": "aten::_softmax(Tensor self, int dim, bool half_to_float) -> Tensor",
  2240. "category": "Activation"
  2241. },
  2242. {
  2243. "name": "aten::_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!)"
  2244. },
  2245. {
  2246. "name": "aten::median(Tensor self) -> Tensor"
  2247. },
  2248. {
  2249. "name": "aten::median.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)"
  2250. },
  2251. {
  2252. "name": "aten::median.dim_values(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)"
  2253. },
  2254. {
  2255. "name": "aten::median.names_dim(Tensor self, str dim, bool keepdim=False) -> (Tensor values, Tensor indices)"
  2256. },
  2257. {
  2258. "name": "aten::median.names_dim_values(Tensor self, str dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)"
  2259. },
  2260. {
  2261. "name": "aten::median.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  2262. },
  2263. {
  2264. "name": "aten::mean(Tensor self, *, ScalarType? dtype=None) -> Tensor"
  2265. },
  2266. {
  2267. "name": "aten::mean.dim(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor"
  2268. },
  2269. {
  2270. "name": "aten::mean.names_dim(Tensor self, str[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor"
  2271. },
  2272. {
  2273. "name": "aten::mean.names_out(Tensor self, str[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)"
  2274. },
  2275. {
  2276. "name": "aten::mean.out(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)"
  2277. },
  2278. {
  2279. "name": "aten::mean.dtype_out(Tensor self, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)"
  2280. },
  2281. {
  2282. "name": "aten::logcumsumexp(Tensor self, int dim) -> Tensor"
  2283. },
  2284. {
  2285. "name": "aten::logcumsumexp.dimname(Tensor self, str dim) -> Tensor"
  2286. },
  2287. {
  2288. "name": "aten::logcumsumexp.dimname_out(Tensor self, str dim, *, Tensor(a!) out) -> Tensor(a!)"
  2289. },
  2290. {
  2291. "name": "aten::logcumsumexp.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)"
  2292. },
  2293. {
  2294. "name": "aten::kthvalue(Tensor self, int k, int dim=-1, bool keepdim=False) -> (Tensor values, Tensor indices)"
  2295. },
  2296. {
  2297. "name": "aten::kthvalue.dimname(Tensor self, int k, str dim, bool keepdim=False) -> (Tensor values, Tensor indices)"
  2298. },
  2299. {
  2300. "name": "aten::kthvalue.dimname_out(Tensor self, int k, str dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)"
  2301. },
  2302. {
  2303. "name": "aten::kthvalue.values(Tensor self, int k, int dim=-1, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)"
  2304. },
  2305. {
  2306. "name": "aten::cummax(Tensor self, int dim) -> (Tensor values, Tensor indices)"
  2307. },
  2308. {
  2309. "name": "aten::cummax.dimname(Tensor self, str dim) -> (Tensor values, Tensor indices)"
  2310. },
  2311. {
  2312. "name": "aten::cummax.dimname_out(Tensor self, str dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)"
  2313. },
  2314. {
  2315. "name": "aten::cummax.out(Tensor self, int dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)"
  2316. },
  2317. {
  2318. "name": "aten::bucketize.Tensor(Tensor self, Tensor boundaries, *, bool out_int32=False, bool right=False) -> Tensor"
  2319. },
  2320. {
  2321. "name": "aten::bucketize.Scalar(Scalar self, Tensor boundaries, *, bool out_int32=False, bool right=False) -> Tensor"
  2322. },
  2323. {
  2324. "name": "aten::bucketize.Tensor_out(Tensor self, Tensor boundaries, *, bool out_int32=False, bool right=False, Tensor(a!) out) -> Tensor(a!)"
  2325. },
  2326. {
  2327. "name": "aten::bucketize.Scalar_out(Scalar self, Tensor boundaries, *, bool out_int32=False, bool right=False, Tensor(a!) out) -> Tensor(a!)"
  2328. },
  2329. {
  2330. "name": "aten::argmin(Tensor self, int? dim=None, bool keepdim=False) -> Tensor"
  2331. },
  2332. {
  2333. "name": "aten::argmin.out(Tensor self, int? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)"
  2334. },
  2335. {
  2336. "name": "aten::argmax(Tensor self, int? dim=None, bool keepdim=False) -> Tensor"
  2337. },
  2338. {
  2339. "name": "aten::argmax.out(Tensor self, int? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)"
  2340. },
  2341. {
  2342. "name": "aten::scatter.value(Tensor self, int dim, Tensor index, Scalar value) -> Tensor"
  2343. },
  2344. {
  2345. "name": "aten::scatter.src(Tensor self, int dim, Tensor index, Tensor src) -> Tensor"
  2346. },
  2347. {
  2348. "name": "aten::scatter.reduce(Tensor self, int dim, Tensor index, Tensor src, *, str reduce) -> Tensor"
  2349. },
  2350. {
  2351. "name": "aten::scatter.value_reduce(Tensor self, int dim, Tensor index, Scalar value, *, str reduce) -> Tensor"
  2352. },
  2353. {
  2354. "name": "aten::scatter.src_out(Tensor self, int dim, Tensor index, Tensor src, *, Tensor(a!) out) -> Tensor(a!)"
  2355. },
  2356. {
  2357. "name": "aten::scatter.value_out(Tensor self, int dim, Tensor index, Scalar value, *, Tensor(a!) out) -> Tensor(a!)"
  2358. },
  2359. {
  2360. "name": "aten::scatter.reduce_out(Tensor self, int dim, Tensor index, Tensor src, *, str reduce, Tensor(a!) out) -> Tensor(a!)"
  2361. },
  2362. {
  2363. "name": "aten::scatter.value_reduce_out(Tensor self, int dim, Tensor index, Scalar value, *, str reduce, Tensor(a!) out) -> Tensor(a!)"
  2364. },
  2365. {
  2366. "name": "aten::scatter.dimname_src(Tensor self, str dim, Tensor index, Tensor src) -> Tensor"
  2367. },
  2368. {
  2369. "name": "aten::scatter.dimname_value(Tensor self, str dim, Tensor index, Scalar value) -> Tensor"
  2370. },
  2371. {
  2372. "name": "aten::any(Tensor self) -> Tensor"
  2373. },
  2374. {
  2375. "name": "aten::any.dim(Tensor self, int dim, bool keepdim=False) -> Tensor"
  2376. },
  2377. {
  2378. "name": "aten::any.dims(Tensor self, int[]? dim=None, bool keepdim=False) -> Tensor"
  2379. },
  2380. {
  2381. "name": "aten::any.out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)"
  2382. },
  2383. {
  2384. "name": "aten::any.dims_out(Tensor self, int[]? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)"
  2385. },
  2386. {
  2387. "name": "aten::any.all_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  2388. },
  2389. {
  2390. "name": "aten::any.dimname(Tensor self, str dim, bool keepdim=False) -> Tensor"
  2391. },
  2392. {
  2393. "name": "aten::any.dimname_out(Tensor self, str dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)"
  2394. },
  2395. {
  2396. "name": "aten::any.str(str[] self) -> bool"
  2397. },
  2398. {
  2399. "name": "aten::any.int(int[] self) -> bool"
  2400. },
  2401. {
  2402. "name": "aten::any.float(float[] self) -> bool"
  2403. },
  2404. {
  2405. "name": "aten::any.bool(bool[] self) -> bool"
  2406. },
  2407. {
  2408. "name": "aten::all(Tensor self) -> Tensor"
  2409. },
  2410. {
  2411. "name": "aten::all.dim(Tensor self, int dim, bool keepdim=False) -> Tensor"
  2412. },
  2413. {
  2414. "name": "aten::all.dims(Tensor self, int[]? dim=None, bool keepdim=False) -> Tensor"
  2415. },
  2416. {
  2417. "name": "aten::all.out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)"
  2418. },
  2419. {
  2420. "name": "aten::all.dims_out(Tensor self, int[]? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)"
  2421. },
  2422. {
  2423. "name": "aten::all.all_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  2424. },
  2425. {
  2426. "name": "aten::all.dimname(Tensor self, str dim, bool keepdim=False) -> Tensor"
  2427. },
  2428. {
  2429. "name": "aten::all.dimname_out(Tensor self, str dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)"
  2430. },
  2431. {
  2432. "name": "aten::all.int(int[] self) -> bool"
  2433. },
  2434. {
  2435. "name": "aten::all.float(float[] self) -> bool"
  2436. },
  2437. {
  2438. "name": "aten::all.bool(bool[] self) -> bool"
  2439. },
  2440. {
  2441. "name": "aten::aminmax(Tensor self, *, int? dim=None, bool keepdim=False) -> (Tensor min, Tensor max)"
  2442. },
  2443. {
  2444. "name": "aten::aminmax.out(Tensor self, *, int? dim=None, bool keepdim=False, Tensor(a!) min, Tensor(b!) max) -> (Tensor(a!) min, Tensor(b!) max)"
  2445. },
  2446. {
  2447. "name": "aten::rsqrt_(Tensor(a!) self) -> Tensor(a!)"
  2448. },
  2449. {
  2450. "name": "aten::amax(Tensor self, int[1] dim=[], bool keepdim=False) -> Tensor"
  2451. },
  2452. {
  2453. "name": "aten::amax.out(Tensor self, int[1] dim=[], bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)"
  2454. },
  2455. {
  2456. "name": "aten::max_pool3d_with_indices_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, int[3] dilation, bool ceil_mode, Tensor indices) -> Tensor"
  2457. },
  2458. {
  2459. "name": "aten::max_pool3d_with_indices_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, int[3] dilation, bool ceil_mode, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)"
  2460. },
  2461. {
  2462. "name": "aten::max_pool3d_with_indices(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor)"
  2463. },
  2464. {
  2465. "name": "aten::max_pool3d_with_indices.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))"
  2466. },
  2467. {
  2468. "name": "aten::slice_scatter(Tensor self, Tensor src, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor"
  2469. },
  2470. {
  2471. "name": "aten::slice_scatter.out(Tensor self, Tensor src, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1, *, Tensor(a!) out) -> Tensor(a!)"
  2472. },
  2473. {
  2474. "name": "aten::avg_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None) -> Tensor",
  2475. "category": "Pool"
  2476. },
  2477. {
  2478. "name": "aten::avg_pool2d.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None, *, Tensor(a!) out) -> Tensor(a!)"
  2479. },
  2480. {
  2481. "name": "quantized::make_quantized_cell_params(Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh) -> __torch__.torch.classes.rnn.CellParamsBase"
  2482. },
  2483. {
  2484. "name": "aten::_adaptive_avg_pool2d(Tensor self, SymInt[2] output_size) -> Tensor"
  2485. },
  2486. {
  2487. "name": "aten::_adaptive_avg_pool2d.out(Tensor self, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!)"
  2488. },
  2489. {
  2490. "name": "aten::one_hot(Tensor self, int num_classes=-1) -> Tensor"
  2491. },
  2492. {
  2493. "name": "aten::upsample_trilinear3d_backward(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor"
  2494. },
  2495. {
  2496. "name": "aten::upsample_trilinear3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)"
  2497. },
  2498. {
  2499. "name": "aten::exp_(Tensor(a!) self) -> Tensor(a!)"
  2500. },
  2501. {
  2502. "name": "aten::upsample_bilinear2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor"
  2503. },
  2504. {
  2505. "name": "aten::upsample_bilinear2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)"
  2506. },
  2507. {
  2508. "name": "aten::acosh_(Tensor(a!) self) -> Tensor(a!)"
  2509. },
  2510. {
  2511. "name": "aten::acos_(Tensor(a!) self) -> Tensor(a!)"
  2512. },
  2513. {
  2514. "name": "aten::reflection_pad3d(Tensor self, SymInt[6] padding) -> Tensor",
  2515. "category": "Tensor"
  2516. },
  2517. {
  2518. "name": "aten::reflection_pad3d.out(Tensor self, SymInt[6] padding, *, Tensor(a!) out) -> Tensor(a!)"
  2519. },
  2520. {
  2521. "name": "aten::pixel_unshuffle(Tensor self, int downscale_factor) -> Tensor"
  2522. },
  2523. {
  2524. "name": "aten::pixel_unshuffle.out(Tensor self, int downscale_factor, *, Tensor(a!) out) -> Tensor(a!)"
  2525. },
  2526. {
  2527. "name": "aten::pixel_shuffle(Tensor self, int upscale_factor) -> Tensor"
  2528. },
  2529. {
  2530. "name": "aten::pixel_shuffle.out(Tensor self, int upscale_factor, *, Tensor(a!) out) -> Tensor(a!)"
  2531. },
  2532. {
  2533. "name": "quantized::conv1d(Tensor qx, __torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weight, float output_scale, int output_zero_point) -> Tensor",
  2534. "category": "Layer"
  2535. },
  2536. {
  2537. "name": "aten::im2col(Tensor self, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor"
  2538. },
  2539. {
  2540. "name": "aten::im2col.out(Tensor self, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!)"
  2541. },
  2542. {
  2543. "name": "aten::binary_cross_entropy_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight=None, int reduction=1) -> Tensor"
  2544. },
  2545. {
  2546. "name": "aten::binary_cross_entropy_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight=None, int reduction=1, *, Tensor(a!) grad_input) -> Tensor(a!)"
  2547. },
  2548. {
  2549. "name": "aten::nan_to_num_(Tensor(a!) self, float? nan=None, float? posinf=None, float? neginf=None) -> Tensor(a!)"
  2550. },
  2551. {
  2552. "name": "aten::_scaled_dot_product_efficient_attention(Tensor query, Tensor key, Tensor value, Tensor? attn_bias, bool compute_log_sumexp, float dropout_p=0., bool is_causal=False, *, float? scale=None) -> (Tensor output, Tensor log_sumexp, Tensor philox_seed, Tensor philox_offset)"
  2553. },
  2554. {
  2555. "name": "quantized::conv2d_dilation(__torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weights) -> int[]"
  2556. },
  2557. {
  2558. "name": "aten::linalg_cross(Tensor self, Tensor other, *, int dim=-1) -> Tensor"
  2559. },
  2560. {
  2561. "name": "aten::linalg_cross.out(Tensor self, Tensor other, *, int dim=-1, Tensor(a!) out) -> Tensor(a!)"
  2562. },
  2563. {
  2564. "name": "quantized::conv_transpose2d_transpose(__torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weights) -> int"
  2565. },
  2566. {
  2567. "name": "aten::linalg_slogdet(Tensor A) -> (Tensor sign, Tensor logabsdet)"
  2568. },
  2569. {
  2570. "name": "aten::linalg_slogdet.out(Tensor A, *, Tensor(a!) sign, Tensor(b!) logabsdet) -> (Tensor(a!) sign, Tensor(b!) logabsdet)"
  2571. },
  2572. {
  2573. "name": "quantized::conv_transpose2d_stride(__torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weights) -> int[]"
  2574. },
  2575. {
  2576. "name": "aten::logspace.Tensor_Tensor(Tensor start, Tensor end, int steps, float base=10., *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  2577. },
  2578. {
  2579. "name": "aten::logspace.Tensor_Scalar(Tensor start, Scalar end, int steps, float base=10., *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  2580. },
  2581. {
  2582. "name": "aten::logspace.Scalar_Tensor(Scalar start, Tensor end, int steps, float base=10., *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  2583. },
  2584. {
  2585. "name": "aten::logspace(Scalar start, Scalar end, int steps, float base=10., *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  2586. },
  2587. {
  2588. "name": "aten::logspace.out(Scalar start, Scalar end, int steps, float base=10., *, Tensor(a!) out) -> Tensor(a!)"
  2589. },
  2590. {
  2591. "name": "aten::logspace.Tensor_Tensor_out(Tensor start, Tensor end, int steps, float base=10., *, Tensor(a!) out) -> Tensor(a!)"
  2592. },
  2593. {
  2594. "name": "aten::logspace.Tensor_Scalar_out(Tensor start, Scalar end, int steps, float base=10., *, Tensor(a!) out) -> Tensor(a!)"
  2595. },
  2596. {
  2597. "name": "aten::logspace.Scalar_Tensor_out(Scalar start, Tensor end, int steps, float base=10., *, Tensor(a!) out) -> Tensor(a!)"
  2598. },
  2599. {
  2600. "name": "aten::linspace.Tensor_Tensor(Tensor start, Tensor end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  2601. },
  2602. {
  2603. "name": "aten::linspace.Tensor_Scalar(Tensor start, Scalar end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  2604. },
  2605. {
  2606. "name": "aten::linspace.Scalar_Tensor(Scalar start, Tensor end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  2607. },
  2608. {
  2609. "name": "aten::linspace(Scalar start, Scalar end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  2610. },
  2611. {
  2612. "name": "aten::linspace.out(Scalar start, Scalar end, int steps, *, Tensor(a!) out) -> Tensor(a!)"
  2613. },
  2614. {
  2615. "name": "aten::linspace.Tensor_Tensor_out(Tensor start, Tensor end, int steps, *, Tensor(a!) out) -> Tensor(a!)"
  2616. },
  2617. {
  2618. "name": "aten::linspace.Tensor_Scalar_out(Tensor start, Scalar end, int steps, *, Tensor(a!) out) -> Tensor(a!)"
  2619. },
  2620. {
  2621. "name": "aten::linspace.Scalar_Tensor_out(Scalar start, Tensor end, int steps, *, Tensor(a!) out) -> Tensor(a!)"
  2622. },
  2623. {
  2624. "name": "aten::new_full(Tensor self, SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  2625. },
  2626. {
  2627. "name": "aten::new_full.out(Tensor self, SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!)"
  2628. },
  2629. {
  2630. "name": "aten::new_ones(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  2631. },
  2632. {
  2633. "name": "aten::new_ones.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)"
  2634. },
  2635. {
  2636. "name": "aten::ones_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor"
  2637. },
  2638. {
  2639. "name": "aten::ones_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)"
  2640. },
  2641. {
  2642. "name": "aten::item(Tensor self) -> Scalar"
  2643. },
  2644. {
  2645. "name": "aten::masked_fill.Scalar(Tensor self, Tensor mask, Scalar value) -> Tensor"
  2646. },
  2647. {
  2648. "name": "aten::masked_fill.Tensor(Tensor self, Tensor mask, Tensor value) -> Tensor"
  2649. },
  2650. {
  2651. "name": "aten::masked_fill.Scalar_out(Tensor self, Tensor mask, Scalar value, *, Tensor(a!) out) -> Tensor(a!)"
  2652. },
  2653. {
  2654. "name": "aten::masked_fill.Tensor_out(Tensor self, Tensor mask, Tensor value, *, Tensor(a!) out) -> Tensor(a!)"
  2655. },
  2656. {
  2657. "name": "aten::_unique2(Tensor self, bool sorted=True, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor)"
  2658. },
  2659. {
  2660. "name": "aten::_unique2.out(Tensor self, bool sorted=True, bool return_inverse=False, bool return_counts=False, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))"
  2661. },
  2662. {
  2663. "name": "aten::unique_consecutive(Tensor self, bool return_inverse=False, bool return_counts=False, int? dim=None) -> (Tensor, Tensor, Tensor)",
  2664. "category": "Layer"
  2665. },
  2666. {
  2667. "name": "aten::unique_consecutive.out(Tensor self, bool return_inverse=False, bool return_counts=False, int? dim=None, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))"
  2668. },
  2669. {
  2670. "name": "aten::nonzero(Tensor self) -> Tensor"
  2671. },
  2672. {
  2673. "name": "aten::nonzero.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  2674. },
  2675. {
  2676. "name": "aten::logical_not(Tensor self) -> Tensor"
  2677. },
  2678. {
  2679. "name": "aten::logical_not.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  2680. },
  2681. {
  2682. "name": "quantized::conv_transpose2d_unpack(__torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weights) -> (Tensor unpacked_weights, Tensor? B_origin)"
  2683. },
  2684. {
  2685. "name": "aten::multiply_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)"
  2686. },
  2687. {
  2688. "name": "aten::multiply_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)"
  2689. },
  2690. {
  2691. "name": "aten::multiply.Tensor(Tensor self, Tensor other) -> Tensor"
  2692. },
  2693. {
  2694. "name": "aten::multiply.Scalar(Tensor self, Scalar other) -> Tensor"
  2695. },
  2696. {
  2697. "name": "aten::multiply.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  2698. },
  2699. {
  2700. "name": "aten::true_divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)"
  2701. },
  2702. {
  2703. "name": "aten::true_divide_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)"
  2704. },
  2705. {
  2706. "name": "aten::divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)"
  2707. },
  2708. {
  2709. "name": "aten::divide_.Tensor_mode(Tensor(a!) self, Tensor other, *, str? rounding_mode) -> Tensor(a!)"
  2710. },
  2711. {
  2712. "name": "aten::divide_.Scalar_mode(Tensor(a!) self, Scalar other, *, str? rounding_mode) -> Tensor(a!)"
  2713. },
  2714. {
  2715. "name": "aten::divide_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)"
  2716. },
  2717. {
  2718. "name": "aten::divide.Tensor(Tensor self, Tensor other) -> Tensor"
  2719. },
  2720. {
  2721. "name": "aten::divide.Scalar(Tensor self, Scalar other) -> Tensor"
  2722. },
  2723. {
  2724. "name": "aten::divide.Tensor_mode(Tensor self, Tensor other, *, str? rounding_mode) -> Tensor"
  2725. },
  2726. {
  2727. "name": "aten::divide.Scalar_mode(Tensor self, Scalar other, *, str? rounding_mode) -> Tensor"
  2728. },
  2729. {
  2730. "name": "aten::divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  2731. },
  2732. {
  2733. "name": "aten::divide.out_mode(Tensor self, Tensor other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!)"
  2734. },
  2735. {
  2736. "name": "aten::pad(Tensor self, SymInt[] pad, str mode=\"constant\", float? value=None) -> Tensor",
  2737. "category": "Tensor"
  2738. },
  2739. {
  2740. "name": "quantized::conv2d_transpose(__torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weights) -> int"
  2741. },
  2742. {
  2743. "name": "aten::embedding(Tensor weight, Tensor indices, SymInt padding_idx=-1, bool scale_grad_by_freq=False, bool sparse=False) -> Tensor",
  2744. "category": "Transform"
  2745. },
  2746. {
  2747. "name": "aten::embedding.out(Tensor weight, Tensor indices, SymInt padding_idx=-1, bool scale_grad_by_freq=False, bool sparse=False, *, Tensor(a!) out) -> Tensor(a!)"
  2748. },
  2749. {
  2750. "name": "aten::type_as(Tensor self, Tensor other) -> Tensor"
  2751. },
  2752. {
  2753. "name": "aten::vstack(Tensor[] tensors) -> Tensor"
  2754. },
  2755. {
  2756. "name": "aten::vstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)"
  2757. },
  2758. {
  2759. "name": "aten::var_mean(Tensor self, bool unbiased=True) -> (Tensor, Tensor)"
  2760. },
  2761. {
  2762. "name": "aten::var_mean.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)"
  2763. },
  2764. {
  2765. "name": "aten::var_mean.correction(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False) -> (Tensor, Tensor)"
  2766. },
  2767. {
  2768. "name": "aten::var_mean.names_dim(Tensor self, str[1] dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)"
  2769. },
  2770. {
  2771. "name": "aten::var_mean.correction_names(Tensor self, str[1] dim, *, Scalar? correction=None, bool keepdim=False) -> (Tensor, Tensor)"
  2772. },
  2773. {
  2774. "name": "aten::var_mean.correction_out(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))"
  2775. },
  2776. {
  2777. "name": "aten::var(Tensor self, bool unbiased=True) -> Tensor"
  2778. },
  2779. {
  2780. "name": "aten::var.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> Tensor"
  2781. },
  2782. {
  2783. "name": "aten::var.correction(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False) -> Tensor"
  2784. },
  2785. {
  2786. "name": "aten::var.names_dim(Tensor self, str[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor"
  2787. },
  2788. {
  2789. "name": "aten::var.names_out(Tensor self, str[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)"
  2790. },
  2791. {
  2792. "name": "aten::var.out(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)"
  2793. },
  2794. {
  2795. "name": "aten::var.correction_out(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)"
  2796. },
  2797. {
  2798. "name": "aten::var.correction_names(Tensor self, str[1] dim, *, Scalar? correction=None, bool keepdim=False) -> Tensor"
  2799. },
  2800. {
  2801. "name": "aten::var.correction_names_out(Tensor self, str[1] dim, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)"
  2802. },
  2803. {
  2804. "name": "aten::unsafe_chunk(Tensor self, int chunks, int dim=0) -> Tensor[]"
  2805. },
  2806. {
  2807. "name": "aten::tile(Tensor self, SymInt[] dims) -> Tensor"
  2808. },
  2809. {
  2810. "name": "aten::take_along_dim(Tensor self, Tensor indices, int? dim=None) -> Tensor"
  2811. },
  2812. {
  2813. "name": "aten::take_along_dim.out(Tensor self, Tensor indices, int? dim=None, *, Tensor(a!) out) -> Tensor(a!)"
  2814. },
  2815. {
  2816. "name": "aten::count_nonzero.dim_IntList(Tensor self, int[] dim) -> Tensor"
  2817. },
  2818. {
  2819. "name": "aten::count_nonzero.dim_IntList_out(Tensor self, int[] dim, *, Tensor(a!) out) -> Tensor(a!)"
  2820. },
  2821. {
  2822. "name": "aten::count_nonzero(Tensor self, int? dim=None) -> Tensor"
  2823. },
  2824. {
  2825. "name": "aten::count_nonzero.out(Tensor self, int? dim=None, *, Tensor(a!) out) -> Tensor(a!)"
  2826. },
  2827. {
  2828. "name": "aten::constant_pad_nd(Tensor self, SymInt[] pad, Scalar value=0) -> Tensor",
  2829. "category": "Tensor"
  2830. },
  2831. {
  2832. "name": "aten::constant_pad_nd.out(Tensor self, SymInt[] pad, Scalar value=0, *, Tensor(a!) out) -> Tensor(a!)"
  2833. },
  2834. {
  2835. "name": "aten::col2im(Tensor self, SymInt[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor"
  2836. },
  2837. {
  2838. "name": "aten::col2im.out(Tensor self, SymInt[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!)"
  2839. },
  2840. {
  2841. "name": "aten::channel_shuffle(Tensor self, SymInt groups) -> Tensor"
  2842. },
  2843. {
  2844. "name": "aten::channel_shuffle.out(Tensor self, SymInt groups, *, Tensor(a!) out) -> Tensor(a!)"
  2845. },
  2846. {
  2847. "name": "aten::ceil_(Tensor(a!) self) -> Tensor(a!)"
  2848. },
  2849. {
  2850. "name": "aten::relu6_(Tensor(a!) self) -> Tensor(a!)",
  2851. "category": "Activation"
  2852. },
  2853. {
  2854. "name": "aten::relu6(Tensor self) -> Tensor",
  2855. "category": "Activation"
  2856. },
  2857. {
  2858. "name": "aten::alias_copy(Tensor self) -> Tensor"
  2859. },
  2860. {
  2861. "name": "aten::alias_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  2862. },
  2863. {
  2864. "name": "aten::rrelu(Tensor self, Scalar lower=0.125, Scalar upper=0.33333333333333331, bool training=False, Generator? generator=None) -> Tensor"
  2865. },
  2866. {
  2867. "name": "aten::pairwise_distance(Tensor x1, Tensor x2, float p=2., float eps=9.9999999999999995e-07, bool keepdim=False) -> Tensor"
  2868. },
  2869. {
  2870. "name": "aten::outer(Tensor self, Tensor vec2) -> Tensor"
  2871. },
  2872. {
  2873. "name": "aten::outer.out(Tensor self, Tensor vec2, *, Tensor(a!) out) -> Tensor(a!)"
  2874. },
  2875. {
  2876. "name": "aten::nll_loss_nd(Tensor self, Tensor target, Tensor? weight=None, int reduction=1, SymInt ignore_index=-100) -> Tensor"
  2877. },
  2878. {
  2879. "name": "aten::moveaxis.intlist(Tensor(a) self, int[] source, int[] destination) -> Tensor(a)"
  2880. },
  2881. {
  2882. "name": "aten::moveaxis.int(Tensor(a) self, int source, int destination) -> Tensor(a)"
  2883. },
  2884. {
  2885. "name": "aten::min.other(Tensor self, Tensor other) -> Tensor"
  2886. },
  2887. {
  2888. "name": "aten::min(Tensor self) -> Tensor"
  2889. },
  2890. {
  2891. "name": "aten::min.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)"
  2892. },
  2893. {
  2894. "name": "aten::min.dim_min(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) min, Tensor(b!) min_indices) -> (Tensor(a!) values, Tensor(b!) indices)"
  2895. },
  2896. {
  2897. "name": "aten::min.names_dim(Tensor self, str dim, bool keepdim=False) -> (Tensor values, Tensor indices)"
  2898. },
  2899. {
  2900. "name": "aten::min.names_dim_min(Tensor self, str dim, bool keepdim=False, *, Tensor(a!) min, Tensor(b!) min_indices) -> (Tensor(a!) values, Tensor(b!) indices)"
  2901. },
  2902. {
  2903. "name": "aten::min.unary_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  2904. },
  2905. {
  2906. "name": "aten::min.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  2907. },
  2908. {
  2909. "name": "aten::meshgrid(Tensor[] tensors) -> Tensor[]",
  2910. "category": "Tensor"
  2911. },
  2912. {
  2913. "name": "aten::meshgrid.indexing(Tensor[] tensors, *, str indexing) -> Tensor[]",
  2914. "category": "Tensor"
  2915. },
  2916. {
  2917. "name": "aten::max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor",
  2918. "category": "Pool"
  2919. },
  2920. {
  2921. "name": "aten::max_pool1d_with_indices(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=[0], int[1] dilation=[1], bool ceil_mode=False) -> (Tensor, Tensor)",
  2922. "category": "Pool"
  2923. },
  2924. {
  2925. "name": "aten::max_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=[0], int[1] dilation=[1], bool ceil_mode=False) -> Tensor",
  2926. "category": "Pool"
  2927. },
  2928. {
  2929. "name": "aten::max.other(Tensor self, Tensor other) -> Tensor"
  2930. },
  2931. {
  2932. "name": "aten::max(Tensor self) -> Tensor"
  2933. },
  2934. {
  2935. "name": "aten::max.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)"
  2936. },
  2937. {
  2938. "name": "aten::max.dim_max(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!) indices)"
  2939. },
  2940. {
  2941. "name": "aten::max.names_dim(Tensor self, str dim, bool keepdim=False) -> (Tensor values, Tensor indices)"
  2942. },
  2943. {
  2944. "name": "aten::max.names_dim_max(Tensor self, str dim, bool keepdim=False, *, Tensor(a!) max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!) indices)"
  2945. },
  2946. {
  2947. "name": "aten::max.unary_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  2948. },
  2949. {
  2950. "name": "aten::max.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  2951. },
  2952. {
  2953. "name": "aten::logdet(Tensor self) -> Tensor"
  2954. },
  2955. {
  2956. "name": "aten::log_sigmoid(Tensor self) -> Tensor"
  2957. },
  2958. {
  2959. "name": "aten::log_sigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  2960. },
  2961. {
  2962. "name": "quantized::linear_unpack_fp16(__torch__.torch.classes.quantized.LinearPackedParamsBase W_prepack) -> (Tensor W_origin, Tensor? B_origin)"
  2963. },
  2964. {
  2965. "name": "quantized::linear_unpack_fp16.legacy(Tensor W_prepack) -> (Tensor W_origin, Tensor? B_origin)"
  2966. },
  2967. {
  2968. "name": "aten::linalg_solve_ex(Tensor A, Tensor B, *, bool left=True, bool check_errors=False) -> (Tensor result, Tensor info)"
  2969. },
  2970. {
  2971. "name": "aten::linalg_solve_ex.out(Tensor A, Tensor B, *, bool left=True, bool check_errors=False, Tensor(a!) result, Tensor(b!) info) -> (Tensor(a!) result, Tensor(b!) info)"
  2972. },
  2973. {
  2974. "name": "quantized::linear_unpack(__torch__.torch.classes.quantized.LinearPackedParamsBase W_prepack) -> (Tensor W_origin, Tensor? B_origin)"
  2975. },
  2976. {
  2977. "name": "quantized::linear_unpack.legacy(Tensor W_prepack) -> (Tensor W_origin, Tensor? B_origin)"
  2978. },
  2979. {
  2980. "name": "aten::linalg_norm(Tensor self, Scalar? ord=None, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor"
  2981. },
  2982. {
  2983. "name": "aten::linalg_norm.ord_str(Tensor self, str ord, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor"
  2984. },
  2985. {
  2986. "name": "aten::linalg_norm.out(Tensor self, Scalar? ord=None, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)"
  2987. },
  2988. {
  2989. "name": "aten::linalg_norm.ord_str_out(Tensor self, str ord, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)"
  2990. },
  2991. {
  2992. "name": "quantized::conv_transpose2d_groups(__torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weights) -> int"
  2993. },
  2994. {
  2995. "name": "quantized::conv2d_groups(__torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weights) -> int"
  2996. },
  2997. {
  2998. "name": "quantized::prelu(Tensor qx, Tensor weight, float output_scale, int output_zero_point) -> Tensor"
  2999. },
  3000. {
  3001. "name": "quantized::softmax(Tensor qx, int dim, float output_scale, int output_zero_point) -> Tensor"
  3002. },
  3003. {
  3004. "name": "quantized::mul_scalar(Tensor qa, Scalar b) -> Tensor qc"
  3005. },
  3006. {
  3007. "name": "quantized::mul_scalar.Tensor(Tensor qa, Tensor b) -> Tensor qc"
  3008. },
  3009. {
  3010. "name": "quantized::linear(Tensor X, __torch__.torch.classes.quantized.LinearPackedParamsBase W_prepack, float Y_scale_i, int Y_zero_point_i) -> Tensor Y",
  3011. "category": "Layer"
  3012. },
  3013. {
  3014. "name": "aten::instance_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool use_input_stats, float momentum, float eps, bool cudnn_enabled) -> Tensor",
  3015. "category": "Normalization"
  3016. },
  3017. {
  3018. "name": "quantized::linear_prepack(Tensor W, Tensor? B=None) -> __torch__.torch.classes.quantized.LinearPackedParamsBase W_prepack"
  3019. },
  3020. {
  3021. "name": "aten::isfinite(Tensor self) -> Tensor"
  3022. },
  3023. {
  3024. "name": "aten::isfinite.float(float a) -> bool"
  3025. },
  3026. {
  3027. "name": "aten::isfinite.complex(complex a) -> bool"
  3028. },
  3029. {
  3030. "name": "quantized::hardswish(Tensor input, float output_scale, int output_zero_point) -> Tensor",
  3031. "category": "Activation"
  3032. },
  3033. {
  3034. "name": "aten::index_select_backward(Tensor grad, SymInt[] self_sizes, int dim, Tensor index) -> Tensor"
  3035. },
  3036. {
  3037. "name": "quantized::conv2d_prepack(Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups) -> __torch__.torch.classes.quantized.Conv2dPackedParamsBase"
  3038. },
  3039. {
  3040. "name": "aten::hstack(Tensor[] tensors) -> Tensor"
  3041. },
  3042. {
  3043. "name": "aten::hstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)"
  3044. },
  3045. {
  3046. "name": "quantized::batch_norm3d_relu(Tensor qx, Tensor? weight, Tensor? bias, Tensor mean, Tensor var, float eps, float output_scale, int output_zero_point) -> Tensor",
  3047. "category": "Normalization"
  3048. },
  3049. {
  3050. "name": "quantized::batch_norm3d(Tensor qx, Tensor? weight, Tensor? bias, Tensor mean, Tensor var, float eps, float output_scale, int output_zero_point) -> Tensor",
  3051. "category": "Normalization"
  3052. },
  3053. {
  3054. "name": "quantized::batch_norm_relu(Tensor qx, Tensor? weight, Tensor? bias, Tensor mean, Tensor var, float eps, float output_scale, int output_zero_point) -> Tensor",
  3055. "category": "Normalization"
  3056. },
  3057. {
  3058. "name": "aten::ger(Tensor self, Tensor vec2) -> Tensor"
  3059. },
  3060. {
  3061. "name": "aten::ger.out(Tensor self, Tensor vec2, *, Tensor(a!) out) -> Tensor(a!)"
  3062. },
  3063. {
  3064. "name": "quantized::add(Tensor qa, Tensor qb, float scale, int zero_point) -> Tensor qc"
  3065. },
  3066. {
  3067. "name": "quantized::add.out(Tensor qa, Tensor qb, Tensor(a!) out) -> Tensor(a!) out"
  3068. },
  3069. {
  3070. "name": "quantized::add.Scalar(Tensor qa, Scalar b) -> Tensor qc"
  3071. },
  3072. {
  3073. "name": "quantized::add.Scalar2(Scalar b, Tensor qa) -> Tensor qc"
  3074. },
  3075. {
  3076. "name": "quantized::add.Scalar_out(Tensor qa, Scalar b, Tensor(a!) out) -> Tensor(a!) out"
  3077. },
  3078. {
  3079. "name": "aten::gather_backward(Tensor grad, Tensor self, int dim, Tensor index, bool sparse_grad) -> Tensor"
  3080. },
  3081. {
  3082. "name": "aten::flatten.using_ints(Tensor(a) self, int start_dim=0, int end_dim=-1) -> Tensor(a)",
  3083. "category": "Shape"
  3084. },
  3085. {
  3086. "name": "aten::flatten.DimnameList(Tensor(a) self, str[] dims, str out_dim) -> Tensor(a)",
  3087. "category": "Shape"
  3088. },
  3089. {
  3090. "name": "aten::flatten.named_out_dim(Tensor(a) self, int start_dim, int end_dim, str out_dim) -> Tensor(a)",
  3091. "category": "Shape"
  3092. },
  3093. {
  3094. "name": "aten::flatten.using_names(Tensor(a) self, str start_dim, str end_dim, str out_dim) -> Tensor(a)",
  3095. "category": "Shape"
  3096. },
  3097. {
  3098. "name": "aten::fft_ifftshift(Tensor self, int[1]? dim=None) -> Tensor"
  3099. },
  3100. {
  3101. "name": "aten::fft_hfftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor"
  3102. },
  3103. {
  3104. "name": "aten::fft_hfftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)"
  3105. },
  3106. {
  3107. "name": "aten::fft_hfft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2, -1], str? norm=None) -> Tensor"
  3108. },
  3109. {
  3110. "name": "aten::fft_hfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2, -1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)"
  3111. },
  3112. {
  3113. "name": "aten::fft_fftshift(Tensor self, int[1]? dim=None) -> Tensor"
  3114. },
  3115. {
  3116. "name": "aten::diag(Tensor self, int diagonal=0) -> Tensor"
  3117. },
  3118. {
  3119. "name": "aten::diag.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!)"
  3120. },
  3121. {
  3122. "name": "aten::clip(Tensor self, Scalar? min=None, Scalar? max=None) -> Tensor"
  3123. },
  3124. {
  3125. "name": "aten::clip.Tensor(Tensor self, Tensor? min=None, Tensor? max=None) -> Tensor"
  3126. },
  3127. {
  3128. "name": "aten::clip.out(Tensor self, Scalar? min=None, Scalar? max=None, *, Tensor(a!) out) -> Tensor(a!)"
  3129. },
  3130. {
  3131. "name": "aten::clip.Tensor_out(Tensor self, Tensor? min=None, Tensor? max=None, *, Tensor(a!) out) -> Tensor(a!)"
  3132. },
  3133. {
  3134. "name": "aten::cartesian_prod(Tensor[] tensors) -> Tensor"
  3135. },
  3136. {
  3137. "name": "aten::broadcast_to(Tensor(a) self, SymInt[] size) -> Tensor(a)"
  3138. },
  3139. {
  3140. "name": "aten::broadcast_tensors(Tensor[] tensors) -> Tensor[]"
  3141. },
  3142. {
  3143. "name": "aten::selu_(Tensor(a!) self) -> Tensor(a!)",
  3144. "category": "Activation"
  3145. },
  3146. {
  3147. "name": "aten::arctan_(Tensor(a!) self) -> Tensor(a!)"
  3148. },
  3149. {
  3150. "name": "aten::arctan(Tensor self) -> Tensor"
  3151. },
  3152. {
  3153. "name": "aten::arctan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  3154. },
  3155. {
  3156. "name": "aten::searchsorted.Tensor(Tensor sorted_sequence, Tensor self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None) -> Tensor"
  3157. },
  3158. {
  3159. "name": "aten::searchsorted.Tensor_out(Tensor sorted_sequence, Tensor self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None, Tensor(a!) out) -> Tensor(a!)"
  3160. },
  3161. {
  3162. "name": "aten::searchsorted.Scalar(Tensor sorted_sequence, Scalar self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None) -> Tensor"
  3163. },
  3164. {
  3165. "name": "aten::searchsorted.Scalar_out(Tensor sorted_sequence, Scalar self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None, Tensor(a!) out) -> Tensor(a!)"
  3166. },
  3167. {
  3168. "name": "aten::scatter_reduce_.two(Tensor(a!) self, int dim, Tensor index, Tensor src, str reduce, *, bool include_self=True) -> Tensor(a!)"
  3169. },
  3170. {
  3171. "name": "aten::scatter_reduce.two(Tensor self, int dim, Tensor index, Tensor src, str reduce, *, bool include_self=True) -> Tensor"
  3172. },
  3173. {
  3174. "name": "aten::scatter_reduce.two_out(Tensor self, int dim, Tensor index, Tensor src, str reduce, *, bool include_self=True, Tensor(a!) out) -> Tensor(a!)"
  3175. },
  3176. {
  3177. "name": "aten::round_(Tensor(a!) self) -> Tensor(a!)"
  3178. },
  3179. {
  3180. "name": "aten::round_.decimals(Tensor(a!) self, *, int decimals) -> Tensor(a!)"
  3181. },
  3182. {
  3183. "name": "aten::adaptive_avg_pool1d(Tensor self, int[1] output_size) -> Tensor",
  3184. "category": "Pool"
  3185. },
  3186. {
  3187. "name": "aten::adaptive_avg_pool1d.out(Tensor self, int[1] output_size, *, Tensor(a!) out) -> Tensor(a!)"
  3188. },
  3189. {
  3190. "name": "aten::adaptive_max_pool1d(Tensor self, int[1] output_size) -> (Tensor, Tensor)",
  3191. "category": "Pool"
  3192. },
  3193. {
  3194. "name": "aten::avg_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=[0], bool ceil_mode=False, bool count_include_pad=True) -> Tensor",
  3195. "category": "Pool"
  3196. },
  3197. {
  3198. "name": "aten::avg_pool1d.out(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=[0], bool ceil_mode=False, bool count_include_pad=True, *, Tensor(a!) out) -> Tensor(a!)"
  3199. },
  3200. {
  3201. "name": "aten::argsort(Tensor self, int dim=-1, bool descending=False) -> Tensor"
  3202. },
  3203. {
  3204. "name": "aten::argsort.stable(Tensor self, *, bool stable, int dim=-1, bool descending=False) -> Tensor"
  3205. },
  3206. {
  3207. "name": "aten::argsort.stable_out(Tensor self, *, bool stable, int dim=-1, bool descending=False, Tensor(a!) out) -> Tensor(a!)"
  3208. },
  3209. {
  3210. "name": "aten::argsort.dimname(Tensor self, str dim, bool descending=False) -> Tensor"
  3211. },
  3212. {
  3213. "name": "aten::selu(Tensor self) -> Tensor",
  3214. "category": "Activation"
  3215. },
  3216. {
  3217. "name": "aten::select_scatter(Tensor self, Tensor src, int dim, SymInt index) -> Tensor"
  3218. },
  3219. {
  3220. "name": "aten::select_scatter.out(Tensor self, Tensor src, int dim, SymInt index, *, Tensor(a!) out) -> Tensor(a!)"
  3221. },
  3222. {
  3223. "name": "aten::__iand__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)"
  3224. },
  3225. {
  3226. "name": "aten::__iand__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)"
  3227. },
  3228. {
  3229. "name": "aten::__and__.Scalar(Tensor self, Scalar other) -> Tensor"
  3230. },
  3231. {
  3232. "name": "aten::__and__.Tensor(Tensor self, Tensor other) -> Tensor"
  3233. },
  3234. {
  3235. "name": "aten::__and__.bool(bool a, bool b) -> bool"
  3236. },
  3237. {
  3238. "name": "aten::__and__.int(int a, int b) -> int"
  3239. },
  3240. {
  3241. "name": "aten::mvlgamma_(Tensor(a!) self, int p) -> Tensor(a!)"
  3242. },
  3243. {
  3244. "name": "aten::dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)",
  3245. "category": "Dropout"
  3246. },
  3247. {
  3248. "name": "aten::square(Tensor self) -> Tensor"
  3249. },
  3250. {
  3251. "name": "aten::square.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  3252. },
  3253. {
  3254. "name": "aten::convolution_backward(Tensor grad_output, Tensor input, Tensor weight, SymInt[]? bias_sizes, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor)"
  3255. },
  3256. {
  3257. "name": "aten::convolution_backward.out(Tensor grad_output, Tensor input, Tensor weight, SymInt[]? bias_sizes, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))"
  3258. },
  3259. {
  3260. "name": "aten::logical_xor_(Tensor(a!) self, Tensor other) -> Tensor(a!)"
  3261. },
  3262. {
  3263. "name": "aten::logical_xor(Tensor self, Tensor other) -> Tensor"
  3264. },
  3265. {
  3266. "name": "aten::logical_xor.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  3267. },
  3268. {
  3269. "name": "aten::logical_or_(Tensor(a!) self, Tensor other) -> Tensor(a!)"
  3270. },
  3271. {
  3272. "name": "aten::logical_or(Tensor self, Tensor other) -> Tensor"
  3273. },
  3274. {
  3275. "name": "aten::logical_or.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  3276. },
  3277. {
  3278. "name": "aten::logical_and_(Tensor(a!) self, Tensor other) -> Tensor(a!)"
  3279. },
  3280. {
  3281. "name": "aten::masked_fill_.Scalar(Tensor(a!) self, Tensor mask, Scalar value) -> Tensor(a!)"
  3282. },
  3283. {
  3284. "name": "aten::masked_fill_.Tensor(Tensor(a!) self, Tensor mask, Tensor value) -> Tensor(a!)"
  3285. },
  3286. {
  3287. "name": "quantized::mul_relu_out(Tensor qa, Tensor qb, Tensor(a!) out) -> Tensor(a!) out"
  3288. },
  3289. {
  3290. "name": "quantized::add_out(Tensor qa, Tensor qb, Tensor(a!) out) -> Tensor(a!) out"
  3291. },
  3292. {
  3293. "name": "aten::gcd_(Tensor(a!) self, Tensor other) -> Tensor(a!)"
  3294. },
  3295. {
  3296. "name": "aten::atan2_(Tensor(a!) self, Tensor other) -> Tensor(a!)"
  3297. },
  3298. {
  3299. "name": "aten::silu_backward(Tensor grad_output, Tensor self) -> Tensor"
  3300. },
  3301. {
  3302. "name": "aten::silu_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!)"
  3303. },
  3304. {
  3305. "name": "quantized::add_scalar_relu_out(Tensor qa, Scalar b, Tensor(a!) out) -> Tensor(a!) out"
  3306. },
  3307. {
  3308. "name": "quantized::add_scalar_relu_out.Tensor(Tensor qa, Tensor b, Tensor(a!) out) -> Tensor(a!) out"
  3309. },
  3310. {
  3311. "name": "aten::gelu_backward(Tensor grad_output, Tensor self, *, str approximate=\"none\") -> Tensor"
  3312. },
  3313. {
  3314. "name": "aten::gelu_backward.grad_input(Tensor grad_output, Tensor self, *, str approximate=\"none\", Tensor(a!) grad_input) -> Tensor(a!)"
  3315. },
  3316. {
  3317. "name": "aten::log_sigmoid_backward(Tensor grad_output, Tensor self, Tensor buffer) -> Tensor"
  3318. },
  3319. {
  3320. "name": "aten::log_sigmoid_backward.grad_input(Tensor grad_output, Tensor self, Tensor buffer, *, Tensor(a!) grad_input) -> Tensor(a!)"
  3321. },
  3322. {
  3323. "name": "aten::logit_backward(Tensor grad_output, Tensor self, float? eps=None) -> Tensor"
  3324. },
  3325. {
  3326. "name": "aten::logit_backward.grad_input(Tensor grad_output, Tensor self, float? eps=None, *, Tensor(a!) grad_input) -> Tensor(a!)"
  3327. },
  3328. {
  3329. "name": "quantized::instance_norm(Tensor input, Tensor? weight, Tensor? bias, float eps, float output_scale, int output_zero_point) -> Tensor"
  3330. },
  3331. {
  3332. "name": "aten::hardswish_backward(Tensor grad_output, Tensor self) -> Tensor"
  3333. },
  3334. {
  3335. "name": "aten::hardswish_backward.out(Tensor grad_output, Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  3336. },
  3337. {
  3338. "name": "quantized::conv_transpose2d(Tensor qx, __torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weight, float output_scale, int output_zero_point) -> Tensor"
  3339. },
  3340. {
  3341. "name": "aten::hardtanh_backward(Tensor grad_output, Tensor self, Scalar min_val, Scalar max_val) -> Tensor"
  3342. },
  3343. {
  3344. "name": "aten::hardtanh_backward.grad_input(Tensor grad_output, Tensor self, Scalar min_val, Scalar max_val, *, Tensor(a!) grad_input) -> Tensor(a!)"
  3345. },
  3346. {
  3347. "name": "aten::_convolution_mode(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, str padding, SymInt[] dilation, SymInt groups) -> Tensor"
  3348. },
  3349. {
  3350. "name": "aten::log_sigmoid_forward(Tensor self) -> (Tensor output, Tensor buffer)"
  3351. },
  3352. {
  3353. "name": "aten::log_sigmoid_forward.output(Tensor self, *, Tensor(a!) output, Tensor(b!) buffer) -> (Tensor(a!), Tensor(b!))"
  3354. },
  3355. {
  3356. "name": "quantized::mul_out(Tensor qa, Tensor qb, Tensor(a!) out) -> Tensor(a!) out"
  3357. },
  3358. {
  3359. "name": "aten::logaddexp(Tensor self, Tensor other) -> Tensor"
  3360. },
  3361. {
  3362. "name": "aten::logaddexp.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  3363. },
  3364. {
  3365. "name": "quantized::add_relu(Tensor qa, Tensor qb, float scale, int zero_point) -> Tensor qc"
  3366. },
  3367. {
  3368. "name": "quantized::add_relu.out(Tensor qa, Tensor qb, Tensor(a!) out) -> Tensor(a!) out"
  3369. },
  3370. {
  3371. "name": "quantized::add_relu.Scalar(Tensor qa, Scalar b) -> Tensor qc"
  3372. },
  3373. {
  3374. "name": "quantized::add_relu.Scalar2(Scalar b, Tensor qa) -> Tensor qc"
  3375. },
  3376. {
  3377. "name": "quantized::add_relu.Scalar_out(Tensor qa, Scalar b, Tensor(a!) out) -> Tensor(a!) out"
  3378. },
  3379. {
  3380. "name": "aten::gcd(Tensor self, Tensor other) -> Tensor"
  3381. },
  3382. {
  3383. "name": "aten::gcd.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  3384. },
  3385. {
  3386. "name": "aten::gcd.int(int a, int b) -> int"
  3387. },
  3388. {
  3389. "name": "quantized::conv_transpose2d_prepack(Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] output_padding, int[] dilation, int groups) -> __torch__.torch.classes.quantized.Conv2dPackedParamsBase"
  3390. },
  3391. {
  3392. "name": "aten::__xor__.Tensor(Tensor self, Tensor other) -> Tensor"
  3393. },
  3394. {
  3395. "name": "aten::__xor__.Scalar(Tensor self, Scalar other) -> Tensor"
  3396. },
  3397. {
  3398. "name": "aten::__xor__.bool(bool a, bool b) -> bool"
  3399. },
  3400. {
  3401. "name": "aten::__xor__.int(int a, int b) -> int"
  3402. },
  3403. {
  3404. "name": "aten::floor_divide_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)"
  3405. },
  3406. {
  3407. "name": "aten::floor_divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)"
  3408. },
  3409. {
  3410. "name": "aten::fmod.Tensor(Tensor self, Tensor other) -> Tensor"
  3411. },
  3412. {
  3413. "name": "aten::fmod.Scalar(Tensor self, Scalar other) -> Tensor"
  3414. },
  3415. {
  3416. "name": "aten::fmod.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  3417. },
  3418. {
  3419. "name": "aten::fmod.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)"
  3420. },
  3421. {
  3422. "name": "aten::fmod.int(int a, int b) -> float"
  3423. },
  3424. {
  3425. "name": "aten::fmod.float(float a, float b) -> float"
  3426. },
  3427. {
  3428. "name": "aten::fmod.int_float(int a, float b) -> float"
  3429. },
  3430. {
  3431. "name": "aten::fmod.float_int(float a, int b) -> float"
  3432. },
  3433. {
  3434. "name": "aten::fmod(Scalar a, Scalar b) -> float"
  3435. },
  3436. {
  3437. "name": "aten::floor_divide(Tensor self, Tensor other) -> Tensor"
  3438. },
  3439. {
  3440. "name": "aten::floor_divide.Scalar(Tensor self, Scalar other) -> Tensor"
  3441. },
  3442. {
  3443. "name": "aten::floor_divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  3444. },
  3445. {
  3446. "name": "aten::floor_divide.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)"
  3447. },
  3448. {
  3449. "name": "aten::__rshift__.Tensor(Tensor self, Tensor other) -> Tensor"
  3450. },
  3451. {
  3452. "name": "aten::__rshift__.Scalar(Tensor self, Scalar other) -> Tensor"
  3453. },
  3454. {
  3455. "name": "aten::__rshift__.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)"
  3456. },
  3457. {
  3458. "name": "aten::__rshift__.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  3459. },
  3460. {
  3461. "name": "aten::__rshift__.int(int a, int b) -> int"
  3462. },
  3463. {
  3464. "name": "aten::quantized_gru.input(Tensor input, Tensor hx, __torch__.torch.classes.rnn.CellParamsBase[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor)",
  3465. "category": "Layer"
  3466. },
  3467. {
  3468. "name": "aten::quantized_gru.data(Tensor data, Tensor batch_sizes, Tensor hx, __torch__.torch.classes.rnn.CellParamsBase[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor)",
  3469. "category": "Layer"
  3470. },
  3471. {
  3472. "name": "aten::quantized_gru.input_legacy(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor)",
  3473. "category": "Layer"
  3474. },
  3475. {
  3476. "name": "aten::quantized_gru.data_legacy(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor)",
  3477. "category": "Layer"
  3478. },
  3479. {
  3480. "name": "aten::__or__.Tensor(Tensor self, Tensor other) -> Tensor"
  3481. },
  3482. {
  3483. "name": "aten::__or__.Scalar(Tensor self, Scalar other) -> Tensor"
  3484. },
  3485. {
  3486. "name": "aten::__or__.bool(bool a, bool b) -> bool"
  3487. },
  3488. {
  3489. "name": "aten::__or__.int(int a, int b) -> int"
  3490. },
  3491. {
  3492. "name": "aten::floor_(Tensor(a!) self) -> Tensor(a!)"
  3493. },
  3494. {
  3495. "name": "aten::quantized_lstm.input(Tensor input, Tensor[] hx, __torch__.torch.classes.rnn.CellParamsBase[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first, *, ScalarType? dtype=None, bool use_dynamic=False) -> (Tensor, Tensor, Tensor)",
  3496. "category": "Layer"
  3497. },
  3498. {
  3499. "name": "aten::quantized_lstm.data(Tensor data, Tensor batch_sizes, Tensor[] hx, __torch__.torch.classes.rnn.CellParamsBase[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, *, ScalarType? dtype=None, bool use_dynamic=False) -> (Tensor, Tensor, Tensor)",
  3500. "category": "Layer"
  3501. },
  3502. {
  3503. "name": "aten::quantized_lstm.input_legacy(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first, *, ScalarType? dtype=None, bool use_dynamic=False) -> (Tensor, Tensor, Tensor)",
  3504. "category": "Layer"
  3505. },
  3506. {
  3507. "name": "aten::quantized_lstm.data_legacy(Tensor data, Tensor batch_sizes, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, *, ScalarType? dtype=None, bool use_dynamic=False) -> (Tensor, Tensor, Tensor)",
  3508. "category": "Layer"
  3509. },
  3510. {
  3511. "name": "aten::std(Tensor self, bool unbiased=True) -> Tensor"
  3512. },
  3513. {
  3514. "name": "aten::std.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> Tensor"
  3515. },
  3516. {
  3517. "name": "aten::std.correction(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False) -> Tensor"
  3518. },
  3519. {
  3520. "name": "aten::std.names_dim(Tensor self, str[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor"
  3521. },
  3522. {
  3523. "name": "aten::std.names_out(Tensor self, str[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)"
  3524. },
  3525. {
  3526. "name": "aten::std.out(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)"
  3527. },
  3528. {
  3529. "name": "aten::std.correction_out(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)"
  3530. },
  3531. {
  3532. "name": "aten::std.correction_names(Tensor self, str[1] dim, *, Scalar? correction=None, bool keepdim=False) -> Tensor"
  3533. },
  3534. {
  3535. "name": "aten::std.correction_names_out(Tensor self, str[1] dim, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)"
  3536. },
  3537. {
  3538. "name": "aten::greater_equal.Tensor(Tensor self, Tensor other) -> Tensor"
  3539. },
  3540. {
  3541. "name": "aten::greater_equal.Scalar(Tensor self, Scalar other) -> Tensor"
  3542. },
  3543. {
  3544. "name": "aten::greater_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)"
  3545. },
  3546. {
  3547. "name": "aten::greater_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  3548. },
  3549. {
  3550. "name": "aten::_cdist_forward(Tensor x1, Tensor x2, float p, int? compute_mode) -> Tensor"
  3551. },
  3552. {
  3553. "name": "aten::_cdist_forward.out(Tensor x1, Tensor x2, float p, int? compute_mode, *, Tensor(a!) out) -> Tensor(a!)"
  3554. },
  3555. {
  3556. "name": "aten::clamp_max_(Tensor(a!) self, Scalar max) -> Tensor(a!)"
  3557. },
  3558. {
  3559. "name": "aten::clamp_max_.Tensor(Tensor(a!) self, Tensor max) -> Tensor(a!)"
  3560. },
  3561. {
  3562. "name": "aten::clamp_min_(Tensor(a!) self, Scalar min) -> Tensor(a!)"
  3563. },
  3564. {
  3565. "name": "aten::clamp_min_.Tensor(Tensor(a!) self, Tensor min) -> Tensor(a!)"
  3566. },
  3567. {
  3568. "name": "aten::bitwise_right_shift_.Tensor_Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)"
  3569. },
  3570. {
  3571. "name": "aten::bitwise_right_shift_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)"
  3572. },
  3573. {
  3574. "name": "aten::bitwise_right_shift.Tensor(Tensor self, Tensor other) -> Tensor"
  3575. },
  3576. {
  3577. "name": "aten::bitwise_right_shift.Tensor_Scalar(Tensor self, Scalar other) -> Tensor"
  3578. },
  3579. {
  3580. "name": "aten::bitwise_right_shift.Scalar_Tensor(Scalar self, Tensor other) -> Tensor"
  3581. },
  3582. {
  3583. "name": "aten::bitwise_right_shift.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  3584. },
  3585. {
  3586. "name": "aten::bitwise_right_shift.Tensor_Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)"
  3587. },
  3588. {
  3589. "name": "aten::bitwise_right_shift.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  3590. },
  3591. {
  3592. "name": "aten::bitwise_xor_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)"
  3593. },
  3594. {
  3595. "name": "aten::bitwise_xor_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)"
  3596. },
  3597. {
  3598. "name": "aten::bitwise_xor.Tensor(Tensor self, Tensor other) -> Tensor"
  3599. },
  3600. {
  3601. "name": "aten::bitwise_xor.Scalar(Tensor self, Scalar other) -> Tensor"
  3602. },
  3603. {
  3604. "name": "aten::bitwise_xor.Scalar_Tensor(Scalar self, Tensor other) -> Tensor"
  3605. },
  3606. {
  3607. "name": "aten::bitwise_xor.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  3608. },
  3609. {
  3610. "name": "aten::bitwise_xor.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)"
  3611. },
  3612. {
  3613. "name": "aten::bitwise_xor.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  3614. },
  3615. {
  3616. "name": "aten::bitwise_or.Tensor(Tensor self, Tensor other) -> Tensor"
  3617. },
  3618. {
  3619. "name": "aten::bitwise_or.Scalar(Tensor self, Scalar other) -> Tensor"
  3620. },
  3621. {
  3622. "name": "aten::bitwise_or.Scalar_Tensor(Scalar self, Tensor other) -> Tensor"
  3623. },
  3624. {
  3625. "name": "aten::bitwise_or.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  3626. },
  3627. {
  3628. "name": "aten::bitwise_or.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)"
  3629. },
  3630. {
  3631. "name": "aten::bitwise_or.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  3632. },
  3633. {
  3634. "name": "aten::__ilshift__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)"
  3635. },
  3636. {
  3637. "name": "aten::__ilshift__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)"
  3638. },
  3639. {
  3640. "name": "aten::bitwise_left_shift_.Tensor_Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)"
  3641. },
  3642. {
  3643. "name": "aten::bitwise_left_shift_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)"
  3644. },
  3645. {
  3646. "name": "aten::linalg_inv_ex(Tensor A, *, bool check_errors=False) -> (Tensor inverse, Tensor info)"
  3647. },
  3648. {
  3649. "name": "aten::linalg_inv_ex.inverse(Tensor A, *, bool check_errors=False, Tensor(a!) inverse, Tensor(b!) info) -> (Tensor(a!) inverse, Tensor(b!) info)"
  3650. },
  3651. {
  3652. "name": "aten::bitwise_or_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)"
  3653. },
  3654. {
  3655. "name": "aten::bitwise_or_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)"
  3656. },
  3657. {
  3658. "name": "quantized::conv_transpose2d_output_padding(__torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weights) -> int[]"
  3659. },
  3660. {
  3661. "name": "aten::std_mean(Tensor self, bool unbiased=True) -> (Tensor, Tensor)"
  3662. },
  3663. {
  3664. "name": "aten::std_mean.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)"
  3665. },
  3666. {
  3667. "name": "aten::std_mean.correction(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False) -> (Tensor, Tensor)"
  3668. },
  3669. {
  3670. "name": "aten::std_mean.names_dim(Tensor self, str[1] dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)"
  3671. },
  3672. {
  3673. "name": "aten::std_mean.correction_names(Tensor self, str[1] dim, *, Scalar? correction=None, bool keepdim=False) -> (Tensor, Tensor)"
  3674. },
  3675. {
  3676. "name": "aten::std_mean.correction_out(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))"
  3677. },
  3678. {
  3679. "name": "aten::masked_select(Tensor self, Tensor mask) -> Tensor"
  3680. },
  3681. {
  3682. "name": "aten::masked_select.out(Tensor self, Tensor mask, *, Tensor(a!) out) -> Tensor(a!)"
  3683. },
  3684. {
  3685. "name": "prepacked::conv2d_clamp_prepack(Tensor W, Tensor? B, int[2] stride, int[2] padding, int[2] dilation, int groups, Scalar? output_min=None, Scalar? output_max=None) -> __torch__.torch.classes.xnnpack.Conv2dOpContext"
  3686. },
  3687. {
  3688. "name": "aten::linalg_tensorsolve(Tensor self, Tensor other, int[]? dims=None) -> Tensor"
  3689. },
  3690. {
  3691. "name": "aten::linalg_tensorsolve.out(Tensor self, Tensor other, int[]? dims=None, *, Tensor(a!) out) -> Tensor(a!)"
  3692. },
  3693. {
  3694. "name": "prepacked::linear_clamp_run(Tensor X, __torch__.torch.classes.xnnpack.LinearOpContext W_prepack) -> Tensor Y",
  3695. "category": "Layer"
  3696. },
  3697. {
  3698. "name": "aten::linalg_tensorinv(Tensor self, int ind=2) -> Tensor"
  3699. },
  3700. {
  3701. "name": "aten::linalg_tensorinv.out(Tensor self, int ind=2, *, Tensor(a!) out) -> Tensor(a!)"
  3702. },
  3703. {
  3704. "name": "aten::bitwise_and_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)"
  3705. },
  3706. {
  3707. "name": "aten::bitwise_and_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)"
  3708. },
  3709. {
  3710. "name": "aten::bitwise_left_shift.Tensor(Tensor self, Tensor other) -> Tensor"
  3711. },
  3712. {
  3713. "name": "aten::bitwise_left_shift.Tensor_Scalar(Tensor self, Scalar other) -> Tensor"
  3714. },
  3715. {
  3716. "name": "aten::bitwise_left_shift.Scalar_Tensor(Scalar self, Tensor other) -> Tensor"
  3717. },
  3718. {
  3719. "name": "aten::bitwise_left_shift.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  3720. },
  3721. {
  3722. "name": "aten::bitwise_left_shift.Tensor_Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)"
  3723. },
  3724. {
  3725. "name": "aten::bitwise_left_shift.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  3726. },
  3727. {
  3728. "name": "aten::linalg_inv(Tensor A) -> Tensor"
  3729. },
  3730. {
  3731. "name": "aten::linalg_inv.out(Tensor A, *, Tensor(a!) out) -> Tensor(a!)"
  3732. },
  3733. {
  3734. "name": "aten::bitwise_and.Tensor(Tensor self, Tensor other) -> Tensor"
  3735. },
  3736. {
  3737. "name": "aten::bitwise_and.Scalar(Tensor self, Scalar other) -> Tensor"
  3738. },
  3739. {
  3740. "name": "aten::bitwise_and.Scalar_Tensor(Scalar self, Tensor other) -> Tensor"
  3741. },
  3742. {
  3743. "name": "aten::bitwise_and.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  3744. },
  3745. {
  3746. "name": "aten::bitwise_and.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)"
  3747. },
  3748. {
  3749. "name": "aten::bitwise_and.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  3750. },
  3751. {
  3752. "name": "aten::binomial(Tensor count, Tensor prob, Generator? generator=None) -> Tensor"
  3753. },
  3754. {
  3755. "name": "aten::binomial.out(Tensor count, Tensor prob, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!)"
  3756. },
  3757. {
  3758. "name": "aten::fft_irfftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor"
  3759. },
  3760. {
  3761. "name": "aten::fft_irfftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)"
  3762. },
  3763. {
  3764. "name": "aten::fft_irfft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2, -1], str? norm=None) -> Tensor"
  3765. },
  3766. {
  3767. "name": "aten::fft_irfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2, -1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)"
  3768. },
  3769. {
  3770. "name": "aten::fft_rfft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2, -1], str? norm=None) -> Tensor"
  3771. },
  3772. {
  3773. "name": "aten::fft_rfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2, -1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)"
  3774. },
  3775. {
  3776. "name": "aten::fft_rfft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor"
  3777. },
  3778. {
  3779. "name": "aten::fft_rfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)"
  3780. },
  3781. {
  3782. "name": "aten::fft_fft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor"
  3783. },
  3784. {
  3785. "name": "aten::fft_fft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)"
  3786. },
  3787. {
  3788. "name": "aten::zero_(Tensor(a!) self) -> Tensor(a!)"
  3789. },
  3790. {
  3791. "name": "aten::fft_fftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor"
  3792. },
  3793. {
  3794. "name": "aten::fft_fftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)"
  3795. },
  3796. {
  3797. "name": "aten::fft_fft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2, -1], str? norm=None) -> Tensor"
  3798. },
  3799. {
  3800. "name": "aten::fft_fft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2, -1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)"
  3801. },
  3802. {
  3803. "name": "aten::ctc_loss.IntList(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank=0, int reduction=1, bool zero_infinity=False) -> Tensor"
  3804. },
  3805. {
  3806. "name": "aten::ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank=0, int reduction=1, bool zero_infinity=False) -> Tensor"
  3807. },
  3808. {
  3809. "name": "aten::swapaxes_(Tensor(a!) self, int axis0, int axis1) -> Tensor(a!)"
  3810. },
  3811. {
  3812. "name": "aten::cross_entropy_loss(Tensor self, Tensor target, Tensor? weight=None, int reduction=1, SymInt ignore_index=-100, float label_smoothing=0.) -> Tensor"
  3813. },
  3814. {
  3815. "name": "aten::acosh(Tensor self) -> Tensor"
  3816. },
  3817. {
  3818. "name": "aten::acosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  3819. },
  3820. {
  3821. "name": "aten::acosh.int(int a) -> float"
  3822. },
  3823. {
  3824. "name": "aten::acosh.float(float a) -> float"
  3825. },
  3826. {
  3827. "name": "aten::acosh.complex(complex a) -> complex"
  3828. },
  3829. {
  3830. "name": "aten::acosh.Scalar(Scalar a) -> Scalar"
  3831. },
  3832. {
  3833. "name": "aten::replication_pad3d(Tensor self, SymInt[6] padding) -> Tensor",
  3834. "category": "Tensor"
  3835. },
  3836. {
  3837. "name": "aten::replication_pad3d.out(Tensor self, SymInt[6] padding, *, Tensor(a!) out) -> Tensor(a!)"
  3838. },
  3839. {
  3840. "name": "aten::acos(Tensor self) -> Tensor"
  3841. },
  3842. {
  3843. "name": "aten::acos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  3844. },
  3845. {
  3846. "name": "aten::acos.int(int a) -> float"
  3847. },
  3848. {
  3849. "name": "aten::acos.float(float a) -> float"
  3850. },
  3851. {
  3852. "name": "aten::acos.complex(complex a) -> complex"
  3853. },
  3854. {
  3855. "name": "aten::acos.Scalar(Scalar a) -> Scalar"
  3856. },
  3857. {
  3858. "name": "aten::replication_pad2d(Tensor self, SymInt[4] padding) -> Tensor",
  3859. "category": "Tensor"
  3860. },
  3861. {
  3862. "name": "aten::replication_pad2d.out(Tensor self, SymInt[4] padding, *, Tensor(a!) out) -> Tensor(a!)"
  3863. },
  3864. {
  3865. "name": "aten::replication_pad1d(Tensor self, SymInt[2] padding) -> Tensor",
  3866. "category": "Tensor"
  3867. },
  3868. {
  3869. "name": "aten::replication_pad1d.out(Tensor self, SymInt[2] padding, *, Tensor(a!) out) -> Tensor(a!)"
  3870. },
  3871. {
  3872. "name": "aten::reshape(Tensor(a) self, SymInt[] shape) -> Tensor(a)",
  3873. "category": "Shape"
  3874. },
  3875. {
  3876. "name": "aten::adaptive_avg_pool3d(Tensor self, SymInt[3] output_size) -> Tensor",
  3877. "category": "Pool"
  3878. },
  3879. {
  3880. "name": "aten::adaptive_avg_pool3d.out(Tensor self, SymInt[3] output_size, *, Tensor(a!) out) -> Tensor(a!)"
  3881. },
  3882. {
  3883. "name": "aten::_native_multi_head_attention(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None, bool need_weights=True, bool average_attn_weights=True, int? mask_type=None) -> (Tensor, Tensor)",
  3884. "category": "Attention"
  3885. },
  3886. {
  3887. "name": "aten::_native_multi_head_attention.out(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None, bool need_weights=True, bool average_attn_weights=True, int? mask_type=None, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))"
  3888. },
  3889. {
  3890. "name": "aten::max_unpool2d(Tensor self, Tensor indices, SymInt[2] output_size) -> Tensor",
  3891. "category": "Pool"
  3892. },
  3893. {
  3894. "name": "aten::max_unpool2d.out(Tensor self, Tensor indices, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!)"
  3895. },
  3896. {
  3897. "name": "aten::pinverse(Tensor self, float rcond=1.0000000000000001e-15) -> Tensor"
  3898. },
  3899. {
  3900. "name": "aten::fft_irfft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor"
  3901. },
  3902. {
  3903. "name": "aten::fft_irfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)"
  3904. },
  3905. {
  3906. "name": "aten::logaddexp2(Tensor self, Tensor other) -> Tensor"
  3907. },
  3908. {
  3909. "name": "aten::logaddexp2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  3910. },
  3911. {
  3912. "name": "aten::logical_and(Tensor self, Tensor other) -> Tensor"
  3913. },
  3914. {
  3915. "name": "aten::logical_and.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  3916. },
  3917. {
  3918. "name": "aten::reflection_pad1d(Tensor self, SymInt[2] padding) -> Tensor",
  3919. "category": "Tensor"
  3920. },
  3921. {
  3922. "name": "aten::reflection_pad1d.out(Tensor self, SymInt[2] padding, *, Tensor(a!) out) -> Tensor(a!)"
  3923. },
  3924. {
  3925. "name": "aten::quantile(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation=\"linear\") -> Tensor"
  3926. },
  3927. {
  3928. "name": "aten::quantile.scalar(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation=\"linear\") -> Tensor"
  3929. },
  3930. {
  3931. "name": "aten::quantile.out(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation=\"linear\", Tensor(a!) out) -> Tensor(a!)"
  3932. },
  3933. {
  3934. "name": "aten::quantile.scalar_out(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation=\"linear\", Tensor(a!) out) -> Tensor(a!)"
  3935. },
  3936. {
  3937. "name": "aten::polar(Tensor abs, Tensor angle) -> Tensor"
  3938. },
  3939. {
  3940. "name": "aten::polar.out(Tensor abs, Tensor angle, *, Tensor(a!) out) -> Tensor(a!)"
  3941. },
  3942. {
  3943. "name": "aten::polar.int(int a, int b) -> complex"
  3944. },
  3945. {
  3946. "name": "aten::polar.float(float a, float b) -> complex"
  3947. },
  3948. {
  3949. "name": "aten::polar.int_float(int a, float b) -> complex"
  3950. },
  3951. {
  3952. "name": "aten::polar.float_int(float a, int b) -> complex"
  3953. },
  3954. {
  3955. "name": "aten::polar.Scalar_Scalar(Scalar a, Scalar b) -> Scalar"
  3956. },
  3957. {
  3958. "name": "aten::slogdet(Tensor self) -> (Tensor sign, Tensor logabsdet)"
  3959. },
  3960. {
  3961. "name": "aten::slogdet.out(Tensor self, *, Tensor(a!) sign, Tensor(b!) logabsdet) -> (Tensor(a!) sign, Tensor(b!) logabsdet)"
  3962. },
  3963. {
  3964. "name": "aten::avg_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None) -> Tensor",
  3965. "category": "Pool"
  3966. },
  3967. {
  3968. "name": "aten::avg_pool3d.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None, *, Tensor(a!) out) -> Tensor(a!)"
  3969. },
  3970. {
  3971. "name": "aten::max_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor",
  3972. "category": "Pool"
  3973. },
  3974. {
  3975. "name": "aten::batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps, bool cudnn_enabled) -> Tensor",
  3976. "category": "Normalization"
  3977. },
  3978. {
  3979. "name": "aten::sort(Tensor self, int dim=-1, bool descending=False) -> (Tensor values, Tensor indices)"
  3980. },
  3981. {
  3982. "name": "aten::sort.stable(Tensor self, *, bool? stable, int dim=-1, bool descending=False) -> (Tensor values, Tensor indices)"
  3983. },
  3984. {
  3985. "name": "aten::sort.values_stable(Tensor self, *, bool? stable, int dim=-1, bool descending=False, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)"
  3986. },
  3987. {
  3988. "name": "aten::sort.values(Tensor self, int dim=-1, bool descending=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)"
  3989. },
  3990. {
  3991. "name": "aten::sort.dimname(Tensor self, str dim, bool descending=False) -> (Tensor values, Tensor indices)"
  3992. },
  3993. {
  3994. "name": "aten::sort.dimname_values(Tensor self, str dim, bool descending=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)"
  3995. },
  3996. {
  3997. "name": "aten::sort.dimname_stable(Tensor self, *, bool? stable, str dim, bool descending=False) -> (Tensor values, Tensor indices)"
  3998. },
  3999. {
  4000. "name": "aten::sort.dimname_values_stable(Tensor self, *, bool? stable, str dim, bool descending=False, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)"
  4001. },
  4002. {
  4003. "name": "aten::sort.int(int[](a!) self, bool reverse=False) -> ()"
  4004. },
  4005. {
  4006. "name": "aten::sort.float(float[](a!) self, bool reverse=False) -> ()"
  4007. },
  4008. {
  4009. "name": "aten::sort.Tensor(Tensor[](a!) self, bool reverse=False) -> ()"
  4010. },
  4011. {
  4012. "name": "aten::sort.bool(bool[](a!) self, bool reverse=False) -> ()"
  4013. },
  4014. {
  4015. "name": "aten::sort.str(str[](a!) self, bool reverse=False) -> ()"
  4016. },
  4017. {
  4018. "name": "aten::sort.any(t[](a!) self, bool reverse=False) -> ()"
  4019. },
  4020. {
  4021. "name": "aten::binary_cross_entropy(Tensor self, Tensor target, Tensor? weight=None, int reduction=1) -> Tensor"
  4022. },
  4023. {
  4024. "name": "aten::binary_cross_entropy.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=1, *, Tensor(a!) out) -> Tensor(a!)"
  4025. },
  4026. {
  4027. "name": "aten::scatter_add(Tensor self, int dim, Tensor index, Tensor src) -> Tensor"
  4028. },
  4029. {
  4030. "name": "aten::scatter_add.out(Tensor self, int dim, Tensor index, Tensor src, *, Tensor(a!) out) -> Tensor(a!)"
  4031. },
  4032. {
  4033. "name": "aten::scatter_add.dimname(Tensor self, str dim, Tensor index, Tensor src) -> Tensor"
  4034. },
  4035. {
  4036. "name": "aten::tensordot(Tensor self, Tensor other, int[] dims_self, int[] dims_other) -> Tensor"
  4037. },
  4038. {
  4039. "name": "aten::tensordot.out(Tensor self, Tensor other, int[] dims_self, int[] dims_other, *, Tensor(a!) out) -> Tensor(a!)"
  4040. },
  4041. {
  4042. "name": "aten::index_put(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor"
  4043. },
  4044. {
  4045. "name": "aten::index_put.out(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False, *, Tensor(a!) out) -> Tensor(a!)"
  4046. },
  4047. {
  4048. "name": "aten::index_put.hacked_twin(Tensor self, Tensor[] indices, Tensor values, bool accumulate=False) -> Tensor"
  4049. },
  4050. {
  4051. "name": "quantized::cat_relu(Tensor[] qx, int dim, float? scale, int? zero_point) -> Tensor",
  4052. "category": "Tensor"
  4053. },
  4054. {
  4055. "name": "aten::grid_sampler(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor"
  4056. },
  4057. {
  4058. "name": "aten::atan2(Tensor self, Tensor other) -> Tensor"
  4059. },
  4060. {
  4061. "name": "aten::atan2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  4062. },
  4063. {
  4064. "name": "aten::atan2.int(int a, int b) -> float"
  4065. },
  4066. {
  4067. "name": "aten::atan2.float(float a, float b) -> float"
  4068. },
  4069. {
  4070. "name": "aten::atan2.int_float(int a, float b) -> float"
  4071. },
  4072. {
  4073. "name": "aten::atan2.float_int(float a, int b) -> float"
  4074. },
  4075. {
  4076. "name": "aten::atan2.Scalar_Scalar(Scalar a, Scalar b) -> float"
  4077. },
  4078. {
  4079. "name": "aten::feature_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)",
  4080. "category": "Dropout"
  4081. },
  4082. {
  4083. "name": "aten::where.self(Tensor condition, Tensor self, Tensor other) -> Tensor"
  4084. },
  4085. {
  4086. "name": "aten::where.ScalarOther(Tensor condition, Tensor self, Scalar other) -> Tensor"
  4087. },
  4088. {
  4089. "name": "aten::where.ScalarSelf(Tensor condition, Scalar self, Tensor other) -> Tensor"
  4090. },
  4091. {
  4092. "name": "aten::where.Scalar(Tensor condition, Scalar self, Scalar other) -> Tensor"
  4093. },
  4094. {
  4095. "name": "aten::where(Tensor condition) -> Tensor[]"
  4096. },
  4097. {
  4098. "name": "aten::where.self_out(Tensor condition, Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  4099. },
  4100. {
  4101. "name": "aten::norm.Scalar(Tensor self, Scalar p=2) -> Tensor"
  4102. },
  4103. {
  4104. "name": "aten::norm.ScalarOpt_dim(Tensor self, Scalar? p, int[1] dim, bool keepdim=False) -> Tensor"
  4105. },
  4106. {
  4107. "name": "aten::norm.names_ScalarOpt_dim(Tensor self, Scalar? p, str[1] dim, bool keepdim=False) -> Tensor"
  4108. },
  4109. {
  4110. "name": "aten::norm.ScalarOpt_dim_dtype(Tensor self, Scalar? p, int[1] dim, bool keepdim, *, ScalarType dtype) -> Tensor"
  4111. },
  4112. {
  4113. "name": "aten::norm.dtype_out(Tensor self, Scalar? p, int[1] dim, bool keepdim, *, ScalarType dtype, Tensor(a!) out) -> Tensor(a!)"
  4114. },
  4115. {
  4116. "name": "aten::norm.out(Tensor self, Scalar? p, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)"
  4117. },
  4118. {
  4119. "name": "aten::norm.ScalarOpt_dtype(Tensor self, Scalar? p, *, ScalarType dtype) -> Tensor"
  4120. },
  4121. {
  4122. "name": "aten::norm.ScalarOpt_dtype_out(Tensor self, Scalar? p, *, ScalarType dtype, Tensor(a!) out) -> Tensor(a!)"
  4123. },
  4124. {
  4125. "name": "aten::norm.Scalar_out(Tensor self, Scalar p=2, *, Tensor(a!) out) -> Tensor(a!)"
  4126. },
  4127. {
  4128. "name": "aten::norm.names_ScalarOpt_dim_dtype(Tensor self, Scalar? p, str[1] dim, bool keepdim, *, ScalarType dtype) -> Tensor"
  4129. },
  4130. {
  4131. "name": "aten::norm.names_dtype_out(Tensor self, Scalar? p, str[1] dim, bool keepdim, *, ScalarType dtype, Tensor(a!) out) -> Tensor(a!)"
  4132. },
  4133. {
  4134. "name": "aten::norm.names_out(Tensor self, Scalar? p, str[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)"
  4135. },
  4136. {
  4137. "name": "prepacked::conv2d_transpose_clamp_run(Tensor X, __torch__.torch.classes.xnnpack.TransposeConv2dOpContext W_prepack) -> Tensor Y",
  4138. "category": "Layer"
  4139. },
  4140. {
  4141. "name": "aten::linalg_vector_norm(Tensor self, Scalar ord=2, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor"
  4142. },
  4143. {
  4144. "name": "aten::linalg_vector_norm.out(Tensor self, Scalar ord=2, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)"
  4145. },
  4146. {
  4147. "name": "aten::cumsum(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor"
  4148. },
  4149. {
  4150. "name": "aten::cumsum.dimname(Tensor self, str dim, *, ScalarType? dtype=None) -> Tensor"
  4151. },
  4152. {
  4153. "name": "aten::cumsum.dimname_out(Tensor self, str dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)"
  4154. },
  4155. {
  4156. "name": "aten::cumsum.out(Tensor self, int dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)"
  4157. },
  4158. {
  4159. "name": "aten::cumprod(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor"
  4160. },
  4161. {
  4162. "name": "aten::cumprod.dimname(Tensor self, str dim, *, ScalarType? dtype=None) -> Tensor"
  4163. },
  4164. {
  4165. "name": "aten::cumprod.dimname_out(Tensor self, str dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)"
  4166. },
  4167. {
  4168. "name": "aten::cumprod.out(Tensor self, int dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)"
  4169. },
  4170. {
  4171. "name": "aten::log_softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor",
  4172. "category": "Activation"
  4173. },
  4174. {
  4175. "name": "aten::log_softmax.Dimname(Tensor self, str dim, *, ScalarType? dtype=None) -> Tensor",
  4176. "category": "Activation"
  4177. },
  4178. {
  4179. "name": "aten::log_softmax.int_out(Tensor self, int dim, ScalarType? dtype=None, *, Tensor(a!) out) -> Tensor(a!)"
  4180. },
  4181. {
  4182. "name": "aten::__ixor__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)"
  4183. },
  4184. {
  4185. "name": "aten::__ixor__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)"
  4186. },
  4187. {
  4188. "name": "aten::prod(Tensor self, *, ScalarType? dtype=None) -> Tensor"
  4189. },
  4190. {
  4191. "name": "aten::prod.dim_int(Tensor self, int dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor"
  4192. },
  4193. {
  4194. "name": "aten::prod.dim_Dimname(Tensor self, str dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor"
  4195. },
  4196. {
  4197. "name": "aten::prod.Dimname_out(Tensor self, str dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)"
  4198. },
  4199. {
  4200. "name": "aten::prod.int_out(Tensor self, int dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)"
  4201. },
  4202. {
  4203. "name": "aten::prod.out(Tensor self, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)"
  4204. },
  4205. {
  4206. "name": "aten::rand_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor"
  4207. },
  4208. {
  4209. "name": "aten::rand_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)"
  4210. },
  4211. {
  4212. "name": "aten::rand_like.generator(Tensor self, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor"
  4213. },
  4214. {
  4215. "name": "aten::rand_like.generator_out(Tensor self, *, Generator? generator, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)"
  4216. },
  4217. {
  4218. "name": "aten::_upsample_bicubic2d_aa(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor"
  4219. },
  4220. {
  4221. "name": "aten::_upsample_bicubic2d_aa.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor"
  4222. },
  4223. {
  4224. "name": "aten::_upsample_bicubic2d_aa.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)"
  4225. },
  4226. {
  4227. "name": "aten::upsample_bicubic2d(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor",
  4228. "category": "Layer"
  4229. },
  4230. {
  4231. "name": "aten::upsample_bicubic2d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor",
  4232. "category": "Layer"
  4233. },
  4234. {
  4235. "name": "aten::upsample_bicubic2d.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)"
  4236. },
  4237. {
  4238. "name": "aten::exponential_(Tensor(a!) self, float lambd=1., *, Generator? generator=None) -> Tensor(a!)"
  4239. },
  4240. {
  4241. "name": "aten::upsample_trilinear3d(Tensor self, SymInt[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor"
  4242. },
  4243. {
  4244. "name": "aten::upsample_trilinear3d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor"
  4245. },
  4246. {
  4247. "name": "aten::upsample_trilinear3d.out(Tensor self, SymInt[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)"
  4248. },
  4249. {
  4250. "name": "aten::exp(Tensor self) -> Tensor"
  4251. },
  4252. {
  4253. "name": "aten::exp.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  4254. },
  4255. {
  4256. "name": "aten::exp.int(int a) -> float"
  4257. },
  4258. {
  4259. "name": "aten::exp.float(float a) -> float"
  4260. },
  4261. {
  4262. "name": "aten::exp.complex(complex a) -> complex"
  4263. },
  4264. {
  4265. "name": "aten::exp.Scalar(Scalar a) -> Scalar"
  4266. },
  4267. {
  4268. "name": "aten::upsample_bilinear2d(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor",
  4269. "category": "Layer"
  4270. },
  4271. {
  4272. "name": "aten::upsample_bilinear2d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor",
  4273. "category": "Layer"
  4274. },
  4275. {
  4276. "name": "aten::upsample_bilinear2d.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)"
  4277. },
  4278. {
  4279. "name": "aten::upsample_bilinear2d.vec_out(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)"
  4280. },
  4281. {
  4282. "name": "quantized::batch_norm1d(Tensor qx, Tensor? weight, Tensor? bias, Tensor mean, Tensor var, float eps, float output_scale, int output_zero_point) -> Tensor",
  4283. "category": "Normalization"
  4284. },
  4285. {
  4286. "name": "aten::glu(Tensor self, int dim=-1) -> Tensor",
  4287. "category": "Activation"
  4288. },
  4289. {
  4290. "name": "aten::glu.out(Tensor self, int dim=-1, *, Tensor(a!) out) -> Tensor(a!)"
  4291. },
  4292. {
  4293. "name": "aten::_upsample_nearest_exact3d(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor"
  4294. },
  4295. {
  4296. "name": "aten::_upsample_nearest_exact3d.out(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)"
  4297. },
  4298. {
  4299. "name": "aten::_upsample_nearest_exact3d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor"
  4300. },
  4301. {
  4302. "name": "aten::expm1_(Tensor(a!) self) -> Tensor(a!)"
  4303. },
  4304. {
  4305. "name": "aten::upsample_nearest3d(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor",
  4306. "category": "Layer"
  4307. },
  4308. {
  4309. "name": "aten::upsample_nearest3d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor",
  4310. "category": "Layer"
  4311. },
  4312. {
  4313. "name": "aten::upsample_nearest3d.out(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)"
  4314. },
  4315. {
  4316. "name": "aten::renorm(Tensor self, Scalar p, int dim, Scalar maxnorm) -> Tensor"
  4317. },
  4318. {
  4319. "name": "aten::renorm.out(Tensor self, Scalar p, int dim, Scalar maxnorm, *, Tensor(a!) out) -> Tensor(a!)"
  4320. },
  4321. {
  4322. "name": "aten::fft_rfftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor"
  4323. },
  4324. {
  4325. "name": "aten::fft_rfftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)"
  4326. },
  4327. {
  4328. "name": "aten::pdist(Tensor self, float p=2.) -> Tensor"
  4329. },
  4330. {
  4331. "name": "aten::binary_cross_entropy_with_logits(Tensor self, Tensor target, Tensor? weight=None, Tensor? pos_weight=None, int reduction=1) -> Tensor"
  4332. },
  4333. {
  4334. "name": "aten::binary_cross_entropy_with_logits.out(Tensor self, Tensor target, Tensor? weight=None, Tensor? pos_weight=None, int reduction=1, *, Tensor(a!) out) -> Tensor(a!)"
  4335. },
  4336. {
  4337. "name": "quantized::conv2d_stride(__torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weights) -> int[]"
  4338. },
  4339. {
  4340. "name": "quantized::conv3d_prepack(Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups) -> __torch__.torch.classes.quantized.Conv3dPackedParamsBase"
  4341. },
  4342. {
  4343. "name": "aten::huber_loss(Tensor self, Tensor target, int reduction=1, float delta=1.) -> Tensor"
  4344. },
  4345. {
  4346. "name": "aten::huber_loss.out(Tensor self, Tensor target, int reduction=1, float delta=1., *, Tensor(a!) out) -> Tensor(a!)"
  4347. },
  4348. {
  4349. "name": "aten::fft_ifft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2, -1], str? norm=None) -> Tensor"
  4350. },
  4351. {
  4352. "name": "aten::fft_ifft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2, -1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)"
  4353. },
  4354. {
  4355. "name": "aten::frobenius_norm.dim(Tensor self, int[1] dim, bool keepdim=False) -> Tensor",
  4356. "category": "Normalization"
  4357. },
  4358. {
  4359. "name": "aten::frobenius_norm.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)"
  4360. },
  4361. {
  4362. "name": "quantized::mul_relu(Tensor qa, Tensor qb, float scale, int zero_point) -> Tensor qc"
  4363. },
  4364. {
  4365. "name": "quantized::mul_relu.out(Tensor qa, Tensor qb, Tensor(a!) out) -> Tensor(a!) out"
  4366. },
  4367. {
  4368. "name": "quantized::mul_relu.Scalar(Tensor qa, Scalar b) -> Tensor qc"
  4369. },
  4370. {
  4371. "name": "quantized::mul_relu.Scalar2(Scalar b, Tensor qa) -> Tensor qc"
  4372. },
  4373. {
  4374. "name": "quantized::mul_relu.Scalar_out(Tensor qa, Scalar b, Tensor(a!) out) -> Tensor(a!) out"
  4375. },
  4376. {
  4377. "name": "aten::layer_norm(Tensor input, SymInt[] normalized_shape, Tensor? weight=None, Tensor? bias=None, float eps=1.0000000000000001e-05, bool cudnn_enable=True) -> Tensor",
  4378. "category": "Normalization"
  4379. },
  4380. {
  4381. "name": "aten::bilinear(Tensor input1, Tensor input2, Tensor weight, Tensor? bias=None) -> Tensor"
  4382. },
  4383. {
  4384. "name": "aten::softplus(Tensor self, Scalar beta=1, Scalar threshold=20) -> Tensor",
  4385. "category": "Activation"
  4386. },
  4387. {
  4388. "name": "aten::softplus.out(Tensor self, Scalar beta=1, Scalar threshold=20, *, Tensor(a!) out) -> Tensor(a!)"
  4389. },
  4390. {
  4391. "name": "aten::max_unpool3d(Tensor self, Tensor indices, SymInt[3] output_size, int[3] stride, int[3] padding) -> Tensor",
  4392. "category": "Pool"
  4393. },
  4394. {
  4395. "name": "aten::max_unpool3d.out(Tensor self, Tensor indices, SymInt[3] output_size, int[3] stride, int[3] padding, *, Tensor(a!) out) -> Tensor(a!)"
  4396. },
  4397. {
  4398. "name": "aten::inverse(Tensor self) -> Tensor"
  4399. },
  4400. {
  4401. "name": "aten::inverse.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  4402. },
  4403. {
  4404. "name": "aten::angle(Tensor self) -> Tensor"
  4405. },
  4406. {
  4407. "name": "aten::angle.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  4408. },
  4409. {
  4410. "name": "aten::angle.int(int a) -> float"
  4411. },
  4412. {
  4413. "name": "aten::angle.float(float a) -> float"
  4414. },
  4415. {
  4416. "name": "aten::angle.complex(complex a) -> float"
  4417. },
  4418. {
  4419. "name": "aten::angle.Scalar(Scalar a) -> Scalar"
  4420. },
  4421. {
  4422. "name": "aten::scaled_dot_product_attention(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0., bool is_causal=False, *, float? scale=None, bool enable_gqa=False) -> Tensor",
  4423. "category": "Attention"
  4424. },
  4425. {
  4426. "name": "aten::addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor",
  4427. "category": "Layer"
  4428. },
  4429. {
  4430. "name": "aten::addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)"
  4431. },
  4432. {
  4433. "name": "aten::addmv(Tensor self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1) -> Tensor"
  4434. },
  4435. {
  4436. "name": "aten::addmv.out(Tensor self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)"
  4437. },
  4438. {
  4439. "name": "aten::rnn_tanh_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> Tensor"
  4440. },
  4441. {
  4442. "name": "aten::__irshift__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)"
  4443. },
  4444. {
  4445. "name": "aten::__irshift__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)"
  4446. },
  4447. {
  4448. "name": "aten::prelu(Tensor self, Tensor weight) -> Tensor",
  4449. "category": "Activation"
  4450. },
  4451. {
  4452. "name": "aten::_thnn_fused_gru_cell(Tensor input_gates, Tensor hidden_gates, Tensor hx, Tensor? input_bias=None, Tensor? hidden_bias=None) -> (Tensor, Tensor)"
  4453. },
  4454. {
  4455. "name": "aten::_thnn_fused_gru_cell.out(Tensor input_gates, Tensor hidden_gates, Tensor hx, Tensor? input_bias=None, Tensor? hidden_bias=None, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))"
  4456. },
  4457. {
  4458. "name": "aten::resolve_neg(Tensor(a) self) -> Tensor(a)"
  4459. },
  4460. {
  4461. "name": "aten::linear(Tensor input, Tensor weight, Tensor? bias=None) -> Tensor",
  4462. "category": "Layer"
  4463. },
  4464. {
  4465. "name": "aten::linear.out(Tensor input, Tensor weight, Tensor? bias=None, *, Tensor(a!) out) -> Tensor(a!)"
  4466. },
  4467. {
  4468. "name": "aten::conv3d(Tensor input, Tensor weight, Tensor? bias=None, SymInt[3] stride=[1, 1, 1], SymInt[3] padding=[0, 0, 0], SymInt[3] dilation=[1, 1, 1], SymInt groups=1) -> Tensor",
  4469. "category": "Layer"
  4470. },
  4471. {
  4472. "name": "aten::conv3d.padding(Tensor input, Tensor weight, Tensor? bias=None, SymInt[3] stride=[1, 1, 1], str padding=\"valid\", SymInt[3] dilation=[1, 1, 1], SymInt groups=1) -> Tensor",
  4473. "category": "Layer"
  4474. },
  4475. {
  4476. "name": "aten::conv2d(Tensor input, Tensor weight, Tensor? bias=None, SymInt[2] stride=[1, 1], SymInt[2] padding=[0, 0], SymInt[2] dilation=[1, 1], SymInt groups=1) -> Tensor",
  4477. "category": "Layer"
  4478. },
  4479. {
  4480. "name": "aten::conv2d.padding(Tensor input, Tensor weight, Tensor? bias=None, SymInt[2] stride=[1, 1], str padding=\"valid\", SymInt[2] dilation=[1, 1], SymInt groups=1) -> Tensor",
  4481. "category": "Layer"
  4482. },
  4483. {
  4484. "name": "aten::conv1d(Tensor input, Tensor weight, Tensor? bias=None, SymInt[1] stride=[1], SymInt[1] padding=[0], SymInt[1] dilation=[1], SymInt groups=1) -> Tensor",
  4485. "category": "Layer"
  4486. },
  4487. {
  4488. "name": "aten::conv1d.padding(Tensor input, Tensor weight, Tensor? bias=None, SymInt[1] stride=[1], str padding=\"valid\", SymInt[1] dilation=[1], SymInt groups=1) -> Tensor",
  4489. "category": "Layer"
  4490. },
  4491. {
  4492. "name": "aten::split_with_sizes(Tensor(a -> *) self, SymInt[] split_sizes, int dim=0) -> Tensor(a)[]",
  4493. "category": "Tensor"
  4494. },
  4495. {
  4496. "name": "aten::conv_transpose1d(Tensor input, Tensor weight, Tensor? bias=None, SymInt[1] stride=[1], SymInt[1] padding=[0], SymInt[1] output_padding=[0], SymInt groups=1, SymInt[1] dilation=[1]) -> Tensor",
  4497. "category": "Layer"
  4498. },
  4499. {
  4500. "name": "aten::randn_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor"
  4501. },
  4502. {
  4503. "name": "aten::randn_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)"
  4504. },
  4505. {
  4506. "name": "aten::randn_like.generator(Tensor self, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor"
  4507. },
  4508. {
  4509. "name": "aten::randn_like.generator_out(Tensor self, *, Generator? generator, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)"
  4510. },
  4511. {
  4512. "name": "aten::_upsample_nearest_exact1d(Tensor self, SymInt[1] output_size, float? scales=None) -> Tensor"
  4513. },
  4514. {
  4515. "name": "aten::_upsample_nearest_exact1d.out(Tensor self, SymInt[1] output_size, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)"
  4516. },
  4517. {
  4518. "name": "aten::_upsample_nearest_exact1d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor"
  4519. },
  4520. {
  4521. "name": "aten::randint(SymInt high, SymInt[] size, *, ScalarType? dtype=4, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  4522. },
  4523. {
  4524. "name": "aten::randint.generator(SymInt high, SymInt[] size, *, Generator? generator, ScalarType? dtype=4, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  4525. },
  4526. {
  4527. "name": "aten::randint.low(SymInt low, SymInt high, SymInt[] size, *, ScalarType? dtype=4, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  4528. },
  4529. {
  4530. "name": "aten::randint.low_generator(SymInt low, SymInt high, SymInt[] size, *, Generator? generator, ScalarType? dtype=4, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  4531. },
  4532. {
  4533. "name": "aten::randint.out(SymInt high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)"
  4534. },
  4535. {
  4536. "name": "aten::randint.generator_out(SymInt high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)"
  4537. },
  4538. {
  4539. "name": "aten::randint.low_out(SymInt low, SymInt high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)"
  4540. },
  4541. {
  4542. "name": "aten::randint.low_generator_out(SymInt low, SymInt high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)"
  4543. },
  4544. {
  4545. "name": "aten::sqrt(Tensor self) -> Tensor"
  4546. },
  4547. {
  4548. "name": "aten::sqrt.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  4549. },
  4550. {
  4551. "name": "aten::sqrt.int(int a) -> float"
  4552. },
  4553. {
  4554. "name": "aten::sqrt.float(float a) -> float"
  4555. },
  4556. {
  4557. "name": "aten::sqrt.complex(complex a) -> complex"
  4558. },
  4559. {
  4560. "name": "aten::sqrt.Scalar(Scalar a) -> Scalar"
  4561. },
  4562. {
  4563. "name": "aten::conv_transpose3d.input(Tensor input, Tensor weight, Tensor? bias=None, SymInt[3] stride=[1, 1, 1], SymInt[3] padding=[0, 0, 0], SymInt[3] output_padding=[0, 0, 0], SymInt groups=1, SymInt[3] dilation=[1, 1, 1]) -> Tensor",
  4564. "category": "Layer"
  4565. },
  4566. {
  4567. "name": "quantized::batch_norm(Tensor qx, Tensor? weight, Tensor? bias, Tensor mean, Tensor var, float eps, float output_scale, int output_zero_point) -> Tensor"
  4568. },
  4569. {
  4570. "name": "aten::geqrf(Tensor self) -> (Tensor a, Tensor tau)"
  4571. },
  4572. {
  4573. "name": "aten::geqrf.a(Tensor self, *, Tensor(a!) a, Tensor(b!) tau) -> (Tensor(a!) a, Tensor(b!) tau)"
  4574. },
  4575. {
  4576. "name": "aten::random_.from(Tensor(a!) self, int from, int? to, *, Generator? generator=None) -> Tensor(a!)"
  4577. },
  4578. {
  4579. "name": "aten::random_.to(Tensor(a!) self, int to, *, Generator? generator=None) -> Tensor(a!)"
  4580. },
  4581. {
  4582. "name": "aten::random_(Tensor(a!) self, *, Generator? generator=None) -> Tensor(a!)"
  4583. },
  4584. {
  4585. "name": "aten::_upsample_nearest_exact2d(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None) -> Tensor"
  4586. },
  4587. {
  4588. "name": "aten::_upsample_nearest_exact2d.out(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)"
  4589. },
  4590. {
  4591. "name": "aten::_upsample_nearest_exact2d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor"
  4592. },
  4593. {
  4594. "name": "aten::randn(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  4595. },
  4596. {
  4597. "name": "aten::randn.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  4598. },
  4599. {
  4600. "name": "aten::randn.names(SymInt[] size, *, str[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  4601. },
  4602. {
  4603. "name": "aten::randn.generator_with_names(SymInt[] size, *, Generator? generator, str[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  4604. },
  4605. {
  4606. "name": "aten::randn.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)"
  4607. },
  4608. {
  4609. "name": "aten::randn.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)"
  4610. },
  4611. {
  4612. "name": "aten::randn.names_out(SymInt[] size, *, str[]? names, Tensor(a!) out) -> Tensor(a!)"
  4613. },
  4614. {
  4615. "name": "aten::randn.generator_with_names_out(SymInt[] size, *, Generator? generator, str[]? names, Tensor(a!) out) -> Tensor(a!)"
  4616. },
  4617. {
  4618. "name": "aten::lstm_cell(Tensor input, Tensor[] hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> (Tensor, Tensor)",
  4619. "category": "Layer"
  4620. },
  4621. {
  4622. "name": "aten::split.Tensor(Tensor(a -> *) self, SymInt split_size, int dim=0) -> Tensor(a)[]",
  4623. "category": "Tensor"
  4624. },
  4625. {
  4626. "name": "aten::split.sizes(Tensor(a -> *) self, SymInt[] split_size, int dim=0) -> Tensor(a)[]",
  4627. "category": "Tensor"
  4628. },
  4629. {
  4630. "name": "aten::split.str(str self, str? separator=None, int max=-1) -> str[]"
  4631. },
  4632. {
  4633. "name": "aten::split(Tensor(a -> *) self, int[] split_sizes, int dim=0) -> Tensor(a)[]"
  4634. },
  4635. {
  4636. "name": "aten::conv_transpose2d.input(Tensor input, Tensor weight, Tensor? bias=None, SymInt[2] stride=[1, 1], SymInt[2] padding=[0, 0], SymInt[2] output_padding=[0, 0], SymInt groups=1, SymInt[2] dilation=[1, 1]) -> Tensor",
  4637. "category": "Layer"
  4638. },
  4639. {
  4640. "name": "aten::geometric_(Tensor(a!) self, float p, *, Generator? generator=None) -> Tensor(a!)"
  4641. },
  4642. {
  4643. "name": "aten::_upsample_bilinear2d_aa(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor"
  4644. },
  4645. {
  4646. "name": "aten::_upsample_bilinear2d_aa.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor"
  4647. },
  4648. {
  4649. "name": "aten::_upsample_bilinear2d_aa.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)"
  4650. },
  4651. {
  4652. "name": "aten::randint_like(Tensor self, SymInt high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor"
  4653. },
  4654. {
  4655. "name": "aten::randint_like.low_dtype(Tensor self, SymInt low, SymInt high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor"
  4656. },
  4657. {
  4658. "name": "aten::randint_like.out(Tensor self, SymInt high, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)"
  4659. },
  4660. {
  4661. "name": "aten::randint_like.generator(Tensor self, SymInt high, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor"
  4662. },
  4663. {
  4664. "name": "aten::randint_like.generator_out(Tensor self, SymInt high, *, Generator? generator, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)"
  4665. },
  4666. {
  4667. "name": "aten::randint_like.low_dtype_out(Tensor self, SymInt low, SymInt high, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)"
  4668. },
  4669. {
  4670. "name": "aten::randint_like.generator_with_low_dtype(Tensor self, SymInt low, SymInt high, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor"
  4671. },
  4672. {
  4673. "name": "aten::randint_like.generator_with_low_dtype_out(Tensor self, SymInt low, SymInt high, *, Generator? generator, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)"
  4674. },
  4675. {
  4676. "name": "aten::lt.Tensor(Tensor self, Tensor other) -> Tensor"
  4677. },
  4678. {
  4679. "name": "aten::lt.Scalar(Tensor self, Scalar other) -> Tensor"
  4680. },
  4681. {
  4682. "name": "aten::lt.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)"
  4683. },
  4684. {
  4685. "name": "aten::lt.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  4686. },
  4687. {
  4688. "name": "aten::lt.int(int a, int b) -> bool"
  4689. },
  4690. {
  4691. "name": "aten::lt.float(float a, float b) -> bool"
  4692. },
  4693. {
  4694. "name": "aten::lt.int_float(int a, float b) -> bool"
  4695. },
  4696. {
  4697. "name": "aten::lt.float_int(float a, int b) -> bool"
  4698. },
  4699. {
  4700. "name": "aten::lt(Scalar a, Scalar b) -> bool"
  4701. },
  4702. {
  4703. "name": "aten::lt.str(str a, str b) -> bool"
  4704. },
  4705. {
  4706. "name": "aten::convolution(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups) -> Tensor",
  4707. "category": "Layer"
  4708. },
  4709. {
  4710. "name": "aten::convolution.out(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, *, Tensor(a!) out) -> Tensor(a!)",
  4711. "category": "Layer"
  4712. },
  4713. {
  4714. "name": "quantized::batch_norm1d_relu(Tensor qx, Tensor? weight, Tensor? bias, Tensor mean, Tensor var, float eps, float output_scale, int output_zero_point) -> Tensor",
  4715. "category": "Normalization"
  4716. },
  4717. {
  4718. "name": "aten::zeros_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor"
  4719. },
  4720. {
  4721. "name": "aten::zeros_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)"
  4722. },
  4723. {
  4724. "name": "aten::normal_(Tensor(a!) self, float mean=0., float std=1., *, Generator? generator=None) -> Tensor(a!)"
  4725. },
  4726. {
  4727. "name": "aten::normal.Tensor_float(Tensor mean, float std=1., *, Generator? generator=None) -> Tensor"
  4728. },
  4729. {
  4730. "name": "aten::normal.Tensor_float_out(Tensor mean, float std=1., *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)"
  4731. },
  4732. {
  4733. "name": "aten::normal.float_Tensor_out(float mean, Tensor std, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)"
  4734. },
  4735. {
  4736. "name": "aten::normal.float_Tensor(float mean, Tensor std, *, Generator? generator=None) -> Tensor"
  4737. },
  4738. {
  4739. "name": "aten::normal.Tensor_Tensor(Tensor mean, Tensor std, *, Generator? generator=None) -> Tensor"
  4740. },
  4741. {
  4742. "name": "aten::normal.Tensor_Tensor_out(Tensor mean, Tensor std, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)"
  4743. },
  4744. {
  4745. "name": "aten::normal.float_float(float mean, float std, SymInt[] size, *, Generator? generator=None, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  4746. },
  4747. {
  4748. "name": "aten::normal.float_float_out(float mean, float std, SymInt[] size, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)"
  4749. },
  4750. {
  4751. "name": "aten::normal.out(Tensor self, float mean=0., float std=1., *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)"
  4752. },
  4753. {
  4754. "name": "aten::logsumexp(Tensor self, int[1] dim, bool keepdim=False) -> Tensor"
  4755. },
  4756. {
  4757. "name": "aten::logsumexp.names(Tensor self, str[1] dim, bool keepdim=False) -> Tensor"
  4758. },
  4759. {
  4760. "name": "aten::logsumexp.names_out(Tensor self, str[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)"
  4761. },
  4762. {
  4763. "name": "aten::logsumexp.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)"
  4764. },
  4765. {
  4766. "name": "aten::_unsafe_view(Tensor self, SymInt[] size) -> Tensor"
  4767. },
  4768. {
  4769. "name": "aten::_unsafe_view.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)"
  4770. },
  4771. {
  4772. "name": "aten::rand(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  4773. },
  4774. {
  4775. "name": "aten::rand.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  4776. },
  4777. {
  4778. "name": "aten::rand.names(SymInt[] size, *, str[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  4779. },
  4780. {
  4781. "name": "aten::rand.generator_with_names(SymInt[] size, *, Generator? generator, str[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  4782. },
  4783. {
  4784. "name": "aten::rand.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)"
  4785. },
  4786. {
  4787. "name": "aten::rand.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)"
  4788. },
  4789. {
  4790. "name": "aten::rand.names_out(SymInt[] size, *, str[]? names, Tensor(a!) out) -> Tensor(a!)"
  4791. },
  4792. {
  4793. "name": "aten::rand.generator_with_names_out(SymInt[] size, *, Generator? generator, str[]? names, Tensor(a!) out) -> Tensor(a!)"
  4794. },
  4795. {
  4796. "name": "aten::linalg_solve(Tensor A, Tensor B, *, bool left=True) -> Tensor"
  4797. },
  4798. {
  4799. "name": "aten::linalg_solve.out(Tensor A, Tensor B, *, bool left=True, Tensor(a!) out) -> Tensor(a!)"
  4800. },
  4801. {
  4802. "name": "aten::cauchy_(Tensor(a!) self, float median=0., float sigma=1., *, Generator? generator=None) -> Tensor(a!)"
  4803. },
  4804. {
  4805. "name": "aten::randperm(SymInt n, *, ScalarType? dtype=4, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  4806. },
  4807. {
  4808. "name": "aten::randperm.generator(SymInt n, *, Generator? generator, ScalarType? dtype=4, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  4809. },
  4810. {
  4811. "name": "aten::randperm.out(SymInt n, *, Tensor(a!) out) -> Tensor(a!)"
  4812. },
  4813. {
  4814. "name": "aten::randperm.generator_out(SymInt n, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)"
  4815. },
  4816. {
  4817. "name": "aten::gt.Tensor(Tensor self, Tensor other) -> Tensor"
  4818. },
  4819. {
  4820. "name": "aten::gt.Scalar(Tensor self, Scalar other) -> Tensor"
  4821. },
  4822. {
  4823. "name": "aten::gt.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)"
  4824. },
  4825. {
  4826. "name": "aten::gt.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  4827. },
  4828. {
  4829. "name": "aten::gt.int(int a, int b) -> bool"
  4830. },
  4831. {
  4832. "name": "aten::gt.float(float a, float b) -> bool"
  4833. },
  4834. {
  4835. "name": "aten::gt.int_float(int a, float b) -> bool"
  4836. },
  4837. {
  4838. "name": "aten::gt.float_int(float a, int b) -> bool"
  4839. },
  4840. {
  4841. "name": "aten::gt(Scalar a, Scalar b) -> bool"
  4842. },
  4843. {
  4844. "name": "aten::gt.str(str a, str b) -> bool"
  4845. },
  4846. {
  4847. "name": "aten::contiguous(Tensor(a) self, *, MemoryFormat memory_format=0) -> Tensor(a)"
  4848. },
  4849. {
  4850. "name": "aten::new_empty(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  4851. },
  4852. {
  4853. "name": "aten::new_empty.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)"
  4854. },
  4855. {
  4856. "name": "aten::select_backward(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt index) -> Tensor"
  4857. },
  4858. {
  4859. "name": "aten::select_backward.out(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt index, *, Tensor(a!) out) -> Tensor(a!)"
  4860. },
  4861. {
  4862. "name": "aten::stack(Tensor[] tensors, int dim=0) -> Tensor",
  4863. "category": "Tensor"
  4864. },
  4865. {
  4866. "name": "aten::stack.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)"
  4867. },
  4868. {
  4869. "name": "aten::linalg_qr(Tensor A, str mode=\"reduced\") -> (Tensor Q, Tensor R)"
  4870. },
  4871. {
  4872. "name": "aten::linalg_qr.out(Tensor A, str mode=\"reduced\", *, Tensor(a!) Q, Tensor(b!) R) -> (Tensor(a!) Q, Tensor(b!) R)"
  4873. },
  4874. {
  4875. "name": "aten::cat(Tensor[] tensors, int dim=0) -> Tensor",
  4876. "category": "Tensor"
  4877. },
  4878. {
  4879. "name": "aten::cat.names(Tensor[] tensors, str dim) -> Tensor",
  4880. "category": "Tensor"
  4881. },
  4882. {
  4883. "name": "aten::cat.names_out(Tensor[] tensors, str dim, *, Tensor(a!) out) -> Tensor(a!)",
  4884. "category": "Tensor"
  4885. },
  4886. {
  4887. "name": "aten::cat.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)",
  4888. "category": "Tensor"
  4889. },
  4890. {
  4891. "name": "aten::view_as_complex(Tensor(a) self) -> Tensor(a)"
  4892. },
  4893. {
  4894. "name": "aten::imag(Tensor(a) self) -> Tensor(a)"
  4895. },
  4896. {
  4897. "name": "aten::rot90(Tensor self, int k=1, int[] dims=[0, 1]) -> Tensor"
  4898. },
  4899. {
  4900. "name": "aten::rot90.out(Tensor self, int k=1, int[] dims=[0, 1], *, Tensor(a!) out) -> Tensor(a!)"
  4901. },
  4902. {
  4903. "name": "aten::rms_norm(Tensor input, SymInt[] normalized_shape, Tensor? weight=None, float? eps=None) -> Tensor"
  4904. },
  4905. {
  4906. "name": "aten::addcmul(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor"
  4907. },
  4908. {
  4909. "name": "aten::addcmul.out(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1, Tensor(a!) out) -> Tensor(a!)"
  4910. },
  4911. {
  4912. "name": "aten::unique_dim_consecutive(Tensor self, int dim, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor)",
  4913. "category": "Layer"
  4914. },
  4915. {
  4916. "name": "aten::unique_dim_consecutive.out(Tensor self, int dim, bool return_inverse=False, bool return_counts=False, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))"
  4917. },
  4918. {
  4919. "name": "aten::eq.Tensor(Tensor self, Tensor other) -> Tensor"
  4920. },
  4921. {
  4922. "name": "aten::eq.Scalar(Tensor self, Scalar other) -> Tensor"
  4923. },
  4924. {
  4925. "name": "aten::eq.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)"
  4926. },
  4927. {
  4928. "name": "aten::eq.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  4929. },
  4930. {
  4931. "name": "aten::eq.int_list(int[] a, int[] b) -> bool"
  4932. },
  4933. {
  4934. "name": "aten::eq.device(Device a, Device b) -> bool"
  4935. },
  4936. {
  4937. "name": "aten::eq.bool(bool a, bool b) -> bool"
  4938. },
  4939. {
  4940. "name": "aten::eq.enum(AnyEnumType a, AnyEnumType b) -> bool"
  4941. },
  4942. {
  4943. "name": "aten::eq.int(int a, int b) -> bool"
  4944. },
  4945. {
  4946. "name": "aten::eq.complex(complex a, complex b) -> bool"
  4947. },
  4948. {
  4949. "name": "aten::eq.float(float a, float b) -> bool"
  4950. },
  4951. {
  4952. "name": "aten::eq.int_float(int a, float b) -> bool"
  4953. },
  4954. {
  4955. "name": "aten::eq.float_int(float a, int b) -> bool"
  4956. },
  4957. {
  4958. "name": "aten::eq.float_complex(float a, complex b) -> bool"
  4959. },
  4960. {
  4961. "name": "aten::eq.complex_float(complex a, float b) -> bool"
  4962. },
  4963. {
  4964. "name": "aten::eq(Scalar a, Scalar b) -> bool"
  4965. },
  4966. {
  4967. "name": "aten::eq.str(str a, str b) -> bool"
  4968. },
  4969. {
  4970. "name": "aten::eq.float_list(float[] a, float[] b) -> bool"
  4971. },
  4972. {
  4973. "name": "aten::eq.Tensor_list(Tensor[] a, Tensor[] b) -> bool"
  4974. },
  4975. {
  4976. "name": "aten::eq.bool_list(bool[] a, bool[] b) -> bool"
  4977. },
  4978. {
  4979. "name": "aten::eq.str_list(str[] a, str[] b) -> bool"
  4980. },
  4981. {
  4982. "name": "aten::uniform_(Tensor(a!) self, float from=0., float to=1., *, Generator? generator=None) -> Tensor(a!)"
  4983. },
  4984. {
  4985. "name": "aten::signbit(Tensor self) -> Tensor"
  4986. },
  4987. {
  4988. "name": "aten::signbit.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  4989. },
  4990. {
  4991. "name": "aten::asin(Tensor self) -> Tensor"
  4992. },
  4993. {
  4994. "name": "aten::asin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  4995. },
  4996. {
  4997. "name": "aten::asin.int(int a) -> float"
  4998. },
  4999. {
  5000. "name": "aten::asin.float(float a) -> float"
  5001. },
  5002. {
  5003. "name": "aten::asin.complex(complex a) -> complex"
  5004. },
  5005. {
  5006. "name": "aten::asin.Scalar(Scalar a) -> Scalar"
  5007. },
  5008. {
  5009. "name": "aten::as_strided_(Tensor(a!) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a!)"
  5010. },
  5011. {
  5012. "name": "aten::transpose_(Tensor(a!) self, int dim0, int dim1) -> Tensor(a!)",
  5013. "category": "Transform"
  5014. },
  5015. {
  5016. "name": "aten::div.Tensor(Tensor self, Tensor other) -> Tensor"
  5017. },
  5018. {
  5019. "name": "aten::div.Scalar(Tensor self, Scalar other) -> Tensor"
  5020. },
  5021. {
  5022. "name": "aten::div.Tensor_mode(Tensor self, Tensor other, *, str? rounding_mode) -> Tensor"
  5023. },
  5024. {
  5025. "name": "aten::div.Scalar_mode(Tensor self, Scalar other, *, str? rounding_mode) -> Tensor"
  5026. },
  5027. {
  5028. "name": "aten::div.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  5029. },
  5030. {
  5031. "name": "aten::div.out_mode(Tensor self, Tensor other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!)"
  5032. },
  5033. {
  5034. "name": "aten::div.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)"
  5035. },
  5036. {
  5037. "name": "aten::div.Scalar_mode_out(Tensor self, Scalar other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!)"
  5038. },
  5039. {
  5040. "name": "aten::div.int(int a, int b) -> float"
  5041. },
  5042. {
  5043. "name": "aten::div.complex(complex a, complex b) -> complex"
  5044. },
  5045. {
  5046. "name": "aten::div.float(float a, float b) -> float"
  5047. },
  5048. {
  5049. "name": "aten::div(Scalar a, Scalar b) -> float"
  5050. },
  5051. {
  5052. "name": "aten::mul.Tensor(Tensor self, Tensor other) -> Tensor"
  5053. },
  5054. {
  5055. "name": "aten::mul.Scalar(Tensor self, Scalar other) -> Tensor"
  5056. },
  5057. {
  5058. "name": "aten::mul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  5059. },
  5060. {
  5061. "name": "aten::mul.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)"
  5062. },
  5063. {
  5064. "name": "aten::mul.left_t(t[] l, int n) -> t[]"
  5065. },
  5066. {
  5067. "name": "aten::mul.right_(int n, t[] l) -> t[]"
  5068. },
  5069. {
  5070. "name": "aten::mul.int(int a, int b) -> int"
  5071. },
  5072. {
  5073. "name": "aten::mul.complex(complex a, complex b) -> complex"
  5074. },
  5075. {
  5076. "name": "aten::mul.float(float a, float b) -> float"
  5077. },
  5078. {
  5079. "name": "aten::mul.int_complex(int a, complex b) -> complex"
  5080. },
  5081. {
  5082. "name": "aten::mul.complex_int(complex a, int b) -> complex"
  5083. },
  5084. {
  5085. "name": "aten::mul.float_complex(float a, complex b) -> complex"
  5086. },
  5087. {
  5088. "name": "aten::mul.complex_float(complex a, float b) -> complex"
  5089. },
  5090. {
  5091. "name": "aten::mul.int_float(int a, float b) -> float"
  5092. },
  5093. {
  5094. "name": "aten::mul.float_int(float a, int b) -> float"
  5095. },
  5096. {
  5097. "name": "aten::mul(Scalar a, Scalar b) -> Scalar"
  5098. },
  5099. {
  5100. "name": "aten::sub.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor"
  5101. },
  5102. {
  5103. "name": "aten::sub.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor"
  5104. },
  5105. {
  5106. "name": "aten::sub.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)"
  5107. },
  5108. {
  5109. "name": "aten::sub.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!)"
  5110. },
  5111. {
  5112. "name": "aten::sub.int(int a, int b) -> int"
  5113. },
  5114. {
  5115. "name": "aten::sub.complex(complex a, complex b) -> complex"
  5116. },
  5117. {
  5118. "name": "aten::sub.float(float a, float b) -> float"
  5119. },
  5120. {
  5121. "name": "aten::sub.int_complex(int a, complex b) -> complex"
  5122. },
  5123. {
  5124. "name": "aten::sub.complex_int(complex a, int b) -> complex"
  5125. },
  5126. {
  5127. "name": "aten::sub.float_complex(float a, complex b) -> complex"
  5128. },
  5129. {
  5130. "name": "aten::sub.complex_float(complex a, float b) -> complex"
  5131. },
  5132. {
  5133. "name": "aten::sub.int_float(int a, float b) -> float"
  5134. },
  5135. {
  5136. "name": "aten::sub.float_int(float a, int b) -> float"
  5137. },
  5138. {
  5139. "name": "aten::sub(Scalar a, Scalar b) -> Scalar"
  5140. },
  5141. {
  5142. "name": "aten::add.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor"
  5143. },
  5144. {
  5145. "name": "aten::add.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor"
  5146. },
  5147. {
  5148. "name": "aten::add.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)"
  5149. },
  5150. {
  5151. "name": "aten::add.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!)"
  5152. },
  5153. {
  5154. "name": "aten::add.t(t[] a, t[] b) -> t[]"
  5155. },
  5156. {
  5157. "name": "aten::add.str(str a, str b) -> str"
  5158. },
  5159. {
  5160. "name": "aten::add.int(int a, int b) -> int"
  5161. },
  5162. {
  5163. "name": "aten::add.complex(complex a, complex b) -> complex"
  5164. },
  5165. {
  5166. "name": "aten::add.float(float a, float b) -> float"
  5167. },
  5168. {
  5169. "name": "aten::add.int_complex(int a, complex b) -> complex"
  5170. },
  5171. {
  5172. "name": "aten::add.complex_int(complex a, int b) -> complex"
  5173. },
  5174. {
  5175. "name": "aten::add.float_complex(float a, complex b) -> complex"
  5176. },
  5177. {
  5178. "name": "aten::add.complex_float(complex a, float b) -> complex"
  5179. },
  5180. {
  5181. "name": "aten::add.int_float(int a, float b) -> float"
  5182. },
  5183. {
  5184. "name": "aten::add.float_int(float a, int b) -> float"
  5185. },
  5186. {
  5187. "name": "aten::add(Scalar a, Scalar b) -> Scalar"
  5188. },
  5189. {
  5190. "name": "aten::to.device(Tensor(a) self, Device device, ScalarType dtype, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a)"
  5191. },
  5192. {
  5193. "name": "aten::to.dtype(Tensor(a) self, ScalarType dtype, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a)"
  5194. },
  5195. {
  5196. "name": "aten::to.other(Tensor(a) self, Tensor other, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a)"
  5197. },
  5198. {
  5199. "name": "aten::to.dtype_layout(Tensor(a) self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a)"
  5200. },
  5201. {
  5202. "name": "aten::to.prim_Device(Tensor(a) self, Device? device, int? dtype=None, bool non_blocking=False, bool copy=False) -> Tensor(a|b)"
  5203. },
  5204. {
  5205. "name": "aten::to.prim_dtype(Tensor(a) self, int? dtype=None, bool non_blocking=False, bool copy=False) -> Tensor(a|b)"
  5206. },
  5207. {
  5208. "name": "aten::to.prim_other(Tensor(a) self, bool non_blocking=False, bool copy=False) -> Tensor(a|b)"
  5209. },
  5210. {
  5211. "name": "aten::atanh(Tensor self) -> Tensor"
  5212. },
  5213. {
  5214. "name": "aten::atanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  5215. },
  5216. {
  5217. "name": "aten::atanh.int(int a) -> float"
  5218. },
  5219. {
  5220. "name": "aten::atanh.float(float a) -> float"
  5221. },
  5222. {
  5223. "name": "aten::atanh.complex(complex a) -> complex"
  5224. },
  5225. {
  5226. "name": "aten::atanh.Scalar(Scalar a) -> Scalar"
  5227. },
  5228. {
  5229. "name": "aten::sinh(Tensor self) -> Tensor"
  5230. },
  5231. {
  5232. "name": "aten::sinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  5233. },
  5234. {
  5235. "name": "aten::sinh.int(int a) -> float"
  5236. },
  5237. {
  5238. "name": "aten::sinh.float(float a) -> float"
  5239. },
  5240. {
  5241. "name": "aten::sinh.complex(complex a) -> complex"
  5242. },
  5243. {
  5244. "name": "aten::sinh.Scalar(Scalar a) -> Scalar"
  5245. },
  5246. {
  5247. "name": "aten::topk(Tensor self, SymInt k, int dim=-1, bool largest=True, bool sorted=True) -> (Tensor values, Tensor indices)"
  5248. },
  5249. {
  5250. "name": "aten::topk.values(Tensor self, SymInt k, int dim=-1, bool largest=True, bool sorted=True, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)"
  5251. },
  5252. {
  5253. "name": "aten::diagonal_backward(Tensor grad_output, SymInt[] input_sizes, int offset, int dim1, int dim2) -> Tensor"
  5254. },
  5255. {
  5256. "name": "aten::diagonal_backward.out(Tensor grad_output, SymInt[] input_sizes, int offset, int dim1, int dim2, *, Tensor(a!) out) -> Tensor(a!)"
  5257. },
  5258. {
  5259. "name": "quantized::embedding_byte(__torch__.torch.classes.quantized.EmbeddingPackedParamsBase weight, Tensor indices, bool pruned_weights=False) -> Tensor"
  5260. },
  5261. {
  5262. "name": "aten::index_copy(Tensor self, int dim, Tensor index, Tensor source) -> Tensor"
  5263. },
  5264. {
  5265. "name": "aten::index_copy.dimname(Tensor self, str dim, Tensor index, Tensor source) -> Tensor"
  5266. },
  5267. {
  5268. "name": "aten::index_copy.out(Tensor self, int dim, Tensor index, Tensor source, *, Tensor(a!) out) -> Tensor(a!)"
  5269. },
  5270. {
  5271. "name": "aten::round(Tensor self) -> Tensor"
  5272. },
  5273. {
  5274. "name": "aten::round.decimals(Tensor self, *, int decimals) -> Tensor"
  5275. },
  5276. {
  5277. "name": "aten::round.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  5278. },
  5279. {
  5280. "name": "aten::round.decimals_out(Tensor self, *, int decimals, Tensor(a!) out) -> Tensor(a!)"
  5281. },
  5282. {
  5283. "name": "aten::round.int(int a) -> float"
  5284. },
  5285. {
  5286. "name": "aten::round.float(float a) -> float"
  5287. },
  5288. {
  5289. "name": "aten::round.Scalar(Scalar a) -> Scalar"
  5290. },
  5291. {
  5292. "name": "aten::cos(Tensor self) -> Tensor"
  5293. },
  5294. {
  5295. "name": "aten::cos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  5296. },
  5297. {
  5298. "name": "aten::cos.int(int a) -> float"
  5299. },
  5300. {
  5301. "name": "aten::cos.float(float a) -> float"
  5302. },
  5303. {
  5304. "name": "aten::cos.complex(complex a) -> complex"
  5305. },
  5306. {
  5307. "name": "aten::cos.Scalar(Scalar a) -> Scalar"
  5308. },
  5309. {
  5310. "name": "aten::stride.int(Tensor self, int dim) -> int"
  5311. },
  5312. {
  5313. "name": "aten::stride.Dimname(Tensor self, str dim) -> int"
  5314. },
  5315. {
  5316. "name": "aten::stride(Tensor self) -> int[]"
  5317. },
  5318. {
  5319. "name": "aten::matmul(Tensor self, Tensor other) -> Tensor"
  5320. },
  5321. {
  5322. "name": "aten::matmul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  5323. },
  5324. {
  5325. "name": "quantized::conv2d_relu.new(Tensor qx, __torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weight, float output_scale, int output_zero_point) -> Tensor",
  5326. "category": "Layer"
  5327. },
  5328. {
  5329. "name": "quantized::conv2d_relu(Tensor qx, __torch__.torch.classes.quantized.Conv2dPackedParamsBase weight, int[] stride, int[] padding, int[] dilation, int groups, float output_scale, int output_zero_point) -> Tensor",
  5330. "category": "Layer"
  5331. },
  5332. {
  5333. "name": "aten::relu(Tensor self) -> Tensor",
  5334. "category": "Activation"
  5335. },
  5336. {
  5337. "name": "aten::relu.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  5338. },
  5339. {
  5340. "name": "aten::upsample_nearest2d(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None) -> Tensor",
  5341. "category": "Layer"
  5342. },
  5343. {
  5344. "name": "aten::upsample_nearest2d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor",
  5345. "category": "Layer"
  5346. },
  5347. {
  5348. "name": "aten::upsample_nearest2d.out(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)"
  5349. },
  5350. {
  5351. "name": "aten::upsample_nearest2d.vec_out(Tensor input, SymInt[]? output_size, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)"
  5352. },
  5353. {
  5354. "name": "aten::neg(Tensor self) -> Tensor"
  5355. },
  5356. {
  5357. "name": "aten::neg.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  5358. },
  5359. {
  5360. "name": "aten::neg.int(int a) -> int"
  5361. },
  5362. {
  5363. "name": "aten::neg.float(float a) -> float"
  5364. },
  5365. {
  5366. "name": "aten::neg.complex(complex a) -> complex"
  5367. },
  5368. {
  5369. "name": "aten::neg.Scalar(Scalar a) -> Scalar"
  5370. },
  5371. {
  5372. "name": "aten::clamp(Tensor self, Scalar? min=None, Scalar? max=None) -> Tensor"
  5373. },
  5374. {
  5375. "name": "aten::clamp.Tensor(Tensor self, Tensor? min=None, Tensor? max=None) -> Tensor"
  5376. },
  5377. {
  5378. "name": "aten::clamp.out(Tensor self, Scalar? min=None, Scalar? max=None, *, Tensor(a!) out) -> Tensor(a!)"
  5379. },
  5380. {
  5381. "name": "aten::clamp.Tensor_out(Tensor self, Tensor? min=None, Tensor? max=None, *, Tensor(a!) out) -> Tensor(a!)"
  5382. },
  5383. {
  5384. "name": "aten::log1p(Tensor self) -> Tensor"
  5385. },
  5386. {
  5387. "name": "aten::log1p.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  5388. },
  5389. {
  5390. "name": "aten::log1p.int(int a) -> float"
  5391. },
  5392. {
  5393. "name": "aten::log1p.float(float a) -> float"
  5394. },
  5395. {
  5396. "name": "aten::log1p.Scalar(Scalar a) -> Scalar"
  5397. },
  5398. {
  5399. "name": "aten::log10(Tensor self) -> Tensor"
  5400. },
  5401. {
  5402. "name": "aten::log10.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  5403. },
  5404. {
  5405. "name": "aten::log10.int(int a) -> float"
  5406. },
  5407. {
  5408. "name": "aten::log10.float(float a) -> float"
  5409. },
  5410. {
  5411. "name": "aten::log10.complex(complex a) -> complex"
  5412. },
  5413. {
  5414. "name": "aten::log10.Scalar(Scalar a) -> Scalar"
  5415. },
  5416. {
  5417. "name": "aten::fake_quantize_per_tensor_affine(Tensor self, float scale, int zero_point, int quant_min, int quant_max) -> Tensor",
  5418. "category": "Quantization"
  5419. },
  5420. {
  5421. "name": "aten::fake_quantize_per_tensor_affine.tensor_qparams(Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max) -> Tensor",
  5422. "category": "Quantization"
  5423. },
  5424. {
  5425. "name": "aten::new_empty_strided(Tensor self, SymInt[] size, SymInt[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  5426. },
  5427. {
  5428. "name": "aten::new_empty_strided.out(Tensor self, SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!)"
  5429. },
  5430. {
  5431. "name": "aten::clamp_max(Tensor self, Scalar max) -> Tensor"
  5432. },
  5433. {
  5434. "name": "aten::clamp_max.Tensor(Tensor self, Tensor max) -> Tensor"
  5435. },
  5436. {
  5437. "name": "aten::clamp_max.out(Tensor self, Scalar max, *, Tensor(a!) out) -> Tensor(a!)"
  5438. },
  5439. {
  5440. "name": "aten::clamp_max.Tensor_out(Tensor self, Tensor max, *, Tensor(a!) out) -> Tensor(a!)"
  5441. },
  5442. {
  5443. "name": "aten::log2(Tensor self) -> Tensor"
  5444. },
  5445. {
  5446. "name": "aten::log2.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  5447. },
  5448. {
  5449. "name": "quantized::make_quantized_cell_params_fp16(__torch__.torch.classes.quantized.LinearPackedParamsBase w_ih, __torch__.torch.classes.quantized.LinearPackedParamsBase w_hh) -> __torch__.torch.classes.rnn.CellParamsBase"
  5450. },
  5451. {
  5452. "name": "aten::size.int(Tensor self, int dim) -> int"
  5453. },
  5454. {
  5455. "name": "aten::size.Dimname(Tensor self, str dim) -> int"
  5456. },
  5457. {
  5458. "name": "aten::size(Tensor self) -> int[]"
  5459. },
  5460. {
  5461. "name": "quantized::mul_scalar_out(Tensor qa, Scalar b, Tensor(a!) out) -> Tensor(a!) out"
  5462. },
  5463. {
  5464. "name": "quantized::mul_scalar_out.Tensor(Tensor qa, Tensor b, Tensor(a!) out) -> Tensor(a!) out"
  5465. },
  5466. {
  5467. "name": "aten::le.Tensor(Tensor self, Tensor other) -> Tensor"
  5468. },
  5469. {
  5470. "name": "aten::le.Scalar(Tensor self, Scalar other) -> Tensor"
  5471. },
  5472. {
  5473. "name": "aten::le.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)"
  5474. },
  5475. {
  5476. "name": "aten::le.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  5477. },
  5478. {
  5479. "name": "aten::le.int(int a, int b) -> bool"
  5480. },
  5481. {
  5482. "name": "aten::le.float(float a, float b) -> bool"
  5483. },
  5484. {
  5485. "name": "aten::le.int_float(int a, float b) -> bool"
  5486. },
  5487. {
  5488. "name": "aten::le.float_int(float a, int b) -> bool"
  5489. },
  5490. {
  5491. "name": "aten::le(Scalar a, Scalar b) -> bool"
  5492. },
  5493. {
  5494. "name": "aten::le.str(str a, str b) -> bool"
  5495. },
  5496. {
  5497. "name": "aten::expand(Tensor(a) self, SymInt[] size, *, bool implicit=False) -> Tensor(a)"
  5498. },
  5499. {
  5500. "name": "aten::upsample_nearest1d(Tensor self, SymInt[1] output_size, float? scales=None) -> Tensor",
  5501. "category": "Layer"
  5502. },
  5503. {
  5504. "name": "aten::upsample_nearest1d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor",
  5505. "category": "Layer"
  5506. },
  5507. {
  5508. "name": "aten::upsample_nearest1d.out(Tensor self, SymInt[1] output_size, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)"
  5509. },
  5510. {
  5511. "name": "aten::ne.Tensor(Tensor self, Tensor other) -> Tensor"
  5512. },
  5513. {
  5514. "name": "aten::ne.Scalar(Tensor self, Scalar other) -> Tensor"
  5515. },
  5516. {
  5517. "name": "aten::ne.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)"
  5518. },
  5519. {
  5520. "name": "aten::ne.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  5521. },
  5522. {
  5523. "name": "aten::ne.int_list(int[] a, int[] b) -> bool"
  5524. },
  5525. {
  5526. "name": "aten::ne.device(Device a, Device b) -> bool"
  5527. },
  5528. {
  5529. "name": "aten::ne.bool(bool a, bool b) -> bool"
  5530. },
  5531. {
  5532. "name": "aten::ne.enum(AnyEnumType a, AnyEnumType b) -> bool"
  5533. },
  5534. {
  5535. "name": "aten::ne.int(int a, int b) -> bool"
  5536. },
  5537. {
  5538. "name": "aten::ne.complex(complex a, complex b) -> bool"
  5539. },
  5540. {
  5541. "name": "aten::ne.float(float a, float b) -> bool"
  5542. },
  5543. {
  5544. "name": "aten::ne.int_float(int a, float b) -> bool"
  5545. },
  5546. {
  5547. "name": "aten::ne.float_int(float a, int b) -> bool"
  5548. },
  5549. {
  5550. "name": "aten::ne.float_complex(float a, complex b) -> bool"
  5551. },
  5552. {
  5553. "name": "aten::ne.complex_float(complex a, float b) -> bool"
  5554. },
  5555. {
  5556. "name": "aten::ne(Scalar a, Scalar b) -> bool"
  5557. },
  5558. {
  5559. "name": "aten::ne.str(str a, str b) -> bool"
  5560. },
  5561. {
  5562. "name": "aten::ne.float_list(float[] a, float[] b) -> bool"
  5563. },
  5564. {
  5565. "name": "aten::ne.Tensor_list(Tensor[] a, Tensor[] b) -> bool"
  5566. },
  5567. {
  5568. "name": "aten::ne.bool_list(bool[] a, bool[] b) -> bool"
  5569. },
  5570. {
  5571. "name": "aten::ne.str_list(str[] a, str[] b) -> bool"
  5572. },
  5573. {
  5574. "name": "aten::upsample_nearest2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None) -> Tensor"
  5575. },
  5576. {
  5577. "name": "aten::upsample_nearest2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)"
  5578. },
  5579. {
  5580. "name": "aten::expm1(Tensor self) -> Tensor"
  5581. },
  5582. {
  5583. "name": "aten::expm1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  5584. },
  5585. {
  5586. "name": "aten::expm1.int(int a) -> float"
  5587. },
  5588. {
  5589. "name": "aten::expm1.float(float a) -> float"
  5590. },
  5591. {
  5592. "name": "aten::expm1.Scalar(Scalar a) -> Scalar"
  5593. },
  5594. {
  5595. "name": "aten::diagonal_copy(Tensor self, int offset=0, int dim1=0, int dim2=1) -> Tensor"
  5596. },
  5597. {
  5598. "name": "aten::diagonal_copy.out(Tensor self, int offset=0, int dim1=0, int dim2=1, *, Tensor(a!) out) -> Tensor(a!)"
  5599. },
  5600. {
  5601. "name": "aten::trace(Tensor self) -> Tensor"
  5602. },
  5603. {
  5604. "name": "aten::trace.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  5605. },
  5606. {
  5607. "name": "prepacked::linear_clamp_prepack(Tensor W, Tensor? B=None, Scalar? output_min=None, Scalar? output_max=None) -> __torch__.torch.classes.xnnpack.LinearOpContext"
  5608. },
  5609. {
  5610. "name": "aten::ceil(Tensor self) -> Tensor"
  5611. },
  5612. {
  5613. "name": "aten::ceil.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  5614. },
  5615. {
  5616. "name": "aten::ceil.int(int a) -> int"
  5617. },
  5618. {
  5619. "name": "aten::ceil.float(float a) -> int"
  5620. },
  5621. {
  5622. "name": "aten::ceil.Scalar(Scalar a) -> Scalar"
  5623. },
  5624. {
  5625. "name": "aten::sigmoid(Tensor self) -> Tensor",
  5626. "category": "Activation"
  5627. },
  5628. {
  5629. "name": "aten::sigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  5630. },
  5631. {
  5632. "name": "aten::kl_div(Tensor self, Tensor target, int reduction=1, *, bool log_target=False) -> Tensor"
  5633. },
  5634. {
  5635. "name": "aten::sin(Tensor self) -> Tensor"
  5636. },
  5637. {
  5638. "name": "aten::sin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  5639. },
  5640. {
  5641. "name": "aten::sin.int(int a) -> float"
  5642. },
  5643. {
  5644. "name": "aten::sin.float(float a) -> float"
  5645. },
  5646. {
  5647. "name": "aten::sin.complex(complex a) -> complex"
  5648. },
  5649. {
  5650. "name": "aten::sin.Scalar(Scalar a) -> Scalar"
  5651. },
  5652. {
  5653. "name": "aten::atan(Tensor self) -> Tensor"
  5654. },
  5655. {
  5656. "name": "aten::atan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  5657. },
  5658. {
  5659. "name": "aten::atan.int(int a) -> float"
  5660. },
  5661. {
  5662. "name": "aten::atan.float(float a) -> float"
  5663. },
  5664. {
  5665. "name": "aten::atan.complex(complex a) -> complex"
  5666. },
  5667. {
  5668. "name": "aten::atan.Scalar(Scalar a) -> Scalar"
  5669. },
  5670. {
  5671. "name": "aten::abs_(Tensor(a!) self) -> Tensor(a!)"
  5672. },
  5673. {
  5674. "name": "aten::repeat_interleave.Tensor(Tensor repeats, *, SymInt? output_size=None) -> Tensor"
  5675. },
  5676. {
  5677. "name": "aten::repeat_interleave.self_Tensor(Tensor self, Tensor repeats, int? dim=None, *, SymInt? output_size=None) -> Tensor"
  5678. },
  5679. {
  5680. "name": "aten::repeat_interleave.self_int(Tensor self, SymInt repeats, int? dim=None, *, SymInt? output_size=None) -> Tensor"
  5681. },
  5682. {
  5683. "name": "aten::repeat_interleave.Tensor_out(Tensor repeats, *, SymInt? output_size=None, Tensor(a!) out) -> Tensor(a!)"
  5684. },
  5685. {
  5686. "name": "aten::abs(Tensor self) -> Tensor"
  5687. },
  5688. {
  5689. "name": "aten::abs.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  5690. },
  5691. {
  5692. "name": "aten::clamp_min(Tensor self, Scalar min) -> Tensor"
  5693. },
  5694. {
  5695. "name": "aten::clamp_min.Tensor(Tensor self, Tensor min) -> Tensor"
  5696. },
  5697. {
  5698. "name": "aten::clamp_min.out(Tensor self, Scalar min, *, Tensor(a!) out) -> Tensor(a!)"
  5699. },
  5700. {
  5701. "name": "aten::clamp_min.Tensor_out(Tensor self, Tensor min, *, Tensor(a!) out) -> Tensor(a!)"
  5702. },
  5703. {
  5704. "name": "aten::log(Tensor self) -> Tensor"
  5705. },
  5706. {
  5707. "name": "aten::log.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  5708. },
  5709. {
  5710. "name": "aten::log.int(int a) -> float"
  5711. },
  5712. {
  5713. "name": "aten::log.float(float a) -> float"
  5714. },
  5715. {
  5716. "name": "aten::log.complex(complex a) -> complex"
  5717. },
  5718. {
  5719. "name": "aten::log.Scalar(Scalar a) -> Scalar"
  5720. },
  5721. {
  5722. "name": "aten::log.int_int(int a, int b) -> float"
  5723. },
  5724. {
  5725. "name": "aten::log.float_float(float a, float b) -> float"
  5726. },
  5727. {
  5728. "name": "aten::log.complex_complex(complex a, complex b) -> complex"
  5729. },
  5730. {
  5731. "name": "aten::log.int_float(int a, float b) -> float"
  5732. },
  5733. {
  5734. "name": "aten::log.float_int(float a, int b) -> float"
  5735. },
  5736. {
  5737. "name": "aten::log.int_complex(int a, complex b) -> complex"
  5738. },
  5739. {
  5740. "name": "aten::log.complex_int(complex a, int b) -> complex"
  5741. },
  5742. {
  5743. "name": "aten::log.float_complex(float a, complex b) -> complex"
  5744. },
  5745. {
  5746. "name": "aten::log.complex_float(complex a, float b) -> complex"
  5747. },
  5748. {
  5749. "name": "aten::log.Scalar_Scalar(Scalar a, Scalar b) -> float"
  5750. },
  5751. {
  5752. "name": "aten::slice.Tensor(Tensor(a) self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor(a)",
  5753. "category": "Tensor"
  5754. },
  5755. {
  5756. "name": "aten::slice.t(t[] l, int? start=None, int? end=None, int step=1) -> t[]",
  5757. "category": "Tensor"
  5758. },
  5759. {
  5760. "name": "aten::slice.str(str string, int? start=None, int? end=None, int step=1) -> str",
  5761. "category": "Tensor"
  5762. },
  5763. {
  5764. "name": "aten::reciprocal(Tensor self) -> Tensor"
  5765. },
  5766. {
  5767. "name": "aten::reciprocal.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  5768. },
  5769. {
  5770. "name": "aten::pow_.Scalar(Tensor(a!) self, Scalar exponent) -> Tensor(a!)"
  5771. },
  5772. {
  5773. "name": "aten::pow_.Tensor(Tensor(a!) self, Tensor exponent) -> Tensor(a!)"
  5774. },
  5775. {
  5776. "name": "aten::__lshift__.Tensor(Tensor self, Tensor other) -> Tensor"
  5777. },
  5778. {
  5779. "name": "aten::__lshift__.Scalar(Tensor self, Scalar other) -> Tensor"
  5780. },
  5781. {
  5782. "name": "aten::__lshift__.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)"
  5783. },
  5784. {
  5785. "name": "aten::__lshift__.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  5786. },
  5787. {
  5788. "name": "aten::__lshift__.int(int a, int b) -> int"
  5789. },
  5790. {
  5791. "name": "aten::_thnn_fused_lstm_cell(Tensor input_gates, Tensor hidden_gates, Tensor cx, Tensor? input_bias=None, Tensor? hidden_bias=None) -> (Tensor, Tensor, Tensor)"
  5792. },
  5793. {
  5794. "name": "aten::_thnn_fused_lstm_cell.out(Tensor input_gates, Tensor hidden_gates, Tensor cx, Tensor? input_bias=None, Tensor? hidden_bias=None, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))"
  5795. },
  5796. {
  5797. "name": "aten::__ior__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)"
  5798. },
  5799. {
  5800. "name": "aten::__ior__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)"
  5801. },
  5802. {
  5803. "name": "aten::floor(Tensor self) -> Tensor"
  5804. },
  5805. {
  5806. "name": "aten::floor.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  5807. },
  5808. {
  5809. "name": "aten::floor.int(int a) -> int"
  5810. },
  5811. {
  5812. "name": "aten::floor.float(float a) -> int"
  5813. },
  5814. {
  5815. "name": "aten::floor.Scalar(Scalar a) -> Scalar"
  5816. },
  5817. {
  5818. "name": "aten::_to_copy(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool non_blocking=False, MemoryFormat? memory_format=None) -> Tensor"
  5819. },
  5820. {
  5821. "name": "aten::_to_copy.out(Tensor self, *, bool non_blocking=False, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)"
  5822. },
  5823. {
  5824. "name": "quantized::conv2d_output_padding(__torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weights) -> int[]"
  5825. },
  5826. {
  5827. "name": "aten::softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor",
  5828. "category": "Activation"
  5829. },
  5830. {
  5831. "name": "aten::softmax.Dimname(Tensor self, str dim, *, ScalarType? dtype=None) -> Tensor",
  5832. "category": "Activation"
  5833. },
  5834. {
  5835. "name": "aten::softmax.int_out(Tensor self, int dim, ScalarType? dtype=None, *, Tensor(a!) out) -> Tensor(a!)"
  5836. },
  5837. {
  5838. "name": "aten::bernoulli_.Tensor(Tensor(a!) self, Tensor p, *, Generator? generator=None) -> Tensor(a!)"
  5839. },
  5840. {
  5841. "name": "aten::bernoulli_.float(Tensor(a!) self, float p=0.5, *, Generator? generator=None) -> Tensor(a!)"
  5842. },
  5843. {
  5844. "name": "aten::lift_fresh_copy(Tensor self) -> Tensor"
  5845. },
  5846. {
  5847. "name": "aten::lift_fresh_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  5848. },
  5849. {
  5850. "name": "aten::is_floating_point(Tensor self) -> bool"
  5851. },
  5852. {
  5853. "name": "aten::baddbmm_(Tensor(a!) self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)"
  5854. },
  5855. {
  5856. "name": "aten::unique_dim(Tensor self, int dim, bool sorted=True, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor)"
  5857. },
  5858. {
  5859. "name": "aten::unique_dim.out(Tensor self, int dim, bool sorted=True, bool return_inverse=False, bool return_counts=False, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))"
  5860. },
  5861. {
  5862. "name": "aten::conj(Tensor(a) self) -> Tensor(a)"
  5863. },
  5864. {
  5865. "name": "quantized::add_relu_out(Tensor qa, Tensor qb, Tensor(a!) out) -> Tensor(a!) out"
  5866. },
  5867. {
  5868. "name": "aten::ge.Tensor(Tensor self, Tensor other) -> Tensor"
  5869. },
  5870. {
  5871. "name": "aten::ge.Scalar(Tensor self, Scalar other) -> Tensor"
  5872. },
  5873. {
  5874. "name": "aten::ge.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)"
  5875. },
  5876. {
  5877. "name": "aten::ge.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  5878. },
  5879. {
  5880. "name": "aten::ge.int(int a, int b) -> bool"
  5881. },
  5882. {
  5883. "name": "aten::ge.float(float a, float b) -> bool"
  5884. },
  5885. {
  5886. "name": "aten::ge.int_float(int a, float b) -> bool"
  5887. },
  5888. {
  5889. "name": "aten::ge.float_int(float a, int b) -> bool"
  5890. },
  5891. {
  5892. "name": "aten::ge(Scalar a, Scalar b) -> bool"
  5893. },
  5894. {
  5895. "name": "aten::ge.str(str a, str b) -> bool"
  5896. },
  5897. {
  5898. "name": "aten::reflection_pad2d(Tensor self, SymInt[4] padding) -> Tensor",
  5899. "category": "Tensor"
  5900. },
  5901. {
  5902. "name": "aten::reflection_pad2d.out(Tensor self, SymInt[4] padding, *, Tensor(a!) out) -> Tensor(a!)"
  5903. },
  5904. {
  5905. "name": "aten::_conj(Tensor(a) self) -> Tensor(a)"
  5906. },
  5907. {
  5908. "name": "aten::feature_alpha_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)",
  5909. "category": "Dropout"
  5910. },
  5911. {
  5912. "name": "aten::fft_ifftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor"
  5913. },
  5914. {
  5915. "name": "aten::fft_ifftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)"
  5916. },
  5917. {
  5918. "name": "aten::numpy_T(Tensor(a) self) -> Tensor(a)"
  5919. },
  5920. {
  5921. "name": "aten::numpy_T.a(Tensor(a) self) -> Tensor(a)"
  5922. },
  5923. {
  5924. "name": "aten::adaptive_max_pool2d(Tensor self, int[2] output_size) -> (Tensor, Tensor)",
  5925. "category": "Pool"
  5926. },
  5927. {
  5928. "name": "aten::adaptive_max_pool2d.out(Tensor self, int[2] output_size, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))"
  5929. },
  5930. {
  5931. "name": "aten::resize_(Tensor(a!) self, SymInt[] size, *, MemoryFormat? memory_format=None) -> Tensor(a!)"
  5932. },
  5933. {
  5934. "name": "aten::alias(Tensor(a) self) -> Tensor(a)"
  5935. },
  5936. {
  5937. "name": "aten::pow.Tensor_Tensor(Tensor self, Tensor exponent) -> Tensor"
  5938. },
  5939. {
  5940. "name": "aten::pow.Tensor_Scalar(Tensor self, Scalar exponent) -> Tensor"
  5941. },
  5942. {
  5943. "name": "aten::pow.Scalar(Scalar self, Tensor exponent) -> Tensor"
  5944. },
  5945. {
  5946. "name": "aten::pow.Scalar_out(Scalar self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!)"
  5947. },
  5948. {
  5949. "name": "aten::pow.Tensor_Scalar_out(Tensor self, Scalar exponent, *, Tensor(a!) out) -> Tensor(a!)"
  5950. },
  5951. {
  5952. "name": "aten::pow.Tensor_Tensor_out(Tensor self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!)"
  5953. },
  5954. {
  5955. "name": "aten::pow.int(int a, int b) -> float"
  5956. },
  5957. {
  5958. "name": "aten::pow.complex(complex a, complex b) -> complex"
  5959. },
  5960. {
  5961. "name": "aten::pow.float(float a, float b) -> float"
  5962. },
  5963. {
  5964. "name": "aten::pow.int_float(int a, float b) -> float"
  5965. },
  5966. {
  5967. "name": "aten::pow.float_int(float a, int b) -> float"
  5968. },
  5969. {
  5970. "name": "aten::pow.float_complex(float a, complex b) -> complex"
  5971. },
  5972. {
  5973. "name": "aten::pow.complex_float(complex a, float b) -> complex"
  5974. },
  5975. {
  5976. "name": "aten::pow.Scalar_Scalar(Scalar a, Scalar b) -> float"
  5977. },
  5978. {
  5979. "name": "aten::pow.int_to_int(int a, int b) -> int"
  5980. },
  5981. {
  5982. "name": "aten::chunk(Tensor(a -> *) self, int chunks, int dim=0) -> Tensor(a)[]"
  5983. },
  5984. {
  5985. "name": "aten::copy_(Tensor(a!) self, Tensor src, bool non_blocking=False) -> Tensor(a!)"
  5986. },
  5987. {
  5988. "name": "aten::copy_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)"
  5989. },
  5990. {
  5991. "name": "aten::copy_.int(Tensor(a!) self, int other) -> Tensor(a!)"
  5992. },
  5993. {
  5994. "name": "aten::copy_.float(Tensor(a!) self, float other) -> Tensor(a!)"
  5995. },
  5996. {
  5997. "name": "aten::empty.memory_format(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor"
  5998. },
  5999. {
  6000. "name": "aten::empty.out(SymInt[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)"
  6001. },
  6002. {
  6003. "name": "aten::empty.names(int[] size, *, str[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor"
  6004. },
  6005. {
  6006. "name": "aten::empty.names_out(int[] size, *, str[]? names, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)"
  6007. },
  6008. {
  6009. "name": "aten::mv(Tensor self, Tensor vec) -> Tensor"
  6010. },
  6011. {
  6012. "name": "aten::mv.out(Tensor self, Tensor vec, *, Tensor(a!) out) -> Tensor(a!)"
  6013. },
  6014. {
  6015. "name": "aten::multinomial(Tensor self, int num_samples, bool replacement=False, *, Generator? generator=None) -> Tensor"
  6016. },
  6017. {
  6018. "name": "aten::multinomial.out(Tensor self, int num_samples, bool replacement=False, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)"
  6019. },
  6020. {
  6021. "name": "aten::unbind.int(Tensor(a -> *) self, int dim=0) -> Tensor(a)[]"
  6022. },
  6023. {
  6024. "name": "aten::unbind.Dimname(Tensor(a -> *) self, str dim) -> Tensor(a)[]"
  6025. },
  6026. {
  6027. "name": "aten::view_as(Tensor(a) self, Tensor other) -> Tensor(a)"
  6028. },
  6029. {
  6030. "name": "aten::real(Tensor(a) self) -> Tensor(a)"
  6031. },
  6032. {
  6033. "name": "quantized::conv1d_relu(Tensor qx, __torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weight, float output_scale, int output_zero_point) -> Tensor",
  6034. "category": "Layer"
  6035. },
  6036. {
  6037. "name": "aten::group_norm(Tensor input, int num_groups, Tensor? weight=None, Tensor? bias=None, float eps=1.0000000000000001e-05, bool cudnn_enabled=True) -> Tensor",
  6038. "category": "Normalization"
  6039. },
  6040. {
  6041. "name": "aten::upsample_linear1d(Tensor self, SymInt[1] output_size, bool align_corners, float? scales=None) -> Tensor"
  6042. },
  6043. {
  6044. "name": "aten::upsample_linear1d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor"
  6045. },
  6046. {
  6047. "name": "aten::upsample_linear1d.out(Tensor self, SymInt[1] output_size, bool align_corners, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)"
  6048. },
  6049. {
  6050. "name": "aten::expand_as(Tensor(a) self, Tensor other) -> Tensor(a)"
  6051. },
  6052. {
  6053. "name": "aten::unsqueeze_(Tensor(a!) self, int dim) -> Tensor(a!)",
  6054. "category": "Transform"
  6055. },
  6056. {
  6057. "name": "quantized::mul(Tensor qa, Tensor qb, float scale, int zero_point) -> Tensor qc"
  6058. },
  6059. {
  6060. "name": "quantized::mul.out(Tensor qa, Tensor qb, Tensor(a!) out) -> Tensor(a!) out"
  6061. },
  6062. {
  6063. "name": "quantized::mul.Scalar(Tensor qa, Scalar b) -> Tensor qc"
  6064. },
  6065. {
  6066. "name": "quantized::mul.Scalar2(Scalar b, Tensor qa) -> Tensor qc"
  6067. },
  6068. {
  6069. "name": "quantized::mul.Scalar_out(Tensor qa, Scalar b, Tensor(a!) out) -> Tensor(a!) out"
  6070. },
  6071. {
  6072. "name": "aten::l1_loss(Tensor self, Tensor target, int reduction=1) -> Tensor"
  6073. },
  6074. {
  6075. "name": "aten::native_layer_norm(Tensor input, SymInt[] normalized_shape, Tensor? weight, Tensor? bias, float eps) -> (Tensor, Tensor, Tensor)",
  6076. "category": "Normalization"
  6077. },
  6078. {
  6079. "name": "aten::native_layer_norm.out(Tensor input, SymInt[] normalized_shape, Tensor? weight, Tensor? bias, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))"
  6080. },
  6081. {
  6082. "name": "aten::unsqueeze(Tensor(a) self, int dim) -> Tensor(a)",
  6083. "category": "Transform"
  6084. },
  6085. {
  6086. "name": "aten::unflatten.int(Tensor(a) self, int dim, SymInt[] sizes) -> Tensor(a)",
  6087. "category": "Shape"
  6088. },
  6089. {
  6090. "name": "aten::unflatten.Dimname(Tensor(a) self, str dim, SymInt[] sizes, str[] names) -> Tensor(a)",
  6091. "category": "Shape"
  6092. },
  6093. {
  6094. "name": "aten::adaptive_max_pool3d(Tensor self, int[3] output_size) -> (Tensor, Tensor)",
  6095. "category": "Pool"
  6096. },
  6097. {
  6098. "name": "aten::adaptive_max_pool3d.out(Tensor self, int[3] output_size, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))"
  6099. },
  6100. {
  6101. "name": "quantized::conv_prepack(Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups) -> __torch__.torch.classes.quantized.Conv2dPackedParamsBase"
  6102. },
  6103. {
  6104. "name": "aten::adaptive_avg_pool2d(Tensor self, SymInt[2] output_size) -> Tensor",
  6105. "category": "Pool"
  6106. },
  6107. {
  6108. "name": "aten::adaptive_avg_pool2d.out(Tensor self, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!)",
  6109. "category": "Pool"
  6110. },
  6111. {
  6112. "name": "aten::requires_grad_(Tensor(a!) self, bool requires_grad=True) -> Tensor(a!)"
  6113. },
  6114. {
  6115. "name": "aten::view_as_real(Tensor(a) self) -> Tensor(a)"
  6116. },
  6117. {
  6118. "name": "aten::fill_.Scalar(Tensor(a!) self, Scalar value) -> Tensor(a!)"
  6119. },
  6120. {
  6121. "name": "aten::fill_.Tensor(Tensor(a!) self, Tensor value) -> Tensor(a!)"
  6122. },
  6123. {
  6124. "name": "aten::diagonal(Tensor(a) self, int offset=0, int dim1=0, int dim2=1) -> Tensor(a)"
  6125. },
  6126. {
  6127. "name": "aten::diagonal.Dimname(Tensor(a) self, *, str outdim, str dim1, str dim2, int offset=0) -> Tensor(a)"
  6128. },
  6129. {
  6130. "name": "aten::diagonal_scatter(Tensor self, Tensor src, int offset=0, int dim1=0, int dim2=1) -> Tensor"
  6131. },
  6132. {
  6133. "name": "aten::diagonal_scatter.out(Tensor self, Tensor src, int offset=0, int dim1=0, int dim2=1, *, Tensor(a!) out) -> Tensor(a!)"
  6134. },
  6135. {
  6136. "name": "aten::transpose_copy.int(Tensor self, int dim0, int dim1) -> Tensor"
  6137. },
  6138. {
  6139. "name": "aten::transpose_copy.int_out(Tensor self, int dim0, int dim1, *, Tensor(a!) out) -> Tensor(a!)"
  6140. },
  6141. {
  6142. "name": "aten::detach(Tensor(a) self) -> Tensor(a)"
  6143. },
  6144. {
  6145. "name": "aten::alpha_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)",
  6146. "category": "Dropout"
  6147. },
  6148. {
  6149. "name": "aten::rsqrt(Tensor self) -> Tensor"
  6150. },
  6151. {
  6152. "name": "aten::rsqrt.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  6153. },
  6154. {
  6155. "name": "aten::dropout(Tensor input, float p, bool train) -> Tensor",
  6156. "category": "Dropout"
  6157. },
  6158. {
  6159. "name": "aten::mul_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)"
  6160. },
  6161. {
  6162. "name": "aten::mul_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)"
  6163. },
  6164. {
  6165. "name": "aten::mul_.t(t[](a!) l, int n) -> t[](a!)"
  6166. },
  6167. {
  6168. "name": "aten::dist(Tensor self, Tensor other, Scalar p=2) -> Tensor"
  6169. },
  6170. {
  6171. "name": "aten::dist.out(Tensor self, Tensor other, Scalar p=2, *, Tensor(a!) out) -> Tensor(a!)"
  6172. },
  6173. {
  6174. "name": "aten::diff(Tensor self, int n=1, int dim=-1, Tensor? prepend=None, Tensor? append=None) -> Tensor"
  6175. },
  6176. {
  6177. "name": "aten::diff.out(Tensor self, int n=1, int dim=-1, Tensor? prepend=None, Tensor? append=None, *, Tensor(a!) out) -> Tensor(a!)"
  6178. },
  6179. {
  6180. "name": "aten::transpose.int(Tensor(a) self, int dim0, int dim1) -> Tensor(a)",
  6181. "category": "Transform"
  6182. },
  6183. {
  6184. "name": "aten::transpose.Dimname(Tensor(a) self, str dim0, str dim1) -> Tensor(a)",
  6185. "category": "Transform"
  6186. },
  6187. {
  6188. "name": "aten::sum.dim_IntList(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor"
  6189. },
  6190. {
  6191. "name": "aten::sum(Tensor self, *, ScalarType? dtype=None) -> Tensor"
  6192. },
  6193. {
  6194. "name": "aten::sum.dim_DimnameList(Tensor self, str[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor"
  6195. },
  6196. {
  6197. "name": "aten::sum.DimnameList_out(Tensor self, str[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)"
  6198. },
  6199. {
  6200. "name": "aten::sum.IntList_out(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)"
  6201. },
  6202. {
  6203. "name": "aten::sum.out(Tensor self, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)"
  6204. },
  6205. {
  6206. "name": "aten::sum.int(int[] self) -> int"
  6207. },
  6208. {
  6209. "name": "aten::sum.float(float[] self) -> float"
  6210. },
  6211. {
  6212. "name": "aten::sum.complex(complex[] self) -> complex"
  6213. },
  6214. {
  6215. "name": "aten::sum.bool(bool[] self) -> int"
  6216. },
  6217. {
  6218. "name": "aten::cosine_similarity(Tensor x1, Tensor x2, int dim=1, float eps=1e-08) -> Tensor"
  6219. },
  6220. {
  6221. "name": "quantized::conv2d_padding(__torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weights) -> int[]"
  6222. },
  6223. {
  6224. "name": "aten::bernoulli(Tensor self, *, Generator? generator=None) -> Tensor"
  6225. },
  6226. {
  6227. "name": "aten::bernoulli.out(Tensor self, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)"
  6228. },
  6229. {
  6230. "name": "aten::bernoulli.p(Tensor self, float p, *, Generator? generator=None) -> Tensor"
  6231. },
  6232. {
  6233. "name": "aten::bernoulli.Tensor(Tensor self, Tensor p, *, Generator? generator=None) -> Tensor"
  6234. },
  6235. {
  6236. "name": "aten::bernoulli.Tensor_out(Tensor self, Tensor p, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)"
  6237. },
  6238. {
  6239. "name": "aten::bernoulli.float_out(Tensor self, float p=0.5, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)"
  6240. },
  6241. {
  6242. "name": "aten::new_zeros(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  6243. },
  6244. {
  6245. "name": "aten::new_zeros.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)"
  6246. },
  6247. {
  6248. "name": "aten::smooth_l1_loss(Tensor self, Tensor target, int reduction=1, float beta=1.) -> Tensor"
  6249. },
  6250. {
  6251. "name": "aten::smooth_l1_loss.out(Tensor self, Tensor target, int reduction=1, float beta=1., *, Tensor(a!) out) -> Tensor(a!)"
  6252. },
  6253. {
  6254. "name": "quantized::conv2d_unpack_sizes(Any packed_weights) -> Any"
  6255. },
  6256. {
  6257. "name": "aten::lift_fresh(Tensor(a) self) -> Tensor(a)"
  6258. },
  6259. {
  6260. "name": "aten::stft(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool normalized=False, bool? onesided=None, bool? return_complex=None) -> Tensor"
  6261. },
  6262. {
  6263. "name": "aten::stft.center(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool center=True, str pad_mode=\"reflect\", bool normalized=False, bool? onesided=None, bool? return_complex=None) -> Tensor"
  6264. },
  6265. {
  6266. "name": "aten::sub_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)"
  6267. },
  6268. {
  6269. "name": "aten::sub_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)"
  6270. },
  6271. {
  6272. "name": "aten::cosh(Tensor self) -> Tensor"
  6273. },
  6274. {
  6275. "name": "aten::cosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  6276. },
  6277. {
  6278. "name": "aten::cosh.int(int a) -> float"
  6279. },
  6280. {
  6281. "name": "aten::cosh.float(float a) -> float"
  6282. },
  6283. {
  6284. "name": "aten::cosh.complex(complex a) -> complex"
  6285. },
  6286. {
  6287. "name": "aten::cosh.Scalar(Scalar a) -> Scalar"
  6288. },
  6289. {
  6290. "name": "aten::squeeze_(Tensor(a!) self) -> Tensor(a!)",
  6291. "category": "Transform"
  6292. },
  6293. {
  6294. "name": "aten::squeeze_.dim(Tensor(a!) self, int dim) -> Tensor(a!)",
  6295. "category": "Transform"
  6296. },
  6297. {
  6298. "name": "aten::squeeze_.dims(Tensor(a!) self, int[] dim) -> Tensor(a!)"
  6299. },
  6300. {
  6301. "name": "aten::squeeze_.dimname(Tensor(a!) self, str dim) -> Tensor(a!)",
  6302. "category": "Transform"
  6303. },
  6304. {
  6305. "name": "quantized::conv2d_unpack(__torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weights) -> (Tensor unpacked_weights, Tensor? B_origin)"
  6306. },
  6307. {
  6308. "name": "aten::remainder.Tensor(Tensor self, Tensor other) -> Tensor"
  6309. },
  6310. {
  6311. "name": "aten::remainder.Scalar(Tensor self, Scalar other) -> Tensor"
  6312. },
  6313. {
  6314. "name": "aten::remainder.Scalar_Tensor(Scalar self, Tensor other) -> Tensor"
  6315. },
  6316. {
  6317. "name": "aten::remainder.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  6318. },
  6319. {
  6320. "name": "aten::remainder.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)"
  6321. },
  6322. {
  6323. "name": "aten::remainder.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  6324. },
  6325. {
  6326. "name": "aten::remainder.int(int a, int b) -> int"
  6327. },
  6328. {
  6329. "name": "aten::remainder.float(float a, float b) -> float"
  6330. },
  6331. {
  6332. "name": "aten::remainder.int_float(int a, float b) -> float"
  6333. },
  6334. {
  6335. "name": "aten::remainder.float_int(float a, int b) -> float"
  6336. },
  6337. {
  6338. "name": "aten::remainder(Scalar a, Scalar b) -> Scalar"
  6339. },
  6340. {
  6341. "name": "aten::_convolution.deprecated(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, int[] output_padding, SymInt groups, bool benchmark, bool deterministic, bool cudnn_enabled) -> Tensor",
  6342. "category": "Layer"
  6343. },
  6344. {
  6345. "name": "aten::_convolution(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32) -> Tensor",
  6346. "category": "Layer"
  6347. },
  6348. {
  6349. "name": "aten::_convolution.out(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!)"
  6350. },
  6351. {
  6352. "name": "aten::squeeze(Tensor(a) self) -> Tensor(a)",
  6353. "category": "Transform"
  6354. },
  6355. {
  6356. "name": "aten::squeeze.dim(Tensor(a) self, int dim) -> Tensor(a)",
  6357. "category": "Transform"
  6358. },
  6359. {
  6360. "name": "aten::squeeze.dims(Tensor(a) self, int[] dim) -> Tensor(a)",
  6361. "category": "Transform"
  6362. },
  6363. {
  6364. "name": "aten::squeeze.dimname(Tensor(a) self, str dim) -> Tensor(a)",
  6365. "category": "Transform"
  6366. },
  6367. {
  6368. "name": "aten::select.Dimname(Tensor(a) self, str dim, int index) -> Tensor(a)"
  6369. },
  6370. {
  6371. "name": "aten::select.int(Tensor(a) self, int dim, SymInt index) -> Tensor(a)"
  6372. },
  6373. {
  6374. "name": "aten::select.t(t[](a) list, int idx) -> t(*)"
  6375. },
  6376. {
  6377. "name": "aten::special_expit(Tensor self) -> Tensor"
  6378. },
  6379. {
  6380. "name": "aten::special_expit.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  6381. },
  6382. {
  6383. "name": "aten::cdist(Tensor x1, Tensor x2, float p=2., int? compute_mode=None) -> Tensor"
  6384. },
  6385. {
  6386. "name": "aten::linalg_svd(Tensor A, bool full_matrices=True, *, str? driver=None) -> (Tensor U, Tensor S, Tensor Vh)"
  6387. },
  6388. {
  6389. "name": "aten::linalg_svd.U(Tensor A, bool full_matrices=True, *, str? driver=None, Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh)"
  6390. },
  6391. {
  6392. "name": "aten::clone(Tensor self, *, MemoryFormat? memory_format=None) -> Tensor"
  6393. },
  6394. {
  6395. "name": "aten::clone.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)"
  6396. },
  6397. {
  6398. "name": "aten::concatenate(Tensor[] tensors, int dim=0) -> Tensor"
  6399. },
  6400. {
  6401. "name": "aten::concatenate.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)"
  6402. },
  6403. {
  6404. "name": "aten::concatenate.names(Tensor[] tensors, str dim) -> Tensor"
  6405. },
  6406. {
  6407. "name": "aten::concatenate.names_out(Tensor[] tensors, str dim, *, Tensor(a!) out) -> Tensor(a!)"
  6408. },
  6409. {
  6410. "name": "aten::permute(Tensor(a) self, int[] dims) -> Tensor(a)",
  6411. "category": "Shape"
  6412. },
  6413. {
  6414. "name": "aten::amin(Tensor self, int[1] dim=[], bool keepdim=False) -> Tensor"
  6415. },
  6416. {
  6417. "name": "aten::amin.out(Tensor self, int[1] dim=[], bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)"
  6418. },
  6419. {
  6420. "name": "aten::rsub.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor"
  6421. },
  6422. {
  6423. "name": "aten::rsub.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor"
  6424. },
  6425. {
  6426. "name": "aten::rsub.Tensor_out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)"
  6427. },
  6428. {
  6429. "name": "aten::rsub.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!)"
  6430. },
  6431. {
  6432. "name": "aten::mse_loss(Tensor self, Tensor target, int reduction=1) -> Tensor"
  6433. },
  6434. {
  6435. "name": "aten::mse_loss.out(Tensor self, Tensor target, int reduction=1, *, Tensor(a!) out) -> Tensor(a!)"
  6436. },
  6437. {
  6438. "name": "aten::div_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)"
  6439. },
  6440. {
  6441. "name": "aten::div_.Tensor_mode(Tensor(a!) self, Tensor other, *, str? rounding_mode) -> Tensor(a!)"
  6442. },
  6443. {
  6444. "name": "aten::div_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)"
  6445. },
  6446. {
  6447. "name": "aten::div_.Scalar_mode(Tensor(a!) self, Scalar other, *, str? rounding_mode) -> Tensor(a!)"
  6448. },
  6449. {
  6450. "name": "aten::dot(Tensor self, Tensor tensor) -> Tensor"
  6451. },
  6452. {
  6453. "name": "aten::dot.out(Tensor self, Tensor tensor, *, Tensor(a!) out) -> Tensor(a!)"
  6454. },
  6455. {
  6456. "name": "aten::movedim.int(Tensor(a) self, int source, int destination) -> Tensor(a)"
  6457. },
  6458. {
  6459. "name": "aten::movedim.intlist(Tensor(a) self, int[] source, int[] destination) -> Tensor(a)"
  6460. },
  6461. {
  6462. "name": "aten::reshape_as(Tensor(a) self, Tensor other) -> Tensor(a)",
  6463. "category": "Shape"
  6464. },
  6465. {
  6466. "name": "aten::maximum(Tensor self, Tensor other) -> Tensor"
  6467. },
  6468. {
  6469. "name": "aten::maximum.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  6470. },
  6471. {
  6472. "name": "aten::t(Tensor(a) self) -> Tensor(a)"
  6473. },
  6474. {
  6475. "name": "aten::full_like(Tensor self, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor"
  6476. },
  6477. {
  6478. "name": "aten::full_like.out(Tensor self, Scalar fill_value, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)"
  6479. },
  6480. {
  6481. "name": "aten::detach_(Tensor(a!) self) -> Tensor(a!)"
  6482. },
  6483. {
  6484. "name": "quantized::relu6(Tensor qx, bool inplace=False) -> Tensor",
  6485. "category": "Activation"
  6486. },
  6487. {
  6488. "name": "aten::lerp.Scalar(Tensor self, Tensor end, Scalar weight) -> Tensor"
  6489. },
  6490. {
  6491. "name": "aten::lerp.Tensor(Tensor self, Tensor end, Tensor weight) -> Tensor"
  6492. },
  6493. {
  6494. "name": "aten::lerp.Scalar_out(Tensor self, Tensor end, Scalar weight, *, Tensor(a!) out) -> Tensor(a!)"
  6495. },
  6496. {
  6497. "name": "aten::lerp.Tensor_out(Tensor self, Tensor end, Tensor weight, *, Tensor(a!) out) -> Tensor(a!)"
  6498. },
  6499. {
  6500. "name": "aten::baddbmm(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor"
  6501. },
  6502. {
  6503. "name": "aten::baddbmm.out(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)"
  6504. },
  6505. {
  6506. "name": "aten::nll_loss2d(Tensor self, Tensor target, Tensor? weight=None, int reduction=1, SymInt ignore_index=-100) -> Tensor"
  6507. },
  6508. {
  6509. "name": "aten::nll_loss2d.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=1, SymInt ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!)"
  6510. },
  6511. {
  6512. "name": "aten::view(Tensor(a) self, SymInt[] size) -> Tensor(a)"
  6513. },
  6514. {
  6515. "name": "aten::view.dtype(Tensor(a) self, ScalarType dtype) -> Tensor(a)"
  6516. },
  6517. {
  6518. "name": "aten::addmm_(Tensor(a!) self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)"
  6519. },
  6520. {
  6521. "name": "aten::fft_ifft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor"
  6522. },
  6523. {
  6524. "name": "aten::fft_ifft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)"
  6525. },
  6526. {
  6527. "name": "aten::bmm(Tensor self, Tensor mat2) -> Tensor"
  6528. },
  6529. {
  6530. "name": "aten::bmm.out(Tensor self, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!)"
  6531. },
  6532. {
  6533. "name": "aten::mm(Tensor self, Tensor mat2) -> Tensor"
  6534. },
  6535. {
  6536. "name": "aten::mm.out(Tensor self, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!)"
  6537. },
  6538. {
  6539. "name": "aten::as_strided_copy(Tensor self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor"
  6540. },
  6541. {
  6542. "name": "aten::as_strided_copy.out(Tensor self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None, *, Tensor(a!) out) -> Tensor(a!)"
  6543. },
  6544. {
  6545. "name": "aten::sign(Tensor self) -> Tensor"
  6546. },
  6547. {
  6548. "name": "aten::sign.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  6549. },
  6550. {
  6551. "name": "aten::narrow(Tensor(a) self, int dim, SymInt start, SymInt length) -> Tensor(a)"
  6552. },
  6553. {
  6554. "name": "aten::narrow.Tensor(Tensor(a) self, int dim, Tensor start, SymInt length) -> Tensor(a)"
  6555. },
  6556. {
  6557. "name": "aten::max_pool2d_with_indices(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor)",
  6558. "category": "Pool"
  6559. },
  6560. {
  6561. "name": "aten::max_pool2d_with_indices.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))"
  6562. },
  6563. {
  6564. "name": "aten::cross(Tensor self, Tensor other, int? dim=None) -> Tensor"
  6565. },
  6566. {
  6567. "name": "aten::cross.out(Tensor self, Tensor other, int? dim=None, *, Tensor(a!) out) -> Tensor(a!)"
  6568. },
  6569. {
  6570. "name": "aten::swapaxes(Tensor(a) self, int axis0, int axis1) -> Tensor(a)"
  6571. },
  6572. {
  6573. "name": "aten::nll_loss(Tensor self, Tensor target, Tensor? weight=None, int reduction=1, SymInt ignore_index=-100) -> Tensor"
  6574. },
  6575. {
  6576. "name": "aten::nll_loss.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=1, SymInt ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!)"
  6577. },
  6578. {
  6579. "name": "aten::vdot(Tensor self, Tensor other) -> Tensor"
  6580. },
  6581. {
  6582. "name": "aten::vdot.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  6583. },
  6584. {
  6585. "name": "aten::minimum(Tensor self, Tensor other) -> Tensor"
  6586. },
  6587. {
  6588. "name": "aten::minimum.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  6589. },
  6590. {
  6591. "name": "aten::tan(Tensor self) -> Tensor"
  6592. },
  6593. {
  6594. "name": "aten::tan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  6595. },
  6596. {
  6597. "name": "aten::tan.int(int a) -> float"
  6598. },
  6599. {
  6600. "name": "aten::tan.float(float a) -> float"
  6601. },
  6602. {
  6603. "name": "aten::tan.complex(complex a) -> complex"
  6604. },
  6605. {
  6606. "name": "aten::tan.Scalar(Scalar a) -> Scalar"
  6607. },
  6608. {
  6609. "name": "quantized::quantized_lstm_cell_dynamic(Tensor input, Tensor[] hx, __torch__.torch.classes.quantized.LinearPackedParamsBase w_ih, __torch__.torch.classes.quantized.LinearPackedParamsBase w_hh, Tensor bias_ih, Tensor bias_hh) -> (Tensor, Tensor)"
  6610. },
  6611. {
  6612. "name": "aten::log_normal_(Tensor(a!) self, float mean=1., float std=2., *, Generator? generator=None) -> Tensor(a!)"
  6613. },
  6614. {
  6615. "name": "aten::complex(Tensor real, Tensor imag) -> Tensor"
  6616. },
  6617. {
  6618. "name": "aten::complex.out(Tensor real, Tensor imag, *, Tensor(a!) out) -> Tensor(a!)"
  6619. },
  6620. {
  6621. "name": "aten::logical_not_(Tensor(a!) self) -> Tensor(a!)"
  6622. },
  6623. {
  6624. "name": "aten::as_strided(Tensor(a) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a)"
  6625. },
  6626. {
  6627. "name": "aten::unfold(Tensor(a) self, int dimension, int size, int step) -> Tensor(a)"
  6628. },
  6629. {
  6630. "name": "aten::empty_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor"
  6631. },
  6632. {
  6633. "name": "aten::empty_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)"
  6634. },
  6635. {
  6636. "name": "quantized::quantized_rnn_relu_cell_dynamic(Tensor input, Tensor hx, __torch__.torch.classes.quantized.LinearPackedParamsBase w_ih, __torch__.torch.classes.quantized.LinearPackedParamsBase w_hh, Tensor b_ih, Tensor b_hh) -> Tensor"
  6637. },
  6638. {
  6639. "name": "aten::true_divide.Tensor(Tensor self, Tensor other) -> Tensor"
  6640. },
  6641. {
  6642. "name": "aten::true_divide.Scalar(Tensor self, Scalar other) -> Tensor"
  6643. },
  6644. {
  6645. "name": "aten::true_divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  6646. },
  6647. {
  6648. "name": "aten::einsum(str equation, Tensor[] tensors, *, int[]? path=None) -> Tensor"
  6649. },
  6650. {
  6651. "name": "aten::einsum.sublist(Tensor a, ...) -> Tensor"
  6652. },
  6653. {
  6654. "name": "prepacked::conv2d_clamp_run(Tensor X, __torch__.torch.classes.xnnpack.Conv2dOpContext W_prepack) -> Tensor Y",
  6655. "category": "Layer"
  6656. },
  6657. {
  6658. "name": "aten::tensor_split.sections(Tensor(a -> *) self, SymInt sections, int dim=0) -> Tensor(a)[]"
  6659. },
  6660. {
  6661. "name": "aten::tensor_split.indices(Tensor(a -> *) self, SymInt[] indices, int dim=0) -> Tensor(a)[]"
  6662. },
  6663. {
  6664. "name": "aten::tensor_split.tensor_indices_or_sections(Tensor(a -> *) self, Tensor tensor_indices_or_sections, int dim=0) -> Tensor(a)[]"
  6665. },
  6666. {
  6667. "name": "aten::linalg_solve_triangular(Tensor self, Tensor B, *, bool upper, bool left=True, bool unitriangular=False) -> Tensor"
  6668. },
  6669. {
  6670. "name": "aten::linalg_solve_triangular.out(Tensor self, Tensor B, *, bool upper, bool left=True, bool unitriangular=False, Tensor(a!) out) -> Tensor(a!)"
  6671. },
  6672. {
  6673. "name": "aten::concat(Tensor[] tensors, int dim=0) -> Tensor",
  6674. "category": "Tensor"
  6675. },
  6676. {
  6677. "name": "aten::concat.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)"
  6678. },
  6679. {
  6680. "name": "aten::concat.names(Tensor[] tensors, str dim) -> Tensor",
  6681. "category": "Tensor"
  6682. },
  6683. {
  6684. "name": "aten::concat.names_out(Tensor[] tensors, str dim, *, Tensor(a!) out) -> Tensor(a!)"
  6685. },
  6686. {
  6687. "name": "aten::tanh(Tensor self) -> Tensor",
  6688. "category": "Activation"
  6689. },
  6690. {
  6691. "name": "aten::tanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)",
  6692. "category": "Activation"
  6693. },
  6694. {
  6695. "name": "aten::tanh.int(int a) -> float",
  6696. "category": "Activation"
  6697. },
  6698. {
  6699. "name": "aten::tanh.float(float a) -> float",
  6700. "category": "Activation"
  6701. },
  6702. {
  6703. "name": "aten::tanh.complex(complex a) -> complex",
  6704. "category": "Activation"
  6705. },
  6706. {
  6707. "name": "aten::tanh.Scalar(Scalar a) -> Scalar",
  6708. "category": "Activation"
  6709. },
  6710. {
  6711. "name": "aten::addcdiv(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor"
  6712. },
  6713. {
  6714. "name": "aten::addcdiv.out(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1, Tensor(a!) out) -> Tensor(a!)"
  6715. },
  6716. {
  6717. "name": "aten::add_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)"
  6718. },
  6719. {
  6720. "name": "aten::add_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)"
  6721. },
  6722. {
  6723. "name": "aten::add_.t(t[](a!) self, t[] b) -> t[]"
  6724. },
  6725. {
  6726. "name": "aten::resolve_conj(Tensor(a) self) -> Tensor(a)"
  6727. },
  6728. {
  6729. "name": "_caffe2::BBoxTransform(Tensor rois, Tensor deltas, Tensor im_info, float[] weights, bool apply_scale, bool rotated, bool angle_bound_on, int angle_bound_lo, int angle_bound_hi, float clip_angle_thresh, bool legacy_plus_one, Tensor[]? _caffe2_preallocated_outputs=None) -> (Tensor output_0, Tensor output_1)"
  6730. },
  6731. {
  6732. "name": "_caffe2::BatchPermutation(Tensor X, Tensor indices, Tensor[]? _caffe2_preallocated_outputs=None) -> Tensor"
  6733. },
  6734. {
  6735. "name": "_caffe2::BoxWithNMSLimit(Tensor scores, Tensor boxes, Tensor batch_splits, float score_thresh, float nms, int detections_per_im, bool soft_nms_enabled, str soft_nms_method, float soft_nms_sigma, float soft_nms_min_score_thres, bool rotated, bool cls_agnostic_bbox_reg, bool input_boxes_include_bg_cls, bool output_classes_include_bg_cls, bool legacy_plus_one, Tensor[]? _caffe2_preallocated_outputs=None) -> (Tensor scores, Tensor boxes, Tensor classes, Tensor batch_splits, Tensor keeps, Tensor keeps_size)"
  6736. },
  6737. {
  6738. "name": "_caffe2::CollectAndDistributeFpnRpnProposals(Tensor[] input_list, int roi_canonical_scale, int roi_canonical_level, int roi_max_level, int roi_min_level, int rpn_max_level, int rpn_min_level, int rpn_post_nms_topN, bool legacy_plus_one, Tensor[]? _caffe2_preallocated_outputs=None) -> (Tensor rois, Tensor rois_fpn2, Tensor rois_fpn3, Tensor rois_fpn4, Tensor rois_fpn5, Tensor rois_idx_restore_int32)"
  6739. },
  6740. {
  6741. "name": "_caffe2::CollectRpnProposals(Tensor[] input_list, int rpn_max_level, int rpn_min_level, int rpn_post_nms_topN, Tensor[]? _caffe2_preallocated_outputs=None) -> (Tensor rois)"
  6742. },
  6743. {
  6744. "name": "_caffe2::CopyCPUToGPU(Tensor input, Tensor[]? _caffe2_preallocated_outputs=None) -> Tensor"
  6745. },
  6746. {
  6747. "name": "_caffe2::CopyGPUToCPU(Tensor input, Tensor[]? _caffe2_preallocated_outputs=None) -> Tensor"
  6748. },
  6749. {
  6750. "name": "_caffe2::DistributeFpnProposals(Tensor rois, int roi_canonical_scale, int roi_canonical_level, int roi_max_level, int roi_min_level, bool legacy_plus_one, Tensor[]? _caffe2_preallocated_outputs=None) -> (Tensor rois_fpn2, Tensor rois_fpn3, Tensor rois_fpn4, Tensor rois_fpn5, Tensor rois_idx_restore_int32)"
  6751. },
  6752. {
  6753. "name": "_caffe2::GenerateProposals(Tensor scores, Tensor bbox_deltas, Tensor im_info, Tensor anchors, float spatial_scale, int pre_nms_topN, int post_nms_topN, float nms_thresh, float min_size, bool angle_bound_on, int angle_bound_lo, int angle_bound_hi, float clip_angle_thresh, bool legacy_plus_one, Tensor[]? _caffe2_preallocated_outputs=None) -> (Tensor output_0, Tensor output_1)"
  6754. },
  6755. {
  6756. "name": "_caffe2::RoIAlign(Tensor features, Tensor rois, str order, float spatial_scale, int pooled_h, int pooled_w, int sampling_ratio, bool aligned, Tensor[]? _caffe2_preallocated_outputs=None) -> Tensor"
  6757. },
  6758. {
  6759. "name": "aten::_cat(Tensor[] tensors, int dim=0) -> Tensor"
  6760. },
  6761. {
  6762. "name": "aten::_cat.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)"
  6763. },
  6764. {
  6765. "name": "aten::arange.start_out_(Scalar start, Scalar end) -> Tensor"
  6766. },
  6767. {
  6768. "name": "aten::fft(Tensor self, int signal_ndim, bool normalized=False) -> Tensor"
  6769. },
  6770. {
  6771. "name": "aten::grid_sampler.legacy(Tensor input, Tensor grid, int interpolation_mode, int padding_mode) -> Tensor"
  6772. },
  6773. {
  6774. "name": "neuron::forward_v2_1(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> Tensor _0"
  6775. },
  6776. {
  6777. "name": "prim::isinstance(Any to_check) -> bool"
  6778. },
  6779. {
  6780. "name": "prim::shape(Tensor self) -> int[]"
  6781. },
  6782. {
  6783. "name": "quantized_decomposed::dequantize_per_tensor.Tensor_out(Tensor input, Tensor scale, Tensor zero_point, int quant_min, int quant_max, ScalarType dtype, *, ScalarType? out_dtype=None, Tensor(a!) out) -> Tensor(a!)"
  6784. },
  6785. {
  6786. "name": "quantized_decomposed::dequantize_per_tensor.out(Tensor input, float scale, int zero_point, int quant_min, int quant_max, ScalarType dtype, *, ScalarType? out_dtype=None, Tensor(a!) out) -> Tensor(a!)"
  6787. },
  6788. {
  6789. "name": "quantized_decomposed::quantize_per_tensor.out(Tensor input, float scale, int zero_point, int quant_min, int quant_max, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!)"
  6790. },
  6791. {
  6792. "name": "torch_scatter::cuda_version() -> int _0"
  6793. },
  6794. {
  6795. "name": "torch_scatter::gather_coo(Tensor _0, Tensor _1, Tensor? _2) -> Tensor _0"
  6796. },
  6797. {
  6798. "name": "torch_scatter::gather_csr(Tensor _0, Tensor _1, Tensor? _2) -> Tensor _0"
  6799. },
  6800. {
  6801. "name": "torch_scatter::scatter_max(Tensor _0, Tensor _1, int _2, Tensor? _3, int? _4) -> (Tensor _0, Tensor _1)"
  6802. },
  6803. {
  6804. "name": "torch_scatter::scatter_mean(Tensor _0, Tensor _1, int _2, Tensor? _3, int? _4) -> Tensor _0"
  6805. },
  6806. {
  6807. "name": "torch_scatter::scatter_min(Tensor _0, Tensor _1, int _2, Tensor? _3, int? _4) -> (Tensor _0, Tensor _1)"
  6808. },
  6809. {
  6810. "name": "torch_scatter::scatter_mul(Tensor _0, Tensor _1, int _2, Tensor? _3, int? _4) -> Tensor _0"
  6811. },
  6812. {
  6813. "name": "torch_scatter::scatter_sum(Tensor _0, Tensor _1, int _2, Tensor? _3, int? _4) -> Tensor _0"
  6814. },
  6815. {
  6816. "name": "torch_scatter::segment_max_coo(Tensor _0, Tensor _1, Tensor? _2, int? _3) -> (Tensor _0, Tensor _1)"
  6817. },
  6818. {
  6819. "name": "torch_scatter::segment_max_csr(Tensor _0, Tensor _1, Tensor? _2) -> (Tensor _0, Tensor _1)"
  6820. },
  6821. {
  6822. "name": "torch_scatter::segment_mean_coo(Tensor _0, Tensor _1, Tensor? _2, int? _3) -> Tensor _0"
  6823. },
  6824. {
  6825. "name": "torch_scatter::segment_mean_csr(Tensor _0, Tensor _1, Tensor? _2) -> Tensor _0"
  6826. },
  6827. {
  6828. "name": "torch_scatter::segment_min_coo(Tensor _0, Tensor _1, Tensor? _2, int? _3) -> (Tensor _0, Tensor _1)"
  6829. },
  6830. {
  6831. "name": "torch_scatter::segment_min_csr(Tensor _0, Tensor _1, Tensor? _2) -> (Tensor _0, Tensor _1)"
  6832. },
  6833. {
  6834. "name": "torch_scatter::segment_sum_coo(Tensor _0, Tensor _1, Tensor? _2, int? _3) -> Tensor _0"
  6835. },
  6836. {
  6837. "name": "torch_scatter::segment_sum_csr(Tensor _0, Tensor _1, Tensor? _2) -> Tensor _0"
  6838. },
  6839. {
  6840. "name": "torchaudio::sox_effects_apply_effects_tensor(Tensor tensor, int sample_rate, str[][] effects, bool channels_first=True) -> (Tensor, int)"
  6841. },
  6842. {
  6843. "name": "neuron::_execute_neuron(__torch__.torch.classes.neuron.Model _0, Tensor[] _1) -> Tensor[] _0"
  6844. },
  6845. {
  6846. "name": "neuron::_from_neuron(Tensor _0) -> Tensor _0"
  6847. },
  6848. {
  6849. "name": "neuron::_init_neuron() -> ()"
  6850. },
  6851. {
  6852. "name": "neuron::_load_collectives_neuron(__torch__.torch.classes.neuron.Model _0, int _1, int _2, int _3, int _4) -> ()"
  6853. },
  6854. {
  6855. "name": "neuron::_load_neuron(__torch__.torch.classes.neuron.Model _0) -> ()"
  6856. },
  6857. {
  6858. "name": "neuron::_parallel_executor_run(__torch__.torch.classes.neuron.ParallelExecutor _0, Tensor[] _1, int _2) -> Tensor[] _0"
  6859. },
  6860. {
  6861. "name": "neuron::_parallel_from_neuron(Tensor _0) -> Tensor[] _0"
  6862. },
  6863. {
  6864. "name": "neuron::_parallel_load(Dict(str, Tensor)[] _0) -> Dict(str, Tensor)[] _0"
  6865. },
  6866. {
  6867. "name": "neuron::_parallel_profile_start_neuron(__torch__.torch.classes.neuron.ParallelModel _0, str _1, int _2) -> str[] _0"
  6868. },
  6869. {
  6870. "name": "neuron::_parallel_profile_stop_neuron(str[] _0) -> ()"
  6871. },
  6872. {
  6873. "name": "neuron::_parallel_run_neuron(__torch__.torch.classes.neuron.ParallelModel _0, __torch__.torch.classes.neuron.ParallelTensorSet _1, __torch__.torch.classes.neuron.ParallelTensorSet _2) -> ()"
  6874. },
  6875. {
  6876. "name": "neuron::_parallel_slice_neuron(Tensor _0, int _1, int _2, int _3, int _4) -> Tensor _0"
  6877. },
  6878. {
  6879. "name": "neuron::_parallel_to_neuron(Tensor[] _0) -> Tensor _0"
  6880. },
  6881. {
  6882. "name": "neuron::_parallel_write_neuron(Tensor _0, Tensor[] _1) -> ()"
  6883. },
  6884. {
  6885. "name": "neuron::_profile_start_neuron(__torch__.torch.classes.neuron.Model _0, str _1) -> ()"
  6886. },
  6887. {
  6888. "name": "neuron::_profile_stop_neuron(str _0) -> ()"
  6889. },
  6890. {
  6891. "name": "neuron::_slice_neuron(Tensor _0, int _1, int _2, int _3, int _4) -> Tensor _0"
  6892. },
  6893. {
  6894. "name": "neuron::_to_neuron(Tensor _0, int _1) -> Tensor _0"
  6895. },
  6896. {
  6897. "name": "neuron::create_module_from_graph(str _0, str _1) -> str _0"
  6898. },
  6899. {
  6900. "name": "neuron::forward_1(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> Tensor _0"
  6901. },
  6902. {
  6903. "name": "neuron::forward_10(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9)"
  6904. },
  6905. {
  6906. "name": "neuron::forward_11(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10)"
  6907. },
  6908. {
  6909. "name": "neuron::forward_12(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11)"
  6910. },
  6911. {
  6912. "name": "neuron::forward_13(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12)"
  6913. },
  6914. {
  6915. "name": "neuron::forward_14(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13)"
  6916. },
  6917. {
  6918. "name": "neuron::forward_15(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14)"
  6919. },
  6920. {
  6921. "name": "neuron::forward_16(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15)"
  6922. },
  6923. {
  6924. "name": "neuron::forward_17(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16)"
  6925. },
  6926. {
  6927. "name": "neuron::forward_18(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17)"
  6928. },
  6929. {
  6930. "name": "neuron::forward_19(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18)"
  6931. },
  6932. {
  6933. "name": "neuron::forward_2(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1)"
  6934. },
  6935. {
  6936. "name": "neuron::forward_20(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19)"
  6937. },
  6938. {
  6939. "name": "neuron::forward_21(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20)"
  6940. },
  6941. {
  6942. "name": "neuron::forward_22(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21)"
  6943. },
  6944. {
  6945. "name": "neuron::forward_23(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22)"
  6946. },
  6947. {
  6948. "name": "neuron::forward_24(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23)"
  6949. },
  6950. {
  6951. "name": "neuron::forward_25(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24)"
  6952. },
  6953. {
  6954. "name": "neuron::forward_26(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25)"
  6955. },
  6956. {
  6957. "name": "neuron::forward_27(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26)"
  6958. },
  6959. {
  6960. "name": "neuron::forward_28(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27)"
  6961. },
  6962. {
  6963. "name": "neuron::forward_29(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28)"
  6964. },
  6965. {
  6966. "name": "neuron::forward_3(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2)"
  6967. },
  6968. {
  6969. "name": "neuron::forward_30(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29)"
  6970. },
  6971. {
  6972. "name": "neuron::forward_31(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30)"
  6973. },
  6974. {
  6975. "name": "neuron::forward_32(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31)"
  6976. },
  6977. {
  6978. "name": "neuron::forward_33(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32)"
  6979. },
  6980. {
  6981. "name": "neuron::forward_34(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33)"
  6982. },
  6983. {
  6984. "name": "neuron::forward_35(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34)"
  6985. },
  6986. {
  6987. "name": "neuron::forward_36(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35)"
  6988. },
  6989. {
  6990. "name": "neuron::forward_37(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36)"
  6991. },
  6992. {
  6993. "name": "neuron::forward_38(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37)"
  6994. },
  6995. {
  6996. "name": "neuron::forward_39(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38)"
  6997. },
  6998. {
  6999. "name": "neuron::forward_4(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3)"
  7000. },
  7001. {
  7002. "name": "neuron::forward_40(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39)"
  7003. },
  7004. {
  7005. "name": "neuron::forward_41(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40)"
  7006. },
  7007. {
  7008. "name": "neuron::forward_42(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41)"
  7009. },
  7010. {
  7011. "name": "neuron::forward_43(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42)"
  7012. },
  7013. {
  7014. "name": "neuron::forward_44(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43)"
  7015. },
  7016. {
  7017. "name": "neuron::forward_45(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44)"
  7018. },
  7019. {
  7020. "name": "neuron::forward_46(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45)"
  7021. },
  7022. {
  7023. "name": "neuron::forward_47(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46)"
  7024. },
  7025. {
  7026. "name": "neuron::forward_48(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47)"
  7027. },
  7028. {
  7029. "name": "neuron::forward_49(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47, Tensor _48)"
  7030. },
  7031. {
  7032. "name": "neuron::forward_5(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4)"
  7033. },
  7034. {
  7035. "name": "neuron::forward_50(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47, Tensor _48, Tensor _49)"
  7036. },
  7037. {
  7038. "name": "neuron::forward_51(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47, Tensor _48, Tensor _49, Tensor _50)"
  7039. },
  7040. {
  7041. "name": "neuron::forward_52(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47, Tensor _48, Tensor _49, Tensor _50, Tensor _51)"
  7042. },
  7043. {
  7044. "name": "neuron::forward_53(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47, Tensor _48, Tensor _49, Tensor _50, Tensor _51, Tensor _52)"
  7045. },
  7046. {
  7047. "name": "neuron::forward_54(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47, Tensor _48, Tensor _49, Tensor _50, Tensor _51, Tensor _52, Tensor _53)"
  7048. },
  7049. {
  7050. "name": "neuron::forward_55(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47, Tensor _48, Tensor _49, Tensor _50, Tensor _51, Tensor _52, Tensor _53, Tensor _54)"
  7051. },
  7052. {
  7053. "name": "neuron::forward_56(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47, Tensor _48, Tensor _49, Tensor _50, Tensor _51, Tensor _52, Tensor _53, Tensor _54, Tensor _55)"
  7054. },
  7055. {
  7056. "name": "neuron::forward_57(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47, Tensor _48, Tensor _49, Tensor _50, Tensor _51, Tensor _52, Tensor _53, Tensor _54, Tensor _55, Tensor _56)"
  7057. },
  7058. {
  7059. "name": "neuron::forward_58(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47, Tensor _48, Tensor _49, Tensor _50, Tensor _51, Tensor _52, Tensor _53, Tensor _54, Tensor _55, Tensor _56, Tensor _57)"
  7060. },
  7061. {
  7062. "name": "neuron::forward_59(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47, Tensor _48, Tensor _49, Tensor _50, Tensor _51, Tensor _52, Tensor _53, Tensor _54, Tensor _55, Tensor _56, Tensor _57, Tensor _58)"
  7063. },
  7064. {
  7065. "name": "neuron::forward_6(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5)"
  7066. },
  7067. {
  7068. "name": "neuron::forward_60(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47, Tensor _48, Tensor _49, Tensor _50, Tensor _51, Tensor _52, Tensor _53, Tensor _54, Tensor _55, Tensor _56, Tensor _57, Tensor _58, Tensor _59)"
  7069. },
  7070. {
  7071. "name": "neuron::forward_61(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47, Tensor _48, Tensor _49, Tensor _50, Tensor _51, Tensor _52, Tensor _53, Tensor _54, Tensor _55, Tensor _56, Tensor _57, Tensor _58, Tensor _59, Tensor _60)"
  7072. },
  7073. {
  7074. "name": "neuron::forward_62(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47, Tensor _48, Tensor _49, Tensor _50, Tensor _51, Tensor _52, Tensor _53, Tensor _54, Tensor _55, Tensor _56, Tensor _57, Tensor _58, Tensor _59, Tensor _60, Tensor _61)"
  7075. },
  7076. {
  7077. "name": "neuron::forward_63(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47, Tensor _48, Tensor _49, Tensor _50, Tensor _51, Tensor _52, Tensor _53, Tensor _54, Tensor _55, Tensor _56, Tensor _57, Tensor _58, Tensor _59, Tensor _60, Tensor _61, Tensor _62)"
  7078. },
  7079. {
  7080. "name": "neuron::forward_64(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47, Tensor _48, Tensor _49, Tensor _50, Tensor _51, Tensor _52, Tensor _53, Tensor _54, Tensor _55, Tensor _56, Tensor _57, Tensor _58, Tensor _59, Tensor _60, Tensor _61, Tensor _62, Tensor _63)"
  7081. },
  7082. {
  7083. "name": "neuron::forward_7(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6)"
  7084. },
  7085. {
  7086. "name": "neuron::forward_8(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7)"
  7087. },
  7088. {
  7089. "name": "neuron::forward_9(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8)"
  7090. },
  7091. {
  7092. "name": "neuron::forward_v2(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> Tensor[] _0"
  7093. },
  7094. {
  7095. "name": "neuron::forward_v2_10(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9)"
  7096. },
  7097. {
  7098. "name": "neuron::forward_v2_11(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10)"
  7099. },
  7100. {
  7101. "name": "neuron::forward_v2_12(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11)"
  7102. },
  7103. {
  7104. "name": "neuron::forward_v2_13(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12)"
  7105. },
  7106. {
  7107. "name": "neuron::forward_v2_14(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13)"
  7108. },
  7109. {
  7110. "name": "neuron::forward_v2_15(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14)"
  7111. },
  7112. {
  7113. "name": "neuron::forward_v2_16(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15)"
  7114. },
  7115. {
  7116. "name": "neuron::forward_v2_17(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16)"
  7117. },
  7118. {
  7119. "name": "neuron::forward_v2_18(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17)"
  7120. },
  7121. {
  7122. "name": "neuron::forward_v2_19(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18)"
  7123. },
  7124. {
  7125. "name": "neuron::forward_v2_2(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1)"
  7126. },
  7127. {
  7128. "name": "neuron::forward_v2_20(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19)"
  7129. },
  7130. {
  7131. "name": "neuron::forward_v2_21(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20)"
  7132. },
  7133. {
  7134. "name": "neuron::forward_v2_22(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21)"
  7135. },
  7136. {
  7137. "name": "neuron::forward_v2_23(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22)"
  7138. },
  7139. {
  7140. "name": "neuron::forward_v2_24(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23)"
  7141. },
  7142. {
  7143. "name": "neuron::forward_v2_25(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24)"
  7144. },
  7145. {
  7146. "name": "neuron::forward_v2_26(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25)"
  7147. },
  7148. {
  7149. "name": "neuron::forward_v2_27(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26)"
  7150. },
  7151. {
  7152. "name": "neuron::forward_v2_28(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27)"
  7153. },
  7154. {
  7155. "name": "neuron::forward_v2_29(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28)"
  7156. },
  7157. {
  7158. "name": "neuron::forward_v2_3(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2)"
  7159. },
  7160. {
  7161. "name": "neuron::forward_v2_30(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29)"
  7162. },
  7163. {
  7164. "name": "neuron::forward_v2_31(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30)"
  7165. },
  7166. {
  7167. "name": "neuron::forward_v2_32(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31)"
  7168. },
  7169. {
  7170. "name": "neuron::forward_v2_33(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32)"
  7171. },
  7172. {
  7173. "name": "neuron::forward_v2_35(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34)"
  7174. },
  7175. {
  7176. "name": "neuron::forward_v2_36(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35)"
  7177. },
  7178. {
  7179. "name": "neuron::forward_v2_37(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36)"
  7180. },
  7181. {
  7182. "name": "neuron::forward_v2_38(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37)"
  7183. },
  7184. {
  7185. "name": "neuron::forward_v2_39(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38)"
  7186. },
  7187. {
  7188. "name": "neuron::forward_v2_4(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3)"
  7189. },
  7190. {
  7191. "name": "neuron::forward_v2_40(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39)"
  7192. },
  7193. {
  7194. "name": "neuron::forward_v2_41(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40)"
  7195. },
  7196. {
  7197. "name": "neuron::forward_v2_42(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41)"
  7198. },
  7199. {
  7200. "name": "neuron::forward_v2_43(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42)"
  7201. },
  7202. {
  7203. "name": "neuron::forward_v2_44(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43)"
  7204. },
  7205. {
  7206. "name": "neuron::forward_v2_45(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44)"
  7207. },
  7208. {
  7209. "name": "neuron::forward_v2_46(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45)"
  7210. },
  7211. {
  7212. "name": "neuron::forward_v2_47(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46)"
  7213. },
  7214. {
  7215. "name": "neuron::forward_v2_48(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47)"
  7216. },
  7217. {
  7218. "name": "neuron::forward_v2_49(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47, Tensor _48)"
  7219. },
  7220. {
  7221. "name": "neuron::forward_v2_5(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4)"
  7222. },
  7223. {
  7224. "name": "neuron::forward_v2_50(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47, Tensor _48, Tensor _49)"
  7225. },
  7226. {
  7227. "name": "neuron::forward_v2_51(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47, Tensor _48, Tensor _49, Tensor _50)"
  7228. },
  7229. {
  7230. "name": "neuron::forward_v2_52(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47, Tensor _48, Tensor _49, Tensor _50, Tensor _51)"
  7231. },
  7232. {
  7233. "name": "neuron::forward_v2_53(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47, Tensor _48, Tensor _49, Tensor _50, Tensor _51, Tensor _52)"
  7234. },
  7235. {
  7236. "name": "neuron::forward_v2_54(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47, Tensor _48, Tensor _49, Tensor _50, Tensor _51, Tensor _52, Tensor _53)"
  7237. },
  7238. {
  7239. "name": "neuron::forward_v2_55(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47, Tensor _48, Tensor _49, Tensor _50, Tensor _51, Tensor _52, Tensor _53, Tensor _54)"
  7240. },
  7241. {
  7242. "name": "neuron::forward_v2_56(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47, Tensor _48, Tensor _49, Tensor _50, Tensor _51, Tensor _52, Tensor _53, Tensor _54, Tensor _55)"
  7243. },
  7244. {
  7245. "name": "neuron::forward_v2_57(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47, Tensor _48, Tensor _49, Tensor _50, Tensor _51, Tensor _52, Tensor _53, Tensor _54, Tensor _55, Tensor _56)"
  7246. },
  7247. {
  7248. "name": "neuron::forward_v2_58(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47, Tensor _48, Tensor _49, Tensor _50, Tensor _51, Tensor _52, Tensor _53, Tensor _54, Tensor _55, Tensor _56, Tensor _57)"
  7249. },
  7250. {
  7251. "name": "neuron::forward_v2_59(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47, Tensor _48, Tensor _49, Tensor _50, Tensor _51, Tensor _52, Tensor _53, Tensor _54, Tensor _55, Tensor _56, Tensor _57, Tensor _58)"
  7252. },
  7253. {
  7254. "name": "neuron::forward_v2_6(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5)"
  7255. },
  7256. {
  7257. "name": "neuron::forward_v2_60(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47, Tensor _48, Tensor _49, Tensor _50, Tensor _51, Tensor _52, Tensor _53, Tensor _54, Tensor _55, Tensor _56, Tensor _57, Tensor _58, Tensor _59)"
  7258. },
  7259. {
  7260. "name": "neuron::forward_v2_61(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47, Tensor _48, Tensor _49, Tensor _50, Tensor _51, Tensor _52, Tensor _53, Tensor _54, Tensor _55, Tensor _56, Tensor _57, Tensor _58, Tensor _59, Tensor _60)"
  7261. },
  7262. {
  7263. "name": "neuron::forward_v2_62(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47, Tensor _48, Tensor _49, Tensor _50, Tensor _51, Tensor _52, Tensor _53, Tensor _54, Tensor _55, Tensor _56, Tensor _57, Tensor _58, Tensor _59, Tensor _60, Tensor _61)"
  7264. },
  7265. {
  7266. "name": "neuron::forward_v2_63(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47, Tensor _48, Tensor _49, Tensor _50, Tensor _51, Tensor _52, Tensor _53, Tensor _54, Tensor _55, Tensor _56, Tensor _57, Tensor _58, Tensor _59, Tensor _60, Tensor _61, Tensor _62)"
  7267. },
  7268. {
  7269. "name": "neuron::forward_v2_64(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47, Tensor _48, Tensor _49, Tensor _50, Tensor _51, Tensor _52, Tensor _53, Tensor _54, Tensor _55, Tensor _56, Tensor _57, Tensor _58, Tensor _59, Tensor _60, Tensor _61, Tensor _62, Tensor _63)"
  7270. },
  7271. {
  7272. "name": "neuron::forward_v2_7(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6)"
  7273. },
  7274. {
  7275. "name": "neuron::forward_v2_8(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7)"
  7276. },
  7277. {
  7278. "name": "neuron::forward_v2_9(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8)"
  7279. },
  7280. {
  7281. "name": "neuron::rnn(Tensor _0, Tensor[] _1, __torch__.torch.classes.neuron.RnnBinding _2, int _3) -> (Tensor _0, Tensor[] _1)"
  7282. },
  7283. {
  7284. "name": "neuron::rnn_v2(Tensor _0, Tensor _1, Tensor _2, int _3, __torch__.torch.classes.neuron.RnnBinding_v2[] _4) -> (Tensor _0, Tensor _1, Tensor _2)"
  7285. },
  7286. {
  7287. "name": "horizon::scale_quanti(Tensor x, Tensor scale, Tensor zero_point, int d, int min, int max, bool flag1, bool flag2, str str1, str str2) -> Tensor"
  7288. },
  7289. {
  7290. "name": "torch_sparse::ptr2ind(Tensor _0, int _1) -> Tensor _0"
  7291. },
  7292. {
  7293. "name": "torch_sparse::ind2ptr(Tensor _0, int _1) -> Tensor _0"
  7294. },
  7295. {
  7296. "name": "torch_sparse::hgt_sample(Dict(str, Tensor) _0, Dict(str, Tensor) _1, Dict(str, Tensor) _2, Dict(str, int[]) _3, int _4) -> (Dict(str, Tensor) _0, Dict(str, Tensor) _1, Dict(str, Tensor) _2, Dict(str, Tensor) _3)"
  7297. },
  7298. {
  7299. "name": "torch_sparse::cuda_version() -> int _0"
  7300. },
  7301. {
  7302. "name": "torch_sparse::random_walk(Tensor _0, Tensor _1, Tensor _2, int _3) -> Tensor _0"
  7303. },
  7304. {
  7305. "name": "torch_sparse::partition2(Tensor _0, Tensor _1, Tensor? _2, Tensor? _3, int _4, bool _5) -> Tensor _0"
  7306. },
  7307. {
  7308. "name": "torch_sparse::ego_k_hop_sample_adj(Tensor _0, Tensor _1, Tensor _2, int _3, int _4, bool _5) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5)"
  7309. },
  7310. {
  7311. "name": "torch_sparse::sample_adj(Tensor _0, Tensor _1, Tensor _2, int _3, bool _4) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3)"
  7312. },
  7313. {
  7314. "name": "torch_sparse::neighbor_sample(Tensor _0, Tensor _1, Tensor _2, int[] _3, bool _4, bool _5) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3)"
  7315. },
  7316. {
  7317. "name": "torch_sparse::hetero_temporal_neighbor_sample(str[] _0, (str, str, str)[] _1, Dict(str, Tensor) _2, Dict(str, Tensor) _3, Dict(str, Tensor) _4, Dict(str, int[]) _5, Dict(str, Tensor) _6, int _7, bool _8, bool _9) -> (Dict(str, Tensor) _0, Dict(str, Tensor) _1, Dict(str, Tensor) _2, Dict(str, Tensor) _3)"
  7318. },
  7319. {
  7320. "name": "torch_sparse::partition(Tensor _0, Tensor _1, Tensor? _2, int _3, bool _4) -> Tensor _0"
  7321. },
  7322. {
  7323. "name": "torch_sparse::hetero_neighbor_sample(str[] _0, (str, str, str)[] _1, Dict(str, Tensor) _2, Dict(str, Tensor) _3, Dict(str, Tensor) _4, Dict(str, int[]) _5, int _6, bool _7, bool _8) -> (Dict(str, Tensor) _0, Dict(str, Tensor) _1, Dict(str, Tensor) _2, Dict(str, Tensor) _3)"
  7324. },
  7325. {
  7326. "name": "torch_sparse::spmm_mean(Tensor? _0, Tensor _1, Tensor _2, Tensor? _3, Tensor? _4, Tensor? _5, Tensor? _6, Tensor _7) -> Tensor _0"
  7327. },
  7328. {
  7329. "name": "torch_sparse::spmm_max(Tensor _0, Tensor _1, Tensor? _2, Tensor _3) -> (Tensor _0, Tensor _1)"
  7330. },
  7331. {
  7332. "name": "torch_sparse::relabel(Tensor _0, Tensor _1) -> (Tensor _0, Tensor _1)"
  7333. },
  7334. {
  7335. "name": "torch_sparse::relabel_one_hop(Tensor _0, Tensor _1, Tensor? _2, Tensor _3, bool _4) -> (Tensor _0, Tensor _1, Tensor? _2, Tensor _3)"
  7336. },
  7337. {
  7338. "name": "torch_sparse::spmm_sum(Tensor? _0, Tensor _1, Tensor _2, Tensor? _3, Tensor? _4, Tensor? _5, Tensor _6) -> Tensor _0"
  7339. },
  7340. {
  7341. "name": "torch_sparse::spmm_min(Tensor _0, Tensor _1, Tensor? _2, Tensor _3) -> (Tensor _0, Tensor _1)"
  7342. },
  7343. {
  7344. "name": "torch_sparse::mt_partition(Tensor _0, Tensor _1, Tensor? _2, Tensor? _3, int _4, bool _5, int _6) -> Tensor _0"
  7345. },
  7346. {
  7347. "name": "torch_sparse::saint_subgraph(Tensor _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2)"
  7348. },
  7349. {
  7350. "name": "torch_sparse::non_diag_mask(Tensor _0, Tensor _1, int _2, int _3, int _4) -> Tensor _0"
  7351. },
  7352. {
  7353. "name": "executorch_prim::et_view.default(Tensor self, int[] size) -> (Tensor out)"
  7354. },
  7355. {
  7356. "name": "__torch__.torch.classes.rnn.CellParamsBase",
  7357. "inputs": [
  7358. { "name": "type", "type": "string" },
  7359. { "name": "tensors", "type": "Tensor[]" },
  7360. { "name": "doubles", "type": "float64[]" },
  7361. { "name": "longs", "type": "int64[]" },
  7362. { "name": "packed_params", "type": "__torch__.torch.classes.quantized.LinearPackedParamsBase[]" }
  7363. ]
  7364. },
  7365. {
  7366. "name": "__torch__.torch.classes.xnnpack.Conv2dOpContext",
  7367. "inputs": [
  7368. { "name": "weight", "type": "Tensor" },
  7369. { "name": "bias", "type": "Tensor", "optional": true },
  7370. { "name": "stride", "type": "int64[]" },
  7371. { "name": "padding", "type": "int64[]" },
  7372. { "name": "dilation", "type": "int64[]" },
  7373. { "name": "groups", "type": "int64" },
  7374. { "name": "output_min", "type": "int64[]", "optional": true },
  7375. { "name": "output_max", "type": "int64[]", "optional": true }
  7376. ]
  7377. },
  7378. {
  7379. "name": "__torch__.torch.classes.xnnpack.LinearOpContext",
  7380. "inputs": [
  7381. { "name": "weight", "type": "Tensor" },
  7382. { "name": "bias", "type": "Tensor", "optional": true },
  7383. { "name": "output_min", "type": "int64[]", "optional": true },
  7384. { "name": "output_max", "type": "int64[]", "optional": true }
  7385. ]
  7386. },
  7387. {
  7388. "name": "torch.nn.modules.activation.ELU",
  7389. "category": "Activation"
  7390. },
  7391. {
  7392. "name": "torch.nn.modules.activation.GELU",
  7393. "category": "Activation"
  7394. },
  7395. {
  7396. "name": "torch.nn.modules.activation.GLU",
  7397. "category": "Activation"
  7398. },
  7399. {
  7400. "name": "torch.nn.modules.activation.Hardsigmoid",
  7401. "category": "Activation"
  7402. },
  7403. {
  7404. "name": "torch.nn.modules.activation.Hardswish",
  7405. "category": "Activation"
  7406. },
  7407. {
  7408. "name": "torch.nn.modules.activation.Hardtanh",
  7409. "category": "Activation"
  7410. },
  7411. {
  7412. "name": "torch.nn.modules.activation.LeakyReLU",
  7413. "category": "Activation"
  7414. },
  7415. {
  7416. "name": "torch.nn.modules.activation.LogSoftmax",
  7417. "category": "Activation"
  7418. },
  7419. {
  7420. "name": "torch.nn.modules.activation.PReLU",
  7421. "category": "Activation"
  7422. },
  7423. {
  7424. "name": "torch.nn.modules.activation.ReLU",
  7425. "category": "Activation",
  7426. "inputs": [
  7427. { "name": "inplace", "default": false, "visible": false },
  7428. { "name": "threshold", "default": 0 },
  7429. { "name": "value", "default": 0 }
  7430. ]
  7431. },
  7432. {
  7433. "name": "torch.nn.modules.activation.ReLU6",
  7434. "category": "Activation"
  7435. },
  7436. {
  7437. "name": "torch.nn.modules.activation.SiLU",
  7438. "category": "Activation"
  7439. },
  7440. {
  7441. "name": "torch.nn.modules.activation.Sigmoid",
  7442. "category": "Activation"
  7443. },
  7444. {
  7445. "name": "torch.nn.modules.activation.Softmax",
  7446. "category": "Activation"
  7447. },
  7448. {
  7449. "name": "torch.nn.modules.activation.Softmax2d",
  7450. "category": "Activation"
  7451. },
  7452. {
  7453. "name": "torch.nn.modules.activation.Softplus",
  7454. "category": "Activation"
  7455. },
  7456. {
  7457. "name": "torch.nn.modules.activation.Tanh",
  7458. "category": "Activation"
  7459. },
  7460. {
  7461. "name": "torch.nn.modules.batchnorm.BatchNorm1d",
  7462. "category": "Normalization"
  7463. },
  7464. {
  7465. "name": "torch.nn.modules.batchnorm.BatchNorm2d",
  7466. "category": "Normalization",
  7467. "inputs": [
  7468. { "name": "input" },
  7469. { "name": "weight" },
  7470. { "name": "bias" },
  7471. { "name": "running_mean" },
  7472. { "name": "running_var" },
  7473. { "name": "num_batches_tracked", "visible": false },
  7474. { "name": "eps", "default": 1e-05 },
  7475. { "name": "momentum", "default": 0.1 },
  7476. { "name": "affine", "default": true },
  7477. { "name": "track_running_stats", "default": true }
  7478. ]
  7479. },
  7480. {
  7481. "name": "torch.nn.modules.conv.Conv1d",
  7482. "category": "Layer",
  7483. "inputs": [
  7484. { "name": "input" },
  7485. { "name": "weight" },
  7486. { "name": "bias" },
  7487. { "name": "output_padding", "visible": false },
  7488. { "name": "in_channels", "visible": false },
  7489. { "name": "out_channels", "visible": false },
  7490. { "name": "groups", "default": 1 },
  7491. { "name": "transposed", "default": false },
  7492. { "name": "padding", "default": [ 0 ] },
  7493. { "name": "dilation", "default": [ 1 ] },
  7494. { "name": "stride", "default": [ 1 ] }
  7495. ]
  7496. },
  7497. {
  7498. "name": "torch.nn.modules.conv.Conv2d",
  7499. "category": "Layer",
  7500. "inputs": [
  7501. { "name": "input" },
  7502. { "name": "weight" },
  7503. { "name": "bias" },
  7504. { "name": "output_padding", "visible": false },
  7505. { "name": "in_channels", "visible": false },
  7506. { "name": "out_channels", "visible": false },
  7507. { "name": "groups", "default": 1 },
  7508. { "name": "transposed", "default": false },
  7509. { "name": "padding", "default": [ 0, 0 ] },
  7510. { "name": "dilation", "default": [ 1, 1 ] },
  7511. { "name": "stride", "default": [ 1, 1 ] }
  7512. ]
  7513. },
  7514. {
  7515. "name": "torch.nn.modules.conv.Conv3d",
  7516. "category": "Layer"
  7517. },
  7518. {
  7519. "name": "torch.nn.modules.conv.ConvTranspose1d",
  7520. "category": "Layer",
  7521. "inputs": [
  7522. { "name": "input" },
  7523. { "name": "weight" },
  7524. { "name": "bias" },
  7525. { "name": "output_padding", "visible": false },
  7526. { "name": "in_channels", "visible": false },
  7527. { "name": "out_channels", "visible": false },
  7528. { "name": "groups", "default": 1 },
  7529. { "name": "transposed", "default": true },
  7530. { "name": "padding", "default": [ 0 ] },
  7531. { "name": "dilation", "default": [ 1 ] },
  7532. { "name": "stride", "default": [ 1 ] }
  7533. ]
  7534. },
  7535. {
  7536. "name": "torch.nn.modules.conv.ConvTranspose2d",
  7537. "category": "Layer",
  7538. "inputs": [
  7539. { "name": "input" },
  7540. { "name": "weight" },
  7541. { "name": "bias" },
  7542. { "name": "output_padding", "visible": false },
  7543. { "name": "in_channels", "visible": false },
  7544. { "name": "out_channels", "visible": false },
  7545. { "name": "groups", "default": 1 },
  7546. { "name": "transposed", "default": true },
  7547. { "name": "padding", "default": [ 0, 0 ] },
  7548. { "name": "dilation", "default": [ 1, 1 ] },
  7549. { "name": "stride", "default": [ 1, 1 ] }
  7550. ]
  7551. },
  7552. {
  7553. "name": "torch.nn.modules.conv.ConvTranspose3d",
  7554. "category": "Layer"
  7555. },
  7556. {
  7557. "name": "torch.nn.modules.dropout.Dropout",
  7558. "category": "Dropout",
  7559. "inputs": [
  7560. { "name": "inplace", "default": false, "visible": false },
  7561. { "name": "p", "default": 0.5 }
  7562. ]
  7563. },
  7564. {
  7565. "name": "torch.nn.modules.dropout.Dropout2d",
  7566. "category": "Dropout",
  7567. "inputs": [
  7568. { "name": "inplace", "default": false, "visible": false },
  7569. { "name": "p", "default": 0.5 }
  7570. ]
  7571. },
  7572. {
  7573. "name": "torch.nn.modules.instancenorm.InstanceNorm1d"
  7574. },
  7575. {
  7576. "name": "torch.nn.modules.instancenorm.InstanceNorm2d"
  7577. },
  7578. {
  7579. "name": "torch.nn.modules.instancenorm.InstanceNorm3d"
  7580. },
  7581. {
  7582. "name": "torch.nn.modules.linear.Linear",
  7583. "category": "Layer"
  7584. },
  7585. {
  7586. "name": "torch.nn.modules.normalization.CrossMapLRN2d",
  7587. "category": "Normalization",
  7588. "inputs": [
  7589. { "name": "alpha", "default": 0.0001 },
  7590. { "name": "beta", "default": 0.75 },
  7591. { "name": "k", "default": 1 }
  7592. ]
  7593. },
  7594. {
  7595. "name": "torch.nn.modules.normalization.GroupNorm",
  7596. "category": "Normalization"
  7597. },
  7598. {
  7599. "name": "torch.nn.modules.normalization.LayerNorm",
  7600. "category": "Normalization"
  7601. },
  7602. {
  7603. "name": "torch.nn.modules.padding.ConstantPad1d",
  7604. "category": "Tensor"
  7605. },
  7606. {
  7607. "name": "torch.nn.modules.padding.ConstantPad2d",
  7608. "category": "Tensor"
  7609. },
  7610. {
  7611. "name": "torch.nn.modules.padding.ConstantPad3d",
  7612. "category": "Tensor"
  7613. },
  7614. {
  7615. "name": "torch.nn.modules.padding.ReflectionPad1d",
  7616. "category": "Tensor"
  7617. },
  7618. {
  7619. "name": "torch.nn.modules.padding.ReflectionPad2d",
  7620. "category": "Tensor"
  7621. },
  7622. {
  7623. "name": "torch.nn.modules.padding.ReplicationPad1d",
  7624. "category": "Tensor"
  7625. },
  7626. {
  7627. "name": "torch.nn.modules.padding.ReplicationPad2d",
  7628. "category": "Tensor"
  7629. },
  7630. {
  7631. "name": "torch.nn.modules.padding.ReplicationPad3d",
  7632. "category": "Tensor"
  7633. },
  7634. {
  7635. "name": "torch.nn.modules.padding.ZeroPad2d",
  7636. "category": "Tensor"
  7637. },
  7638. {
  7639. "name": "torch.nn.modules.pixelshuffle.PixelShuffle"
  7640. },
  7641. {
  7642. "name": "torch.nn.modules.pooling.AdaptiveAvgPool1d",
  7643. "category": "Pool"
  7644. },
  7645. {
  7646. "name": "torch.nn.modules.pooling.AdaptiveAvgPool2d",
  7647. "category": "Pool"
  7648. },
  7649. {
  7650. "name": "torch.nn.modules.pooling.AdaptiveAvgPool3d",
  7651. "category": "Pool"
  7652. },
  7653. {
  7654. "name": "torch.nn.modules.pooling.AdaptiveMaxPool1d",
  7655. "category": "Pool"
  7656. },
  7657. {
  7658. "name": "torch.nn.modules.pooling.AdaptiveMaxPool2d",
  7659. "category": "Pool"
  7660. },
  7661. {
  7662. "name": "torch.nn.modules.pooling.AdaptiveMaxPool3d",
  7663. "category": "Pool"
  7664. },
  7665. {
  7666. "name": "torch.nn.modules.pooling.AvgPool2d",
  7667. "category": "Pool",
  7668. "inputs": [
  7669. { "name": "padding", "default": 0 },
  7670. { "name": "count_include_pad", "default": true },
  7671. { "name": "ceil_mode", "visible": false }
  7672. ]
  7673. },
  7674. {
  7675. "name": "torch.nn.modules.pooling.AvgPool3d",
  7676. "category": "Pool"
  7677. },
  7678. {
  7679. "name": "torch.nn.modules.pooling.MaxPool1d",
  7680. "category": "Pool"
  7681. },
  7682. {
  7683. "name": "torch.nn.modules.pooling.MaxPool2d",
  7684. "category": "Pool",
  7685. "inputs": [
  7686. { "name": "input" },
  7687. { "name": "padding", "default": 0 },
  7688. { "name": "dilation", "default": 1 },
  7689. { "name": "return_indices", "default": false },
  7690. { "name": "ceil_mode", "visible": false }
  7691. ]
  7692. },
  7693. {
  7694. "name": "torch.nn.modules.pooling.MaxPool3d",
  7695. "category": "Pool"
  7696. },
  7697. {
  7698. "name": "torch.nn.modules.pooling.MaxUnpool1d",
  7699. "category": "Pool"
  7700. },
  7701. {
  7702. "name": "torch.nn.modules.pooling.MaxUnpool2d",
  7703. "category": "Pool"
  7704. },
  7705. {
  7706. "name": "torch.nn.modules.pooling.MaxUnpool3d",
  7707. "category": "Pool"
  7708. },
  7709. {
  7710. "name": "torch.nn.modules.rnn.GRU",
  7711. "category": "Layer"
  7712. },
  7713. {
  7714. "name": "torch.nn.modules.rnn.GRUCell",
  7715. "category": "Layer"
  7716. },
  7717. {
  7718. "name": "torch.nn.modules.rnn.LSTM",
  7719. "category": "Layer",
  7720. "inputs": [
  7721. { "name": "input" },
  7722. { "name": "weight_ih_l0", "visible": false },
  7723. { "name": "weight_hh_l0", "visible": false },
  7724. { "name": "bias_ih_l0", "visible": false },
  7725. { "name": "bias_hh_l0", "visible": false },
  7726. { "name": "weight_ih_l1", "visible": false },
  7727. { "name": "weight_hh_l1", "visible": false },
  7728. { "name": "bias_ih_l1", "visible": false },
  7729. { "name": "bias_hh_l1", "visible": false },
  7730. { "name": "dropout", "default": 0 },
  7731. { "name": "dropout_state", "default": {} },
  7732. { "name": "num_layers", "default": 1 },
  7733. { "name": "batch_first", "visible": false },
  7734. { "name": "bidirectional", "visible": false },
  7735. { "name": "bias", "visible": false }
  7736. ]
  7737. },
  7738. {
  7739. "name": "torch.nn.modules.rnn.LSTMCell",
  7740. "category": "Layer"
  7741. },
  7742. {
  7743. "name": "torch.nn.modules.rnn.RNN",
  7744. "category": "Layer"
  7745. },
  7746. {
  7747. "name": "torch.nn.modules.sparse.Embedding",
  7748. "category": "Transform",
  7749. "inputs": [
  7750. { "name": "norm_type", "default": 2 },
  7751. { "name": "scale_grad_by_freq", "default": false },
  7752. { "name": "sparse", "default": false },
  7753. { "name": "max_norm", "default": null },
  7754. { "name": "padding_idx", "default": null }
  7755. ]
  7756. },
  7757. {
  7758. "name": "torch.nn.modules.upsampling.Upsample",
  7759. "category": "Data"
  7760. }
  7761. ]