pytorch-metadata.json
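The file is an ordinary JSON array; each element has a "name" field holding a TorchScript operator schema and, for some operators, a "category" field. As a minimal sketch of how the listing might be consumed (the file path and the indexing-by-operator-name strategy are illustrative assumptions, not something the file itself prescribes):

    import json

    # Load the metadata: a JSON array of {"name": ..., "category": ...} objects.
    with open("pytorch-metadata.json", "r", encoding="utf-8") as f:
        entries = json.load(f)

    # Index each schema by its operator name (the part before the first "(").
    # Later entries with the same key simply overwrite earlier ones.
    schemas = {entry["name"].split("(", 1)[0]: entry for entry in entries}

    print(schemas["aten::_cat"]["name"])                  # full schema string
    print(schemas["aten::_convolution"].get("category"))  # "Layer"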

[
  {
    "name": "_caffe2::BBoxTransform(Tensor rois, Tensor deltas, Tensor im_info, float[] weights, bool apply_scale, bool rotated, bool angle_bound_on, int angle_bound_lo, int angle_bound_hi, float clip_angle_thresh, bool legacy_plus_one, Tensor[]? _caffe2_preallocated_outputs=None) -> (Tensor output_0, Tensor output_1)"
  },
  {
    "name": "_caffe2::BatchPermutation(Tensor X, Tensor indices, Tensor[]? _caffe2_preallocated_outputs=None) -> Tensor"
  },
  {
    "name": "_caffe2::BoxWithNMSLimit(Tensor scores, Tensor boxes, Tensor batch_splits, float score_thresh, float nms, int detections_per_im, bool soft_nms_enabled, str soft_nms_method, float soft_nms_sigma, float soft_nms_min_score_thres, bool rotated, bool cls_agnostic_bbox_reg, bool input_boxes_include_bg_cls, bool output_classes_include_bg_cls, bool legacy_plus_one, Tensor[]? _caffe2_preallocated_outputs=None) -> (Tensor scores, Tensor boxes, Tensor classes, Tensor batch_splits, Tensor keeps, Tensor keeps_size)"
  },
  {
    "name": "_caffe2::CollectAndDistributeFpnRpnProposals(Tensor[] input_list, int roi_canonical_scale, int roi_canonical_level, int roi_max_level, int roi_min_level, int rpn_max_level, int rpn_min_level, int rpn_post_nms_topN, bool legacy_plus_one, Tensor[]? _caffe2_preallocated_outputs=None) -> (Tensor rois, Tensor rois_fpn2, Tensor rois_fpn3, Tensor rois_fpn4, Tensor rois_fpn5, Tensor rois_idx_restore_int32)"
  },
  {
    "name": "_caffe2::CollectRpnProposals(Tensor[] input_list, int rpn_max_level, int rpn_min_level, int rpn_post_nms_topN, Tensor[]? _caffe2_preallocated_outputs=None) -> (Tensor rois)"
  },
  {
    "name": "_caffe2::CopyCPUToGPU(Tensor input, Tensor[]? _caffe2_preallocated_outputs=None) -> Tensor"
  },
  {
    "name": "_caffe2::CopyGPUToCPU(Tensor input, Tensor[]? _caffe2_preallocated_outputs=None) -> Tensor"
  },
  {
    "name": "_caffe2::DistributeFpnProposals(Tensor rois, int roi_canonical_scale, int roi_canonical_level, int roi_max_level, int roi_min_level, bool legacy_plus_one, Tensor[]? _caffe2_preallocated_outputs=None) -> (Tensor rois_fpn2, Tensor rois_fpn3, Tensor rois_fpn4, Tensor rois_fpn5, Tensor rois_idx_restore_int32)"
  },
  {
    "name": "_caffe2::GenerateProposals(Tensor scores, Tensor bbox_deltas, Tensor im_info, Tensor anchors, float spatial_scale, int pre_nms_topN, int post_nms_topN, float nms_thresh, float min_size, bool angle_bound_on, int angle_bound_lo, int angle_bound_hi, float clip_angle_thresh, bool legacy_plus_one, Tensor[]? _caffe2_preallocated_outputs=None) -> (Tensor output_0, Tensor output_1)"
  },
  {
    "name": "_caffe2::RoIAlign(Tensor features, Tensor rois, str order, float spatial_scale, int pooled_h, int pooled_w, int sampling_ratio, bool aligned, Tensor[]? _caffe2_preallocated_outputs=None) -> Tensor"
  },
  {
    "name": "aqlm::code2x8_lut_matmat.out(Tensor input, Tensor codes, Tensor codebooks, Tensor scales, Tensor? bias, Tensor(a!) out) -> Tensor(a!)"
  },
  {
    "name": "aten::Bool.Tensor(Tensor a) -> bool"
  },
  {
    "name": "aten::Bool.int(int a) -> bool"
  },
  {
    "name": "aten::Bool.float(float a) -> bool"
  },
  {
    "name": "aten::Complex.Scalar(Scalar a) -> complex"
  },
  {
    "name": "aten::Complex.Tensor_Tensor(Tensor a, Tensor b) -> complex"
  },
  {
    "name": "aten::Complex.int_bool(int x, bool y) -> complex"
  },
  {
    "name": "aten::Complex.bool_int(bool x, int y) -> complex"
  },
  {
    "name": "aten::Complex.float_bool(float x, bool y) -> complex"
  },
  {
    "name": "aten::Complex.bool_float(bool x, float y) -> complex"
  },
  {
    "name": "aten::Complex.float_int(float x, int y) -> complex"
  },
  {
    "name": "aten::Complex.int_float(int x, float y) -> complex"
  },
  {
    "name": "aten::Complex.int_int(int x, int y) -> complex"
  },
  {
    "name": "aten::Complex.bool_bool(bool x, bool y) -> complex"
  },
  {
    "name": "aten::Complex.float_float(float x, float y) -> complex"
  },
  {
    "name": "aten::Complex.Tensor_float(Tensor x, float y) -> complex"
  },
  {
    "name": "aten::Complex.float_Tensor(float x, Tensor y) -> complex"
  },
  {
    "name": "aten::Complex.Tensor_int(Tensor x, int y) -> complex"
  },
  {
    "name": "aten::Complex.int_Tensor(int x, Tensor y) -> complex"
  },
  {
    "name": "aten::Complex.Tensor_bool(Tensor x, bool y) -> complex"
  },
  {
    "name": "aten::Complex.bool_Tensor(bool x, Tensor y) -> complex"
  },
  {
    "name": "aten::ComplexImplicit(Tensor a) -> complex"
  },
  {
    "name": "aten::Delete.t(t[](a!) self, int idx) -> ()"
  },
  {
    "name": "aten::Delete.Dict_str(Dict(str, t)(a!) self, str key) -> ()"
  },
  {
    "name": "aten::Delete.Dict_int(Dict(int, t)(a!) self, int key) -> ()"
  },
  {
    "name": "aten::Delete.Dict_bool(Dict(bool, t)(a!) self, bool key) -> ()"
  },
  {
    "name": "aten::Delete.Dict_float(Dict(float, t)(a!) self, float key) -> ()"
  },
  {
    "name": "aten::Delete.Dict_complex(Dict(complex, t)(a!) self, complex key) -> ()"
  },
  {
    "name": "aten::Delete.Dict_Tensor(Dict(Tensor, t)(a!) self, Tensor key) -> ()"
  },
  {
    "name": "aten::Float.Tensor(Tensor a) -> float"
  },
  {
    "name": "aten::Float.Scalar(Scalar a) -> float"
  },
  {
    "name": "aten::Float.int(int a) -> float"
  },
  {
    "name": "aten::Float.bool(bool a) -> float"
  },
  {
    "name": "aten::Float.str(str a) -> float"
  },
  {
    "name": "aten::FloatImplicit(Tensor a) -> float"
  },
  {
    "name": "aten::Int.Tensor(Tensor a) -> int"
  },
  {
    "name": "aten::Int.bool(bool a) -> int"
  },
  {
    "name": "aten::Int.float(float a) -> int"
  },
  {
    "name": "aten::Int.Scalar(Scalar a) -> int"
  },
  {
    "name": "aten::Int.str(str a) -> int"
  },
  {
    "name": "aten::IntImplicit(Tensor a) -> int"
  },
  {
    "name": "aten::ScalarImplicit(Tensor a) -> Scalar"
  },
  {
    "name": "aten::Size(int[] sizes) -> int[]"
  },
  {
    "name": "aten::__and__.Scalar(Tensor self, Scalar other) -> Tensor"
  },
  {
    "name": "aten::__and__.Tensor(Tensor self, Tensor other) -> Tensor"
  },
  {
    "name": "aten::__and__.bool(bool a, bool b) -> bool"
  },
  {
    "name": "aten::__and__.int(int a, int b) -> int"
  },
  {
    "name": "aten::__contains__.int_list(int[] l, int item) -> bool"
  },
  {
    "name": "aten::__contains__.str_list(str[] l, str item) -> bool"
  },
  {
    "name": "aten::__contains__.str(Dict(str, t) dict, str key) -> bool"
  },
  {
    "name": "aten::__contains__.int(Dict(int, t) dict, int key) -> bool"
  },
  {
    "name": "aten::__contains__.bool(Dict(bool, t) dict, bool key) -> bool"
  },
  {
    "name": "aten::__contains__.float(Dict(float, t) dict, float key) -> bool"
  },
  {
    "name": "aten::__contains__.complex(Dict(complex, t) dict, complex key) -> bool"
  },
  {
    "name": "aten::__contains__.Tensor(Dict(Tensor, t) dict, Tensor key) -> bool"
  },
  {
    "name": "aten::__contains__.float_list(float[] l, float item) -> bool"
  },
  {
    "name": "aten::__derive_index(int index, int start, int step) -> int"
  },
  {
    "name": "aten::__getitem__.t(t[](a) list, int idx) -> t(*)"
  },
  {
    "name": "aten::__getitem__.str(str s, int index) -> str"
  },
  {
    "name": "aten::__getitem__.Dict_str(Dict(str, t) self, str key) -> t(*)"
  },
  {
    "name": "aten::__getitem__.Dict_int(Dict(int, t) self, int key) -> t(*)"
  },
  {
    "name": "aten::__getitem__.Dict_bool(Dict(bool, t) self, bool key) -> t(*)"
  },
  {
    "name": "aten::__getitem__.Dict_float(Dict(float, t) self, float key) -> t(*)"
  },
  {
    "name": "aten::__getitem__.Dict_complex(Dict(complex, t) self, complex key) -> t(*)"
  },
  {
    "name": "aten::__getitem__.Dict_Tensor(Dict(Tensor, t) self, Tensor key) -> t(*)"
  },
  {
    "name": "aten::__iand__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)"
  },
  {
    "name": "aten::__iand__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)"
  },
  {
    "name": "aten::__ilshift__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)"
  },
  {
    "name": "aten::__ilshift__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)"
  },
  {
    "name": "aten::__interpolate.scale_list(Tensor input, int? size=None, float[]? scale_factor=None, str mode=\"nearest\", bool? align_corners=None, bool? recompute_scale_factor=None, bool antialias=False) -> Tensor"
  },
  {
    "name": "aten::__interpolate.size_list_scale_list(Tensor input, int[]? size=None, float[]? scale_factor=None, str mode=\"nearest\", bool? align_corners=None, bool? recompute_scale_factor=None, bool antialias=False) -> Tensor"
  },
  {
    "name": "aten::__interpolate(Tensor input, int? size=None, float? scale_factor=None, str mode=\"nearest\", bool? align_corners=None, bool? recompute_scale_factor=None, bool antialias=False) -> Tensor"
  },
  {
    "name": "aten::__interpolate.size_list(Tensor input, int[]? size=None, float? scale_factor=None, str mode=\"nearest\", bool? align_corners=None, bool? recompute_scale_factor=None, bool antialias=False) -> Tensor"
  },
  {
    "name": "aten::__ior__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)"
  },
  {
    "name": "aten::__ior__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)"
  },
  {
    "name": "aten::__irshift__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)"
  },
  {
    "name": "aten::__irshift__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)"
  },
  {
    "name": "aten::__is__(t1 self, t2 obj) -> bool"
  },
  {
    "name": "aten::__isnot__(t1 self, t2 obj) -> bool"
  },
  {
    "name": "aten::__ixor__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)"
  },
  {
    "name": "aten::__ixor__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)"
  },
  {
    "name": "aten::__lshift__.Tensor(Tensor self, Tensor other) -> Tensor"
  },
  {
    "name": "aten::__lshift__.Scalar(Tensor self, Scalar other) -> Tensor"
  },
  {
    "name": "aten::__lshift__.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)"
  },
  {
    "name": "aten::__lshift__.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  },
  {
    "name": "aten::__lshift__.int(int a, int b) -> int"
  },
  {
    "name": "aten::__not__(bool self) -> bool"
  },
  {
    "name": "aten::__or__.Tensor(Tensor self, Tensor other) -> Tensor"
  },
  {
    "name": "aten::__or__.Scalar(Tensor self, Scalar other) -> Tensor"
  },
  {
    "name": "aten::__or__.bool(bool a, bool b) -> bool"
  },
  {
    "name": "aten::__or__.int(int a, int b) -> int"
  },
  {
    "name": "aten::__range_length(int lo, int hi, int step) -> int"
  },
  {
    "name": "aten::__rshift__.Tensor(Tensor self, Tensor other) -> Tensor"
  },
  {
    "name": "aten::__rshift__.Scalar(Tensor self, Scalar other) -> Tensor"
  },
  {
    "name": "aten::__rshift__.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)"
  },
  {
    "name": "aten::__rshift__.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  },
  {
    "name": "aten::__rshift__.int(int a, int b) -> int"
  },
  {
    "name": "aten::__upsample(Tensor input, int? size=None, int? scale_factor=None, str mode=\"nearest\", bool? align_corners=None) -> Tensor",
    "category": "Layer"
  },
  {
    "name": "aten::__upsample.size_list(Tensor input, int[]? size=None, int? scale_factor=None, str mode=\"nearest\", bool? align_corners=None) -> Tensor",
    "category": "Layer"
  },
  {
    "name": "aten::__upsample_bilinear(Tensor input, int? size=None, int? scale_factor=None) -> Tensor"
  },
  {
    "name": "aten::__upsample_bilinear.size_list(Tensor input, int[]? size=None, int? scale_factor=None) -> Tensor"
  },
  {
    "name": "aten::__upsample_bilinear.scale_list(Tensor input, int? size=None, int[]? scale_factor=None) -> Tensor"
  },
  {
    "name": "aten::__upsample_bilinear.size_list_scale_list(Tensor input, int[]? size=None, int[]? scale_factor=None) -> Tensor"
  },
  {
    "name": "aten::__xor__.Tensor(Tensor self, Tensor other) -> Tensor"
  },
  {
    "name": "aten::__xor__.Scalar(Tensor self, Scalar other) -> Tensor"
  },
  {
    "name": "aten::__xor__.bool(bool a, bool b) -> bool"
  },
  {
    "name": "aten::__xor__.int(int a, int b) -> int"
  },
  {
    "name": "aten::_adaptive_avg_pool2d(Tensor self, SymInt[2] output_size) -> Tensor"
  },
  {
    "name": "aten::_adaptive_avg_pool2d.out(Tensor self, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!)"
  },
  {
    "name": "aten::_add_relu.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor"
  },
  {
    "name": "aten::_add_relu.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)"
  },
  {
    "name": "aten::_add_relu.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor"
  },
  {
    "name": "aten::_add_relu.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!)"
  },
  {
    "name": "aten::_add_relu_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)"
  },
  {
    "name": "aten::_add_relu_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)"
  },
  {
    "name": "aten::_aminmax(Tensor self) -> (Tensor, Tensor)"
  },
  {
    "name": "aten::_aminmax.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor, Tensor)"
  },
  {
    "name": "aten::_aminmax.out(Tensor self, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))"
  },
  {
    "name": "aten::_aminmax.dim_out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))"
  },
  {
    "name": "aten::_assert_scalar(Scalar self, str assert_msg) -> ()"
  },
  {
    "name": "aten::_assert_tensor_metadata(Tensor a, SymInt[]? size=None, SymInt[]? stride=None, ScalarType? dtype=None, *, Device? device=None, Layout? layout=None) -> ()"
  },
  {
    "name": "aten::_autocast_to_full_precision(Tensor(a) self, bool cuda_enabled, bool cpu_enabled) -> Tensor(a)"
  },
  {
    "name": "aten::_autocast_to_reduced_precision(Tensor(a) self, bool cuda_enabled, bool cpu_enabled, ScalarType cuda_dtype, ScalarType cpu_dtype) -> Tensor(a)"
  },
  {
    "name": "aten::_cast_Byte(Tensor self, bool non_blocking=False) -> Tensor"
  },
  {
    "name": "aten::_cast_Char(Tensor self, bool non_blocking=False) -> Tensor"
  },
  {
    "name": "aten::_cast_Double(Tensor self, bool non_blocking=False) -> Tensor"
  },
  {
    "name": "aten::_cast_Float(Tensor self, bool non_blocking=False) -> Tensor"
  },
  {
    "name": "aten::_cast_Half(Tensor self, bool non_blocking=False) -> Tensor"
  },
  {
    "name": "aten::_cast_Int(Tensor self, bool non_blocking=False) -> Tensor"
  },
  {
    "name": "aten::_cast_Long(Tensor self, bool non_blocking=False) -> Tensor"
  },
  {
    "name": "aten::_cast_Short(Tensor self, bool non_blocking=False) -> Tensor"
  },
  {
    "name": "aten::_cat(Tensor[] tensors, int dim=0) -> Tensor"
  },
  {
    "name": "aten::_cat.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)"
  },
  {
    "name": "aten::_cdist_forward(Tensor x1, Tensor x2, float p, int? compute_mode) -> Tensor"
  },
  {
    "name": "aten::_cdist_forward.out(Tensor x1, Tensor x2, float p, int? compute_mode, *, Tensor(a!) out) -> Tensor(a!)"
  },
  {
    "name": "aten::_coalesce.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  },
  {
    "name": "aten::_coalesce(Tensor self) -> Tensor"
  },
  {
    "name": "aten::_coalesced_(Tensor(a!) self, bool coalesced) -> Tensor(a!)"
  },
  {
    "name": "aten::_conj(Tensor(a) self) -> Tensor(a)"
  },
  {
    "name": "aten::_convolution.deprecated(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, int[] output_padding, SymInt groups, bool benchmark, bool deterministic, bool cudnn_enabled) -> Tensor",
    "category": "Layer"
  },
  {
    "name": "aten::_convolution(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32) -> Tensor",
    "category": "Layer"
  },
  {
    "name": "aten::_convolution.out(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!)"
  },
  {
    "name": "aten::_convolution_mode(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, str padding, SymInt[] dilation, SymInt groups) -> Tensor"
  },
  {
    "name": "aten::_ctc_loss(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank=0, bool zero_infinity=False) -> (Tensor, Tensor)"
  },
  {
    "name": "aten::_ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank=0, bool zero_infinity=False) -> (Tensor, Tensor)"
  },
  {
    "name": "aten::_ctc_loss.out(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank=0, bool zero_infinity=False, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))"
  },
  {
    "name": "aten::_ctc_loss.Tensor_out(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank=0, bool zero_infinity=False, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))"
  },
  {
    "name": "aten::_dim_arange(Tensor like, int dim) -> Tensor"
  },
  {
    "name": "aten::_embedding_bag(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False, int padding_idx=-1) -> (Tensor, Tensor, Tensor, Tensor)"
  },
  {
    "name": "aten::_embedding_bag.out(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False, int padding_idx=-1, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))"
  },
  {
    "name": "aten::_fake_quantize_learnable_per_channel_affine(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max, float grad_factor=1.) -> Tensor"
  },
  {
    "name": "aten::_fake_quantize_learnable_per_channel_affine.out(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max, float grad_factor=1., *, Tensor(a!) out) -> Tensor(a!)"
  },
  {
    "name": "aten::_fake_quantize_learnable_per_tensor_affine(Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max, float grad_factor=1.) -> Tensor",
    "category": "Quantization"
  },
  {
    "name": "aten::_fake_quantize_learnable_per_tensor_affine.out(Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max, float grad_factor=1., *, Tensor(a!) out) -> Tensor(a!)",
    "category": "Quantization"
  },
  {
    "name": "aten::_fake_quantize_learnable_per_tensor_affine_backward(Tensor grad, Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max, float grad_factor=1.) -> (Tensor, Tensor, Tensor)",
    "category": "Quantization"
  },
  {
    "name": "aten::_fft_r2c(Tensor self, int[] dim, int normalization, bool onesided) -> Tensor"
  },
  {
    "name": "aten::_fft_r2c.out(Tensor self, int[] dim, int normalization, bool onesided, *, Tensor(a!) out) -> Tensor(a!)"
  },
  {
    "name": "aten::_get_cpu_capability() -> str"
  },
  {
    "name": "aten::_has_compatible_shallow_copy_type(Tensor self, Tensor from) -> bool"
  },
  {
    "name": "aten::_indices(Tensor(a) self) -> Tensor(a)"
  },
  {
    "name": "aten::_infer_size(int[] a, int[] b) -> int[]"
  },
  {
    "name": "aten::_local_scalar_dense(Tensor self) -> Scalar"
  },
  {
    "name": "aten::_log_softmax(Tensor self, int dim, bool half_to_float) -> Tensor"
  },
  {
    "name": "aten::_log_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!)"
  },
  {
    "name": "aten::_make_per_tensor_quantized_tensor(Tensor self, float scale, int zero_point) -> Tensor"
  },
  {
    "name": "aten::_make_per_tensor_quantized_tensor.out(Tensor self, float scale, int zero_point, *, Tensor(a!) out) -> Tensor(a!)"
  },
  {
    "name": "aten::_native_batch_norm_legit(Tensor input, Tensor? weight, Tensor? bias, Tensor(a!) running_mean, Tensor(b!) running_var, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor)"
  },
  {
    "name": "aten::_native_batch_norm_legit.no_stats(Tensor input, Tensor? weight, Tensor? bias, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor)"
  },
  {
    "name": "aten::_native_batch_norm_legit.out(Tensor input, Tensor? weight, Tensor? bias, Tensor(a!) running_mean, Tensor(b!) running_var, bool training, float momentum, float eps, *, Tensor(d!) out, Tensor(e!) save_mean, Tensor(f!) save_invstd) -> (Tensor(d!), Tensor(e!), Tensor(f!))"
  },
  {
    "name": "aten::_native_batch_norm_legit.no_stats_out(Tensor input, Tensor? weight, Tensor? bias, bool training, float momentum, float eps, *, Tensor(a!) out, Tensor(b!) save_mean, Tensor(c!) save_invstd) -> (Tensor(a!), Tensor(b!), Tensor(c!))"
  },
  {
    "name": "aten::_native_batch_norm_legit_functional(Tensor input, Tensor? weight, Tensor? bias, Tensor running_mean, Tensor running_var, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor, Tensor running_mean_out, Tensor running_var_out)",
    "category": "Normalization"
  },
  {
    "name": "aten::_native_batch_norm_legit_no_training(Tensor input, Tensor? weight, Tensor? bias, Tensor running_mean, Tensor running_var, float momentum, float eps) -> (Tensor, Tensor, Tensor)",
    "category": "Normalization"
  },
  {
    "name": "aten::_native_batch_norm_legit_no_training.out(Tensor input, Tensor? weight, Tensor? bias, Tensor running_mean, Tensor running_var, float momentum, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))",
    "category": "Normalization"
  },
  {
    "name": "aten::_native_multi_head_attention(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None, bool need_weights=True, bool average_attn_weights=True, int? mask_type=None) -> (Tensor, Tensor)",
    "category": "Attention"
  },
  {
    "name": "aten::_native_multi_head_attention.out(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None, bool need_weights=True, bool average_attn_weights=True, int? mask_type=None, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))"
  },
  {
    "name": "aten::_nested_tensor_from_mask(Tensor t, Tensor mask, bool mask_check=True) -> Tensor"
  },
  {
    "name": "aten::_nested_tensor_from_mask.out(Tensor t, Tensor mask, bool mask_check=True, *, Tensor(a!) out) -> Tensor(a!)"
  },
  {
    "name": "aten::_nested_tensor_from_mask_left_aligned(Tensor t, Tensor mask) -> bool"
  },
  {
    "name": "aten::_pack_padded_sequence(Tensor input, Tensor lengths, bool batch_first) -> (Tensor, Tensor)"
  },
  {
    "name": "aten::_pack_padded_sequence.out(Tensor input, Tensor lengths, bool batch_first, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))"
  },
  {
    "name": "aten::_pad_packed_sequence(Tensor data, Tensor batch_sizes, bool batch_first, Scalar padding_value, int total_length) -> (Tensor, Tensor)"
  },
  {
    "name": "aten::_prelu_kernel(Tensor self, Tensor weight) -> Tensor"
  },
  {
    "name": "aten::_safe_softmax(Tensor self, int dim, ScalarType? dtype=None) -> Tensor",
    "category": "Activation"
  },
  {
    "name": "aten::_scaled_dot_product_efficient_attention(Tensor query, Tensor key, Tensor value, Tensor? attn_bias, bool compute_log_sumexp, float dropout_p=0., bool is_causal=False, *, float? scale=None) -> (Tensor output, Tensor log_sumexp, Tensor philox_seed, Tensor philox_offset)"
  595. },
  596. {
  597. "name": "aten::_scaled_dot_product_flash_attention(Tensor query, Tensor key, Tensor value, float dropout_p=0., bool is_causal=False, bool return_debug_mask=False, *, float? scale=None) -> (Tensor output, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, SymInt max_q, SymInt max_k, Tensor rng_state, Tensor unused, Tensor debug_attn_mask)"
  598. },
  599. {
  600. "name": "aten::_scaled_dot_product_flash_attention.quantized(Tensor query, Tensor key, Tensor value, Tensor? q_descale, Tensor? k_descale, Tensor? v_descale, float dropout_p=0., bool is_causal=False, bool return_debug_mask=False, *, float? scale=None) -> (Tensor output, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, SymInt max_q, SymInt max_k, Tensor rng_state, Tensor unused, Tensor debug_attn_mask)"
  601. },
  602. {
  603. "name": "aten::_scaled_dot_product_flash_attention_for_cpu(Tensor query, Tensor key, Tensor value, float dropout_p=0., bool is_causal=False, *, Tensor? attn_mask=None, float? scale=None) -> (Tensor output, Tensor logsumexp)"
  604. },
  605. {
  606. "name": "aten::_set_item.t(t[](a!) l, int idx, t(b -> *) el) -> t[](a!)"
  607. },
  608. {
  609. "name": "aten::_set_item.str(Dict(str, t)(a!) l, str(b -> *) idx, t(c -> *) v) -> ()"
  610. },
  611. {
  612. "name": "aten::_set_item.int(Dict(int, t)(a!) l, int(b -> *) idx, t(c -> *) v) -> ()"
  613. },
  614. {
  615. "name": "aten::_set_item.bool(Dict(bool, t)(a!) l, bool(b -> *) idx, t(c -> *) v) -> ()"
  616. },
  617. {
  618. "name": "aten::_set_item.float(Dict(float, t)(a!) l, float(b -> *) idx, t(c -> *) v) -> ()"
  619. },
  620. {
  621. "name": "aten::_set_item.complex(Dict(complex, t)(a!) l, complex(b -> *) idx, t(c -> *) v) -> ()"
  622. },
  623. {
  624. "name": "aten::_set_item.Tensor(Dict(Tensor, t)(a!) l, Tensor(b -> *) idx, t(c -> *) v) -> ()"
  625. },
  626. {
  627. "name": "aten::_shape_as_tensor(Tensor self) -> Tensor"
  628. },
  629. {
  630. "name": "aten::_softmax(Tensor self, int dim, bool half_to_float) -> Tensor",
  631. "category": "Activation"
  632. },
  633. {
  634. "name": "aten::_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!)"
  635. },
  636. {
  637. "name": "aten::_sparse_coo_tensor_unsafe(Tensor indices, Tensor values, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool? is_coalesced=None) -> Tensor",
  638. "category": "Tensor"
  639. },
  640. {
  641. "name": "aten::_sparse_mm(Tensor sparse, Tensor dense) -> Tensor"
  642. },
  643. {
  644. "name": "aten::_sparse_mm.reduce(Tensor sparse, Tensor dense, str reduce) -> Tensor"
  645. },
  646. {
  647. "name": "aten::_test_serialization_subcmul(Tensor self, Tensor other, Scalar alpha=1) -> Tensor"
  648. },
  649. {
  650. "name": "aten::_thnn_fused_gru_cell(Tensor input_gates, Tensor hidden_gates, Tensor hx, Tensor? input_bias=None, Tensor? hidden_bias=None) -> (Tensor, Tensor)"
  651. },
  652. {
  653. "name": "aten::_thnn_fused_gru_cell.out(Tensor input_gates, Tensor hidden_gates, Tensor hx, Tensor? input_bias=None, Tensor? hidden_bias=None, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))"
  654. },
  655. {
  656. "name": "aten::_thnn_fused_lstm_cell(Tensor input_gates, Tensor hidden_gates, Tensor cx, Tensor? input_bias=None, Tensor? hidden_bias=None) -> (Tensor, Tensor, Tensor)"
  657. },
  658. {
  659. "name": "aten::_thnn_fused_lstm_cell.out(Tensor input_gates, Tensor hidden_gates, Tensor cx, Tensor? input_bias=None, Tensor? hidden_bias=None, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))"
  660. },
  661. {
  662. "name": "aten::_to_copy(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool non_blocking=False, MemoryFormat? memory_format=None) -> Tensor"
  663. },
  664. {
  665. "name": "aten::_to_copy.out(Tensor self, *, bool non_blocking=False, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)"
  666. },
  667. {
  668. "name": "aten::_transformer_encoder_layer_fwd(Tensor src, int embed_dim, int num_heads, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, bool use_gelu, bool norm_first, float eps, Tensor norm_weight_1, Tensor norm_bias_1, Tensor norm_weight_2, Tensor norm_bias_2, Tensor ffn_weight_1, Tensor ffn_bias_1, Tensor ffn_weight_2, Tensor ffn_bias_2, Tensor? mask=None, int? mask_type=None) -> Tensor"
  669. },
  670. {
  671. "name": "aten::_transformer_encoder_layer_fwd.out(Tensor src, int embed_dim, int num_heads, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, bool use_gelu, bool norm_first, float eps, Tensor norm_weight_1, Tensor norm_bias_1, Tensor norm_weight_2, Tensor norm_bias_2, Tensor ffn_weight_1, Tensor ffn_bias_1, Tensor ffn_weight_2, Tensor ffn_bias_2, Tensor? mask=None, int? mask_type=None, *, Tensor(a!) out) -> Tensor(a!)"
  672. },
  673. {
  674. "name": "aten::_unique(Tensor self, bool sorted=True, bool return_inverse=False) -> (Tensor, Tensor)"
  675. },
  676. {
  677. "name": "aten::_unique.out(Tensor self, bool sorted=True, bool return_inverse=False, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))"
  678. },
  679. {
  680. "name": "aten::_unique2(Tensor self, bool sorted=True, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor)"
  681. },
  682. {
  683. "name": "aten::_unique2.out(Tensor self, bool sorted=True, bool return_inverse=False, bool return_counts=False, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))"
  684. },
  685. {
  686. "name": "aten::_unsafe_index.Tensor(Tensor self, Tensor?[] indices) -> Tensor"
  687. },
  688. {
  689. "name": "aten::_unsafe_index.Tensor_hacked_twin(Tensor self, Tensor[] indices) -> Tensor"
  690. },
  691. {
  692. "name": "aten::_unsafe_view(Tensor self, SymInt[] size) -> Tensor"
  693. },
  694. {
  695. "name": "aten::_unsafe_view.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)"
  696. },
  697. {
  698. "name": "aten::_unwrap_optional(t(a)? optional) -> t(a)"
  699. },
  700. {
  701. "name": "aten::_upsample_bicubic2d_aa(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor"
  702. },
  703. {
  704. "name": "aten::_upsample_bicubic2d_aa.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor"
  705. },
  706. {
  707. "name": "aten::_upsample_bicubic2d_aa.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)"
  708. },
  709. {
  710. "name": "aten::_upsample_bilinear2d_aa(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor"
  711. },
  712. {
  713. "name": "aten::_upsample_bilinear2d_aa.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor"
  714. },
  715. {
  716. "name": "aten::_upsample_bilinear2d_aa.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)"
  717. },
  718. {
  719. "name": "aten::_upsample_nearest_exact1d(Tensor self, SymInt[1] output_size, float? scales=None) -> Tensor"
  720. },
  721. {
  722. "name": "aten::_upsample_nearest_exact1d.out(Tensor self, SymInt[1] output_size, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)"
  723. },
  724. {
  725. "name": "aten::_upsample_nearest_exact1d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor"
  726. },
  727. {
  728. "name": "aten::_upsample_nearest_exact2d(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None) -> Tensor"
  729. },
  730. {
  731. "name": "aten::_upsample_nearest_exact2d.out(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)"
  732. },
  733. {
  734. "name": "aten::_upsample_nearest_exact2d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor"
  735. },
  736. {
  737. "name": "aten::_upsample_nearest_exact3d(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor"
  738. },
  739. {
  740. "name": "aten::_upsample_nearest_exact3d.out(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)"
  741. },
  742. {
  743. "name": "aten::_upsample_nearest_exact3d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor"
  744. },
  745. {
  746. "name": "aten::_weight_norm(Tensor v, Tensor g, int dim=0) -> Tensor"
  747. },
  748. {
  749. "name": "aten::_weight_norm_differentiable_backward(Tensor grad_w, Tensor saved_v, Tensor saved_g, Tensor saved_norms, int dim) -> (Tensor, Tensor)"
  750. },
  751. {
  752. "name": "aten::_weight_norm_interface(Tensor v, Tensor g, int dim=0) -> (Tensor, Tensor)"
  753. },
  754. {
  755. "name": "aten::_weight_norm_interface.out(Tensor v, Tensor g, int dim=0, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))"
  756. },
  757. {
  758. "name": "aten::_weight_norm_interface_backward(Tensor grad_w, Tensor saved_v, Tensor saved_g, Tensor saved_norms, int dim) -> (Tensor, Tensor)"
  759. },
  760. {
  761. "name": "aten::_weight_norm_interface_backward.out(Tensor grad_w, Tensor saved_v, Tensor saved_g, Tensor saved_norms, int dim, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))"
  762. },
  763. {
  764. "name": "aten::abs(Tensor self) -> Tensor"
  765. },
  766. {
  767. "name": "aten::abs.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  768. },
  769. {
  770. "name": "aten::abs_(Tensor(a!) self) -> Tensor(a!)"
  771. },
  772. {
  773. "name": "aten::acos(Tensor self) -> Tensor"
  774. },
  775. {
  776. "name": "aten::acos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  777. },
  778. {
  779. "name": "aten::acos.int(int a) -> float"
  780. },
  781. {
  782. "name": "aten::acos.float(float a) -> float"
  783. },
  784. {
  785. "name": "aten::acos.complex(complex a) -> complex"
  786. },
  787. {
  788. "name": "aten::acos.Scalar(Scalar a) -> Scalar"
  789. },
  790. {
  791. "name": "aten::acos_(Tensor(a!) self) -> Tensor(a!)"
  792. },
  793. {
  794. "name": "aten::acosh(Tensor self) -> Tensor"
  795. },
  796. {
  797. "name": "aten::acosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  798. },
  799. {
  800. "name": "aten::acosh.int(int a) -> float"
  801. },
  802. {
  803. "name": "aten::acosh.float(float a) -> float"
  804. },
  805. {
  806. "name": "aten::acosh.complex(complex a) -> complex"
  807. },
  808. {
  809. "name": "aten::acosh.Scalar(Scalar a) -> Scalar"
  810. },
  811. {
  812. "name": "aten::acosh_(Tensor(a!) self) -> Tensor(a!)"
  813. },
  814. {
  815. "name": "aten::adaptive_avg_pool1d(Tensor self, int[1] output_size) -> Tensor",
  816. "category": "Pool"
  817. },
  818. {
  819. "name": "aten::adaptive_avg_pool1d.out(Tensor self, int[1] output_size, *, Tensor(a!) out) -> Tensor(a!)"
  820. },
  821. {
  822. "name": "aten::adaptive_avg_pool2d(Tensor self, SymInt[2] output_size) -> Tensor",
  823. "category": "Pool"
  824. },
  825. {
  826. "name": "aten::adaptive_avg_pool2d.out(Tensor self, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!)",
  827. "category": "Pool"
  828. },
  829. {
  830. "name": "aten::adaptive_avg_pool3d(Tensor self, SymInt[3] output_size) -> Tensor",
  831. "category": "Pool"
  832. },
  833. {
  834. "name": "aten::adaptive_avg_pool3d.out(Tensor self, SymInt[3] output_size, *, Tensor(a!) out) -> Tensor(a!)"
  835. },
  836. {
  837. "name": "aten::adaptive_max_pool1d(Tensor self, int[1] output_size) -> (Tensor, Tensor)",
  838. "category": "Pool"
  839. },
  840. {
  841. "name": "aten::adaptive_max_pool2d(Tensor self, int[2] output_size) -> (Tensor, Tensor)",
  842. "category": "Pool"
  843. },
  844. {
  845. "name": "aten::adaptive_max_pool2d.out(Tensor self, int[2] output_size, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))"
  846. },
  847. {
  848. "name": "aten::adaptive_max_pool3d(Tensor self, int[3] output_size) -> (Tensor, Tensor)",
  849. "category": "Pool"
  850. },
  851. {
  852. "name": "aten::adaptive_max_pool3d.out(Tensor self, int[3] output_size, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))"
  853. },
  854. {
  855. "name": "aten::add.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor"
  856. },
  857. {
  858. "name": "aten::add.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor"
  859. },
  860. {
  861. "name": "aten::add.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)"
  862. },
  863. {
  864. "name": "aten::add.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!)"
  865. },
  866. {
  867. "name": "aten::add.t(t[] a, t[] b) -> t[]"
  868. },
  869. {
  870. "name": "aten::add.str(str a, str b) -> str"
  871. },
  872. {
  873. "name": "aten::add.int(int a, int b) -> int"
  874. },
  875. {
  876. "name": "aten::add.complex(complex a, complex b) -> complex"
  877. },
  878. {
  879. "name": "aten::add.float(float a, float b) -> float"
  880. },
  881. {
  882. "name": "aten::add.int_complex(int a, complex b) -> complex"
  883. },
  884. {
  885. "name": "aten::add.complex_int(complex a, int b) -> complex"
  886. },
  887. {
  888. "name": "aten::add.float_complex(float a, complex b) -> complex"
  889. },
  890. {
  891. "name": "aten::add.complex_float(complex a, float b) -> complex"
  892. },
  893. {
  894. "name": "aten::add.int_float(int a, float b) -> float"
  895. },
  896. {
  897. "name": "aten::add.float_int(float a, int b) -> float"
  898. },
  899. {
  900. "name": "aten::add(Scalar a, Scalar b) -> Scalar"
  901. },
  902. {
  903. "name": "aten::add_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)"
  904. },
  905. {
  906. "name": "aten::add_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)"
  907. },
  908. {
  909. "name": "aten::add_.t(t[](a!) self, t[] b) -> t[]"
  910. },
  911. {
  912. "name": "aten::addbmm(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor"
  913. },
  914. {
  915. "name": "aten::addbmm.out(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)"
  916. },
  917. {
  918. "name": "aten::addcdiv(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor"
  919. },
  920. {
  921. "name": "aten::addcdiv.out(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1, Tensor(a!) out) -> Tensor(a!)"
  922. },
  923. {
  924. "name": "aten::addcmul(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor"
  925. },
  926. {
  927. "name": "aten::addcmul.out(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1, Tensor(a!) out) -> Tensor(a!)"
  928. },
  929. {
  930. "name": "aten::addcmul_(Tensor(a!) self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor(a!)"
  931. },
  932. {
  933. "name": "aten::addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor",
  934. "category": "Layer"
  935. },
  936. {
  937. "name": "aten::addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)"
  938. },
  939. {
  940. "name": "aten::addmm.dtype_out(Tensor self, Tensor mat1, Tensor mat2, ScalarType out_dtype, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)"
  941. },
  942. {
  943. "name": "aten::addmm.dtype(Tensor self, Tensor mat1, Tensor mat2, ScalarType out_dtype, *, Scalar beta=1, Scalar alpha=1) -> Tensor"
  944. },
  945. {
  946. "name": "aten::addmm_(Tensor(a!) self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)"
  947. },
  948. {
  949. "name": "aten::addmv(Tensor self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1) -> Tensor"
  950. },
  951. {
  952. "name": "aten::addmv.out(Tensor self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)"
  953. },
  954. {
  955. "name": "aten::addmv_(Tensor(a!) self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)"
  956. },
  957. {
  958. "name": "aten::affine_grid_generator(Tensor theta, SymInt[] size, bool align_corners) -> Tensor"
  959. },
  960. {
  961. "name": "aten::affine_grid_generator.out(Tensor theta, SymInt[] size, bool align_corners, *, Tensor(a!) out) -> Tensor(a!)"
  962. },
  963. {
  964. "name": "aten::alias(Tensor(a) self) -> Tensor(a)"
  965. },
  966. {
  967. "name": "aten::alias_copy(Tensor self) -> Tensor"
  968. },
  969. {
  970. "name": "aten::alias_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  971. },
  972. {
  973. "name": "aten::all(Tensor self) -> Tensor"
  974. },
  975. {
  976. "name": "aten::all.dim(Tensor self, int dim, bool keepdim=False) -> Tensor"
  977. },
  978. {
  979. "name": "aten::all.dims(Tensor self, int[]? dim=None, bool keepdim=False) -> Tensor"
  980. },
  981. {
  982. "name": "aten::all.out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)"
  983. },
  984. {
  985. "name": "aten::all.dims_out(Tensor self, int[]? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)"
  986. },
  987. {
  988. "name": "aten::all.all_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  989. },
  990. {
  991. "name": "aten::all.dimname(Tensor self, str dim, bool keepdim=False) -> Tensor"
  992. },
  993. {
  994. "name": "aten::all.dimname_out(Tensor self, str dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)"
  995. },
  996. {
  997. "name": "aten::all.int(int[] self) -> bool"
  998. },
  999. {
  1000. "name": "aten::all.float(float[] self) -> bool"
  1001. },
  1002. {
  1003. "name": "aten::all.bool(bool[] self) -> bool"
  1004. },
  1005. {
  1006. "name": "aten::allclose(Tensor self, Tensor other, float rtol=1.0000000000000001e-05, float atol=1e-08, bool equal_nan=False) -> bool"
  1007. },
  1008. {
  1009. "name": "aten::alpha_dropout(Tensor input, float p, bool train) -> Tensor",
  1010. "category": "Dropout"
  1011. },
  1012. {
  1013. "name": "aten::alpha_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)",
  1014. "category": "Dropout"
  1015. },
  1016. {
  1017. "name": "aten::amax(Tensor self, int[1] dim=[], bool keepdim=False) -> Tensor"
  1018. },
  1019. {
  1020. "name": "aten::amax.out(Tensor self, int[1] dim=[], bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)"
  1021. },
  1022. {
  1023. "name": "aten::amin(Tensor self, int[1] dim=[], bool keepdim=False) -> Tensor"
  1024. },
  1025. {
  1026. "name": "aten::amin.out(Tensor self, int[1] dim=[], bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)"
  1027. },
  1028. {
  1029. "name": "aten::aminmax(Tensor self, *, int? dim=None, bool keepdim=False) -> (Tensor min, Tensor max)"
  1030. },
  1031. {
  1032. "name": "aten::aminmax.out(Tensor self, *, int? dim=None, bool keepdim=False, Tensor(a!) min, Tensor(b!) max) -> (Tensor(a!) min, Tensor(b!) max)"
  1033. },
  1034. {
  1035. "name": "aten::angle(Tensor self) -> Tensor"
  1036. },
  1037. {
  1038. "name": "aten::angle.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  1039. },
  1040. {
  1041. "name": "aten::angle.int(int a) -> float"
  1042. },
  1043. {
  1044. "name": "aten::angle.float(float a) -> float"
  1045. },
  1046. {
  1047. "name": "aten::angle.complex(complex a) -> float"
  1048. },
  1049. {
  1050. "name": "aten::angle.Scalar(Scalar a) -> Scalar"
  1051. },
  1052. {
  1053. "name": "aten::any(Tensor self) -> Tensor"
  1054. },
  1055. {
  1056. "name": "aten::any.dim(Tensor self, int dim, bool keepdim=False) -> Tensor"
  1057. },
  1058. {
  1059. "name": "aten::any.dims(Tensor self, int[]? dim=None, bool keepdim=False) -> Tensor"
  1060. },
  1061. {
  1062. "name": "aten::any.out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)"
  1063. },
  1064. {
  1065. "name": "aten::any.dims_out(Tensor self, int[]? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)"
  1066. },
  1067. {
  1068. "name": "aten::any.all_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  1069. },
  1070. {
  1071. "name": "aten::any.dimname(Tensor self, str dim, bool keepdim=False) -> Tensor"
  1072. },
  1073. {
  1074. "name": "aten::any.dimname_out(Tensor self, str dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)"
  1075. },
  1076. {
  1077. "name": "aten::any.str(str[] self) -> bool"
  1078. },
  1079. {
  1080. "name": "aten::any.int(int[] self) -> bool"
  1081. },
  1082. {
  1083. "name": "aten::any.float(float[] self) -> bool"
  1084. },
  1085. {
  1086. "name": "aten::any.bool(bool[] self) -> bool"
  1087. },
  1088. {
  1089. "name": "aten::append.t(t[](a!) self, t(c -> *) el) -> t[](a!)"
  1090. },
  1091. {
  1092. "name": "aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  1093. },
  1094. {
  1095. "name": "aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  1096. },
  1097. {
  1098. "name": "aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  1099. },
  1100. {
  1101. "name": "aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)"
  1102. },
  1103. {
  1104. "name": "aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)"
  1105. },
  1106. {
  1107. "name": "aten::arange.start_out_(Scalar start, Scalar end) -> Tensor"
  1108. },
  1109. {
  1110. "name": "aten::arctan(Tensor self) -> Tensor"
  1111. },
  1112. {
  1113. "name": "aten::arctan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  1114. },
  1115. {
  1116. "name": "aten::arctan2(Tensor self, Tensor other) -> Tensor"
  1117. },
  1118. {
  1119. "name": "aten::arctan2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  1120. },
  1121. {
  1122. "name": "aten::arctan_(Tensor(a!) self) -> Tensor(a!)"
  1123. },
  1124. {
  1125. "name": "aten::argmax(Tensor self, int? dim=None, bool keepdim=False) -> Tensor"
  1126. },
  1127. {
  1128. "name": "aten::argmax.out(Tensor self, int? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)"
  1129. },
  1130. {
  1131. "name": "aten::argmin(Tensor self, int? dim=None, bool keepdim=False) -> Tensor"
  1132. },
  1133. {
  1134. "name": "aten::argmin.out(Tensor self, int? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)"
  1135. },
  1136. {
  1137. "name": "aten::argsort(Tensor self, int dim=-1, bool descending=False) -> Tensor"
  1138. },
  1139. {
  1140. "name": "aten::argsort.stable(Tensor self, *, bool stable, int dim=-1, bool descending=False) -> Tensor"
  1141. },
  1142. {
  1143. "name": "aten::argsort.stable_out(Tensor self, *, bool stable, int dim=-1, bool descending=False, Tensor(a!) out) -> Tensor(a!)"
  1144. },
  1145. {
  1146. "name": "aten::argsort.dimname(Tensor self, str dim, bool descending=False) -> Tensor"
  1147. },
  1148. {
  1149. "name": "aten::argwhere(Tensor self) -> Tensor"
  1150. },
  1151. {
  1152. "name": "aten::as_strided(Tensor(a) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a)"
  1153. },
  1154. {
  1155. "name": "aten::as_strided_(Tensor(a!) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a!)"
  1156. },
  1157. {
  1158. "name": "aten::as_strided_copy(Tensor self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor"
  1159. },
  1160. {
  1161. "name": "aten::as_strided_copy.out(Tensor self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None, *, Tensor(a!) out) -> Tensor(a!)"
  1162. },
  1163. {
  1164. "name": "aten::as_strided_scatter(Tensor self, Tensor src, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor"
  1165. },
  1166. {
  1167. "name": "aten::as_strided_scatter.out(Tensor self, Tensor src, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None, *, Tensor(a!) out) -> Tensor(a!)"
  1168. },
  1169. {
  1170. "name": "aten::as_tensor.bool(bool t, *, ScalarType? dtype=None, Device? device=None) -> Tensor"
  1171. },
  1172. {
  1173. "name": "aten::as_tensor.float(float t, *, ScalarType? dtype=None, Device? device=None) -> Tensor"
  1174. },
  1175. {
  1176. "name": "aten::as_tensor.int(int t, *, ScalarType? dtype=None, Device? device=None) -> Tensor"
  1177. },
  1178. {
  1179. "name": "aten::as_tensor.complex(complex t, *, ScalarType? dtype=None, Device? device=None) -> Tensor"
  1180. },
  1181. {
  1182. "name": "aten::as_tensor(Tensor(a) data, *, ScalarType? dtype=None, Device? device=None) -> Tensor(a|b)"
  1183. },
  1184. {
  1185. "name": "aten::as_tensor.list(t[] data, *, ScalarType? dtype=None, Device? device=None) -> Tensor"
  1186. },
  1187. {
  1188. "name": "aten::asin(Tensor self) -> Tensor"
  1189. },
  1190. {
  1191. "name": "aten::asin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  1192. },
  1193. {
  1194. "name": "aten::asin.int(int a) -> float"
  1195. },
  1196. {
  1197. "name": "aten::asin.float(float a) -> float"
  1198. },
  1199. {
  1200. "name": "aten::asin.complex(complex a) -> complex"
  1201. },
  1202. {
  1203. "name": "aten::asin.Scalar(Scalar a) -> Scalar"
  1204. },
  1205. {
  1206. "name": "aten::asinh(Tensor self) -> Tensor"
  1207. },
  1208. {
  1209. "name": "aten::asinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  1210. },
  1211. {
  1212. "name": "aten::asinh.int(int a) -> float"
  1213. },
  1214. {
  1215. "name": "aten::asinh.float(float a) -> float"
  1216. },
  1217. {
  1218. "name": "aten::asinh.complex(complex a) -> complex"
  1219. },
  1220. {
  1221. "name": "aten::asinh.Scalar(Scalar a) -> Scalar"
  1222. },
  1223. {
  1224. "name": "aten::atan(Tensor self) -> Tensor"
  1225. },
  1226. {
  1227. "name": "aten::atan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  1228. },
  1229. {
  1230. "name": "aten::atan.int(int a) -> float"
  1231. },
  1232. {
  1233. "name": "aten::atan.float(float a) -> float"
  1234. },
  1235. {
  1236. "name": "aten::atan.complex(complex a) -> complex"
  1237. },
  1238. {
  1239. "name": "aten::atan.Scalar(Scalar a) -> Scalar"
  1240. },
  1241. {
  1242. "name": "aten::atan2(Tensor self, Tensor other) -> Tensor"
  1243. },
  1244. {
  1245. "name": "aten::atan2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  1246. },
  1247. {
  1248. "name": "aten::atan2.int(int a, int b) -> float"
  1249. },
  1250. {
  1251. "name": "aten::atan2.float(float a, float b) -> float"
  1252. },
  1253. {
  1254. "name": "aten::atan2.int_float(int a, float b) -> float"
  1255. },
  1256. {
  1257. "name": "aten::atan2.float_int(float a, int b) -> float"
  1258. },
  1259. {
  1260. "name": "aten::atan2.Scalar_Scalar(Scalar a, Scalar b) -> float"
  1261. },
  1262. {
  1263. "name": "aten::atan2_(Tensor(a!) self, Tensor other) -> Tensor(a!)"
  1264. },
  1265. {
  1266. "name": "aten::atan_(Tensor(a!) self) -> Tensor(a!)"
  1267. },
  1268. {
  1269. "name": "aten::atanh(Tensor self) -> Tensor"
  1270. },
  1271. {
  1272. "name": "aten::atanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  1273. },
  1274. {
  1275. "name": "aten::atanh.int(int a) -> float"
  1276. },
  1277. {
  1278. "name": "aten::atanh.float(float a) -> float"
  1279. },
  1280. {
  1281. "name": "aten::atanh.complex(complex a) -> complex"
  1282. },
  1283. {
  1284. "name": "aten::atanh.Scalar(Scalar a) -> Scalar"
  1285. },
  1286. {
  1287. "name": "aten::atanh_(Tensor(a!) self) -> Tensor(a!)"
  1288. },
  1289. {
  1290. "name": "aten::avg_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=[0], bool ceil_mode=False, bool count_include_pad=True) -> Tensor",
  1291. "category": "Pool"
  1292. },
  1293. {
  1294. "name": "aten::avg_pool1d.out(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=[0], bool ceil_mode=False, bool count_include_pad=True, *, Tensor(a!) out) -> Tensor(a!)"
  1295. },
  1296. {
  1297. "name": "aten::avg_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None) -> Tensor",
  1298. "category": "Pool"
  1299. },
  1300. {
  1301. "name": "aten::avg_pool2d.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None, *, Tensor(a!) out) -> Tensor(a!)"
  1302. },
  1303. {
  1304. "name": "aten::avg_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None) -> Tensor",
  1305. "category": "Pool"
  1306. },
  1307. {
  1308. "name": "aten::avg_pool3d.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None, *, Tensor(a!) out) -> Tensor(a!)"
  1309. },
  1310. {
  1311. "name": "aten::baddbmm(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor"
  1312. },
  1313. {
  1314. "name": "aten::baddbmm.out(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)"
  1315. },
  1316. {
  1317. "name": "aten::baddbmm.dtype_out(Tensor self, Tensor batch1, Tensor batch2, ScalarType out_dtype, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)"
  1318. },
  1319. {
  1320. "name": "aten::baddbmm.dtype(Tensor self, Tensor batch1, Tensor batch2, ScalarType out_dtype, *, Scalar beta=1, Scalar alpha=1) -> Tensor"
  1321. },
  1322. {
  1323. "name": "aten::baddbmm_(Tensor(a!) self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)"
  1324. },
  1325. {
  1326. "name": "aten::batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps, bool cudnn_enabled) -> Tensor",
  1327. "category": "Normalization"
  1328. },
  1329. {
  1330. "name": "aten::bernoulli(Tensor self, *, Generator? generator=None) -> Tensor"
  1331. },
  1332. {
  1333. "name": "aten::bernoulli.out(Tensor self, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)"
  1334. },
  1335. {
  1336. "name": "aten::bernoulli.p(Tensor self, float p, *, Generator? generator=None) -> Tensor"
  1337. },
  1338. {
  1339. "name": "aten::bernoulli.Tensor(Tensor self, Tensor p, *, Generator? generator=None) -> Tensor"
  1340. },
  1341. {
  1342. "name": "aten::bernoulli.Tensor_out(Tensor self, Tensor p, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)"
  1343. },
  1344. {
  1345. "name": "aten::bernoulli.float_out(Tensor self, float p=0.5, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)"
  1346. },
  1347. {
  1348. "name": "aten::bernoulli_.Tensor(Tensor(a!) self, Tensor p, *, Generator? generator=None) -> Tensor(a!)"
  1349. },
  1350. {
  1351. "name": "aten::bernoulli_.float(Tensor(a!) self, float p=0.5, *, Generator? generator=None) -> Tensor(a!)"
  1352. },
  1353. {
  1354. "name": "aten::bilinear(Tensor input1, Tensor input2, Tensor weight, Tensor? bias=None) -> Tensor"
  1355. },
  1356. {
  1357. "name": "aten::bin(int i) -> str"
  1358. },
  1359. {
  1360. "name": "aten::binary_cross_entropy(Tensor self, Tensor target, Tensor? weight=None, int reduction=1) -> Tensor"
  1361. },
  1362. {
  1363. "name": "aten::binary_cross_entropy.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=1, *, Tensor(a!) out) -> Tensor(a!)"
  1364. },
  1365. {
  1366. "name": "aten::binary_cross_entropy_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight=None, int reduction=1) -> Tensor"
  1367. },
  1368. {
  1369. "name": "aten::binary_cross_entropy_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight=None, int reduction=1, *, Tensor(a!) grad_input) -> Tensor(a!)"
  1370. },
  1371. {
  1372. "name": "aten::binary_cross_entropy_with_logits(Tensor self, Tensor target, Tensor? weight=None, Tensor? pos_weight=None, int reduction=1) -> Tensor"
  1373. },
  1374. {
  1375. "name": "aten::binary_cross_entropy_with_logits.out(Tensor self, Tensor target, Tensor? weight=None, Tensor? pos_weight=None, int reduction=1, *, Tensor(a!) out) -> Tensor(a!)"
  1376. },
  1377. {
  1378. "name": "aten::bincount(Tensor self, Tensor? weights=None, SymInt minlength=0) -> Tensor"
  1379. },
  1380. {
  1381. "name": "aten::bincount.out(Tensor self, Tensor? weights=None, SymInt minlength=0, *, Tensor(a!) out) -> Tensor(a!)"
  1382. },
  1383. {
  1384. "name": "aten::binomial(Tensor count, Tensor prob, Generator? generator=None) -> Tensor"
  1385. },
  1386. {
  1387. "name": "aten::binomial.out(Tensor count, Tensor prob, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!)"
  1388. },
  1389. {
  1390. "name": "aten::bitwise_and.Tensor(Tensor self, Tensor other) -> Tensor"
  1391. },
  1392. {
  1393. "name": "aten::bitwise_and.Scalar(Tensor self, Scalar other) -> Tensor"
  1394. },
  1395. {
  1396. "name": "aten::bitwise_and.Scalar_Tensor(Scalar self, Tensor other) -> Tensor"
  1397. },
  1398. {
  1399. "name": "aten::bitwise_and.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  1400. },
  1401. {
  1402. "name": "aten::bitwise_and.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)"
  1403. },
  1404. {
  1405. "name": "aten::bitwise_and.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  1406. },
  1407. {
  1408. "name": "aten::bitwise_and_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)"
  1409. },
  1410. {
  1411. "name": "aten::bitwise_and_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)"
  1412. },
  1413. {
  1414. "name": "aten::bitwise_left_shift.Tensor(Tensor self, Tensor other) -> Tensor"
  1415. },
  1416. {
  1417. "name": "aten::bitwise_left_shift.Tensor_Scalar(Tensor self, Scalar other) -> Tensor"
  1418. },
  1419. {
  1420. "name": "aten::bitwise_left_shift.Scalar_Tensor(Scalar self, Tensor other) -> Tensor"
  1421. },
  1422. {
  1423. "name": "aten::bitwise_left_shift.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  1424. },
  1425. {
  1426. "name": "aten::bitwise_left_shift.Tensor_Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)"
  1427. },
  1428. {
  1429. "name": "aten::bitwise_left_shift.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  1430. },
  1431. {
  1432. "name": "aten::bitwise_left_shift_.Tensor_Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)"
  1433. },
  1434. {
  1435. "name": "aten::bitwise_left_shift_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)"
  1436. },
  1437. {
  1438. "name": "aten::bitwise_not(Tensor self) -> Tensor"
  1439. },
  1440. {
  1441. "name": "aten::bitwise_not.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  1442. },
  1443. {
  1444. "name": "aten::bitwise_not_(Tensor(a!) self) -> Tensor(a!)"
  1445. },
  1446. {
  1447. "name": "aten::bitwise_or.Tensor(Tensor self, Tensor other) -> Tensor"
  1448. },
  1449. {
  1450. "name": "aten::bitwise_or.Scalar(Tensor self, Scalar other) -> Tensor"
  1451. },
  1452. {
  1453. "name": "aten::bitwise_or.Scalar_Tensor(Scalar self, Tensor other) -> Tensor"
  1454. },
  1455. {
  1456. "name": "aten::bitwise_or.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  1457. },
  1458. {
  1459. "name": "aten::bitwise_or.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)"
  1460. },
  1461. {
  1462. "name": "aten::bitwise_or.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  1463. },
  1464. {
  1465. "name": "aten::bitwise_or_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)"
  1466. },
  1467. {
  1468. "name": "aten::bitwise_or_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)"
  1469. },
  1470. {
  1471. "name": "aten::bitwise_right_shift.Tensor(Tensor self, Tensor other) -> Tensor"
  1472. },
  1473. {
  1474. "name": "aten::bitwise_right_shift.Tensor_Scalar(Tensor self, Scalar other) -> Tensor"
  1475. },
  1476. {
  1477. "name": "aten::bitwise_right_shift.Scalar_Tensor(Scalar self, Tensor other) -> Tensor"
  1478. },
  1479. {
  1480. "name": "aten::bitwise_right_shift.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  1481. },
  1482. {
  1483. "name": "aten::bitwise_right_shift.Tensor_Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)"
  1484. },
  1485. {
  1486. "name": "aten::bitwise_right_shift.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  1487. },
  1488. {
  1489. "name": "aten::bitwise_right_shift_.Tensor_Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)"
  1490. },
  1491. {
  1492. "name": "aten::bitwise_right_shift_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)"
  1493. },
  1494. {
  1495. "name": "aten::bitwise_xor.Tensor(Tensor self, Tensor other) -> Tensor"
  1496. },
  1497. {
  1498. "name": "aten::bitwise_xor.Scalar(Tensor self, Scalar other) -> Tensor"
  1499. },
  1500. {
  1501. "name": "aten::bitwise_xor.Scalar_Tensor(Scalar self, Tensor other) -> Tensor"
  1502. },
  1503. {
  1504. "name": "aten::bitwise_xor.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  1505. },
  1506. {
  1507. "name": "aten::bitwise_xor.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)"
  1508. },
  1509. {
  1510. "name": "aten::bitwise_xor.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  1511. },
  1512. {
  1513. "name": "aten::bitwise_xor_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)"
  1514. },
  1515. {
  1516. "name": "aten::bitwise_xor_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)"
  1517. },
  1518. {
  1519. "name": "aten::block_diag(Tensor[] tensors) -> Tensor"
  1520. },
  1521. {
  1522. "name": "aten::block_diag.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)"
  1523. },
  1524. {
  1525. "name": "aten::bmm(Tensor self, Tensor mat2) -> Tensor"
  1526. },
  1527. {
  1528. "name": "aten::bmm.out(Tensor self, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!)"
  1529. },
  1530. {
  1531. "name": "aten::bmm.dtype_out(Tensor self, Tensor mat2, ScalarType out_dtype, *, Tensor(a!) out) -> Tensor(a!)"
  1532. },
  1533. {
  1534. "name": "aten::bmm.dtype(Tensor self, Tensor mat2, ScalarType out_dtype) -> Tensor"
  1535. },
  1536. {
  1537. "name": "aten::broadcast_tensors(Tensor[] tensors) -> Tensor[]"
  1538. },
  1539. {
  1540. "name": "aten::broadcast_to(Tensor(a) self, SymInt[] size) -> Tensor(a)"
  1541. },
  1542. {
  1543. "name": "aten::bucketize.Tensor(Tensor self, Tensor boundaries, *, bool out_int32=False, bool right=False) -> Tensor"
  1544. },
  1545. {
  1546. "name": "aten::bucketize.Scalar(Scalar self, Tensor boundaries, *, bool out_int32=False, bool right=False) -> Tensor"
  1547. },
  1548. {
  1549. "name": "aten::bucketize.Tensor_out(Tensor self, Tensor boundaries, *, bool out_int32=False, bool right=False, Tensor(a!) out) -> Tensor(a!)"
  1550. },
  1551. {
  1552. "name": "aten::bucketize.Scalar_out(Scalar self, Tensor boundaries, *, bool out_int32=False, bool right=False, Tensor(a!) out) -> Tensor(a!)"
  1553. },
  1554. {
  1555. "name": "aten::cartesian_prod(Tensor[] tensors) -> Tensor"
  1556. },
  1557. {
  1558. "name": "aten::cat(Tensor[] tensors, int dim=0) -> Tensor",
  1559. "category": "Tensor"
  1560. },
  1561. {
  1562. "name": "aten::cat.names(Tensor[] tensors, str dim) -> Tensor",
  1563. "category": "Tensor"
  1564. },
  1565. {
  1566. "name": "aten::cat.names_out(Tensor[] tensors, str dim, *, Tensor(a!) out) -> Tensor(a!)",
  1567. "category": "Tensor"
  1568. },
  1569. {
  1570. "name": "aten::cat.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)",
  1571. "category": "Tensor"
  1572. },
  1573. {
  1574. "name": "aten::cauchy_(Tensor(a!) self, float median=0., float sigma=1., *, Generator? generator=None) -> Tensor(a!)"
  1575. },
  1576. {
  1577. "name": "aten::cdist(Tensor x1, Tensor x2, float p=2., int? compute_mode=None) -> Tensor"
  1578. },
  1579. {
  1580. "name": "aten::ceil(Tensor self) -> Tensor"
  1581. },
  1582. {
  1583. "name": "aten::ceil.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  1584. },
  1585. {
  1586. "name": "aten::ceil.int(int a) -> int"
  1587. },
  1588. {
  1589. "name": "aten::ceil.float(float a) -> int"
  1590. },
  1591. {
  1592. "name": "aten::ceil.Scalar(Scalar a) -> Scalar"
  1593. },
  1594. {
  1595. "name": "aten::ceil_(Tensor(a!) self) -> Tensor(a!)"
  1596. },
  1597. {
  1598. "name": "aten::celu(Tensor self, Scalar alpha=1.) -> Tensor",
  1599. "category": "Activation"
  1600. },
  1601. {
  1602. "name": "aten::celu.out(Tensor self, Scalar alpha=1., *, Tensor(a!) out) -> Tensor(a!)"
  1603. },
  1604. {
  1605. "name": "aten::celu_(Tensor(a!) self, Scalar alpha=1.) -> Tensor(a!)"
  1606. },
  1607. {
  1608. "name": "aten::channel_shuffle(Tensor self, SymInt groups) -> Tensor"
  1609. },
  1610. {
  1611. "name": "aten::channel_shuffle.out(Tensor self, SymInt groups, *, Tensor(a!) out) -> Tensor(a!)"
  1612. },
  1613. {
  1614. "name": "aten::cholesky_solve(Tensor self, Tensor input2, bool upper=False) -> Tensor"
  1615. },
  1616. {
  1617. "name": "aten::cholesky_solve.out(Tensor self, Tensor input2, bool upper=False, *, Tensor(a!) out) -> Tensor(a!)"
  1618. },
  1619. {
  1620. "name": "aten::chr(int i) -> str"
  1621. },
  1622. {
  1623. "name": "aten::chunk(Tensor(a -> *) self, int chunks, int dim=0) -> Tensor(a)[]"
  1624. },
  1625. {
  1626. "name": "aten::clamp(Tensor self, Scalar? min=None, Scalar? max=None) -> Tensor"
  1627. },
  1628. {
  1629. "name": "aten::clamp.Tensor(Tensor self, Tensor? min=None, Tensor? max=None) -> Tensor"
  1630. },
  1631. {
  1632. "name": "aten::clamp.out(Tensor self, Scalar? min=None, Scalar? max=None, *, Tensor(a!) out) -> Tensor(a!)"
  1633. },
  1634. {
  1635. "name": "aten::clamp.Tensor_out(Tensor self, Tensor? min=None, Tensor? max=None, *, Tensor(a!) out) -> Tensor(a!)"
  1636. },
  1637. {
  1638. "name": "aten::clamp_(Tensor(a!) self, Scalar? min=None, Scalar? max=None) -> Tensor(a!)"
  1639. },
  1640. {
  1641. "name": "aten::clamp_.Tensor(Tensor(a!) self, Tensor? min=None, Tensor? max=None) -> Tensor(a!)"
  1642. },
  1643. {
  1644. "name": "aten::clamp_max(Tensor self, Scalar max) -> Tensor"
  1645. },
  1646. {
  1647. "name": "aten::clamp_max.Tensor(Tensor self, Tensor max) -> Tensor"
  1648. },
  1649. {
  1650. "name": "aten::clamp_max.out(Tensor self, Scalar max, *, Tensor(a!) out) -> Tensor(a!)"
  1651. },
  1652. {
  1653. "name": "aten::clamp_max.Tensor_out(Tensor self, Tensor max, *, Tensor(a!) out) -> Tensor(a!)"
  1654. },
  1655. {
  1656. "name": "aten::clamp_max_(Tensor(a!) self, Scalar max) -> Tensor(a!)"
  1657. },
  1658. {
  1659. "name": "aten::clamp_max_.Tensor(Tensor(a!) self, Tensor max) -> Tensor(a!)"
  1660. },
  1661. {
  1662. "name": "aten::clamp_min(Tensor self, Scalar min) -> Tensor"
  1663. },
  1664. {
  1665. "name": "aten::clamp_min.Tensor(Tensor self, Tensor min) -> Tensor"
  1666. },
  1667. {
  1668. "name": "aten::clamp_min.out(Tensor self, Scalar min, *, Tensor(a!) out) -> Tensor(a!)"
  1669. },
  1670. {
  1671. "name": "aten::clamp_min.Tensor_out(Tensor self, Tensor min, *, Tensor(a!) out) -> Tensor(a!)"
  1672. },
  1673. {
  1674. "name": "aten::clamp_min_(Tensor(a!) self, Scalar min) -> Tensor(a!)"
  1675. },
  1676. {
  1677. "name": "aten::clamp_min_.Tensor(Tensor(a!) self, Tensor min) -> Tensor(a!)"
  1678. },
  1679. {
  1680. "name": "aten::clear.t(t[](a!) self) -> ()"
  1681. },
  1682. {
  1683. "name": "aten::clear.str(Dict(str, t)(a!) self) -> ()"
  1684. },
  1685. {
  1686. "name": "aten::clear.int(Dict(int, t)(a!) self) -> ()"
  1687. },
  1688. {
  1689. "name": "aten::clear.bool(Dict(bool, t)(a!) self) -> ()"
  1690. },
  1691. {
  1692. "name": "aten::clear.float(Dict(float, t)(a!) self) -> ()"
  1693. },
  1694. {
  1695. "name": "aten::clear.complex(Dict(complex, t)(a!) self) -> ()"
  1696. },
  1697. {
  1698. "name": "aten::clear.Tensor(Dict(Tensor, t)(a!) self) -> ()"
  1699. },
  1700. {
  1701. "name": "aten::clip(Tensor self, Scalar? min=None, Scalar? max=None) -> Tensor"
  1702. },
  1703. {
  1704. "name": "aten::clip.Tensor(Tensor self, Tensor? min=None, Tensor? max=None) -> Tensor"
  1705. },
  1706. {
  1707. "name": "aten::clip.out(Tensor self, Scalar? min=None, Scalar? max=None, *, Tensor(a!) out) -> Tensor(a!)"
  1708. },
  1709. {
  1710. "name": "aten::clip.Tensor_out(Tensor self, Tensor? min=None, Tensor? max=None, *, Tensor(a!) out) -> Tensor(a!)"
  1711. },
  1712. {
  1713. "name": "aten::clip_(Tensor(a!) self, Scalar? min=None, Scalar? max=None) -> Tensor(a!)"
  1714. },
  1715. {
  1716. "name": "aten::clip_.Tensor(Tensor(a!) self, Tensor? min=None, Tensor? max=None) -> Tensor(a!)"
  1717. },
  1718. {
  1719. "name": "aten::clone(Tensor self, *, MemoryFormat? memory_format=None) -> Tensor"
  1720. },
  1721. {
  1722. "name": "aten::clone.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)"
  1723. },
  1724. {
  1725. "name": "aten::coalesce(Tensor(a) self) -> Tensor(a)"
  1726. },
  1727. {
  1728. "name": "aten::col2im(Tensor self, SymInt[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor"
  1729. },
  1730. {
  1731. "name": "aten::col2im.out(Tensor self, SymInt[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!)"
  1732. },
  1733. {
  1734. "name": "aten::col_indices(Tensor(a) self) -> Tensor(a)"
  1735. },
  1736. {
  1737. "name": "aten::column_stack(Tensor[] tensors) -> Tensor"
  1738. },
  1739. {
  1740. "name": "aten::column_stack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)"
  1741. },
  1742. {
  1743. "name": "aten::complex(Tensor real, Tensor imag) -> Tensor"
  1744. },
  1745. {
  1746. "name": "aten::complex.out(Tensor real, Tensor imag, *, Tensor(a!) out) -> Tensor(a!)"
  1747. },
  1748. {
  1749. "name": "aten::concat(Tensor[] tensors, int dim=0) -> Tensor",
  1750. "category": "Tensor"
  1751. },
  1752. {
  1753. "name": "aten::concat.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)"
  1754. },
  1755. {
  1756. "name": "aten::concat.names(Tensor[] tensors, str dim) -> Tensor",
  1757. "category": "Tensor"
  1758. },
  1759. {
  1760. "name": "aten::concat.names_out(Tensor[] tensors, str dim, *, Tensor(a!) out) -> Tensor(a!)"
  1761. },
  1762. {
  1763. "name": "aten::concatenate(Tensor[] tensors, int dim=0) -> Tensor"
  1764. },
  1765. {
  1766. "name": "aten::concatenate.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)"
  1767. },
  1768. {
  1769. "name": "aten::concatenate.names(Tensor[] tensors, str dim) -> Tensor"
  1770. },
  1771. {
  1772. "name": "aten::concatenate.names_out(Tensor[] tensors, str dim, *, Tensor(a!) out) -> Tensor(a!)"
  1773. },
  1774. {
  1775. "name": "aten::conj(Tensor(a) self) -> Tensor(a)"
  1776. },
  1777. {
  1778. "name": "aten::constant_pad_nd(Tensor self, SymInt[] pad, Scalar value=0) -> Tensor",
  1779. "category": "Tensor"
  1780. },
  1781. {
  1782. "name": "aten::constant_pad_nd.out(Tensor self, SymInt[] pad, Scalar value=0, *, Tensor(a!) out) -> Tensor(a!)"
  1783. },
  1784. {
  1785. "name": "aten::contiguous(Tensor(a) self, *, MemoryFormat memory_format=0) -> Tensor(a)"
  1786. },
  1787. {
  1788. "name": "aten::conv1d(Tensor input, Tensor weight, Tensor? bias=None, SymInt[1] stride=[1], SymInt[1] padding=[0], SymInt[1] dilation=[1], SymInt groups=1) -> Tensor",
  1789. "category": "Layer"
  1790. },
  1791. {
  1792. "name": "aten::conv1d.padding(Tensor input, Tensor weight, Tensor? bias=None, SymInt[1] stride=[1], str padding=\"valid\", SymInt[1] dilation=[1], SymInt groups=1) -> Tensor",
  1793. "category": "Layer"
  1794. },
  1795. {
  1796. "name": "aten::conv2d(Tensor input, Tensor weight, Tensor? bias=None, SymInt[2] stride=[1, 1], SymInt[2] padding=[0, 0], SymInt[2] dilation=[1, 1], SymInt groups=1) -> Tensor",
  1797. "category": "Layer"
  1798. },
  1799. {
  1800. "name": "aten::conv2d.padding(Tensor input, Tensor weight, Tensor? bias=None, SymInt[2] stride=[1, 1], str padding=\"valid\", SymInt[2] dilation=[1, 1], SymInt groups=1) -> Tensor",
  1801. "category": "Layer"
  1802. },
  1803. {
  1804. "name": "aten::conv3d(Tensor input, Tensor weight, Tensor? bias=None, SymInt[3] stride=[1, 1, 1], SymInt[3] padding=[0, 0, 0], SymInt[3] dilation=[1, 1, 1], SymInt groups=1) -> Tensor",
  1805. "category": "Layer"
  1806. },
  1807. {
  1808. "name": "aten::conv3d.padding(Tensor input, Tensor weight, Tensor? bias=None, SymInt[3] stride=[1, 1, 1], str padding=\"valid\", SymInt[3] dilation=[1, 1, 1], SymInt groups=1) -> Tensor",
  1809. "category": "Layer"
  1810. },
  1811. {
  1812. "name": "aten::conv_transpose1d(Tensor input, Tensor weight, Tensor? bias=None, SymInt[1] stride=[1], SymInt[1] padding=[0], SymInt[1] output_padding=[0], SymInt groups=1, SymInt[1] dilation=[1]) -> Tensor",
  1813. "category": "Layer"
  1814. },
  1815. {
  1816. "name": "aten::conv_transpose2d.input(Tensor input, Tensor weight, Tensor? bias=None, SymInt[2] stride=[1, 1], SymInt[2] padding=[0, 0], SymInt[2] output_padding=[0, 0], SymInt groups=1, SymInt[2] dilation=[1, 1]) -> Tensor",
  1817. "category": "Layer"
  1818. },
  1819. {
  1820. "name": "aten::conv_transpose3d.input(Tensor input, Tensor weight, Tensor? bias=None, SymInt[3] stride=[1, 1, 1], SymInt[3] padding=[0, 0, 0], SymInt[3] output_padding=[0, 0, 0], SymInt groups=1, SymInt[3] dilation=[1, 1, 1]) -> Tensor",
  1821. "category": "Layer"
  1822. },
  1823. {
  1824. "name": "aten::convolution(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups) -> Tensor",
  1825. "category": "Layer"
  1826. },
  1827. {
  1828. "name": "aten::convolution.out(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, *, Tensor(a!) out) -> Tensor(a!)",
  1829. "category": "Layer"
  1830. },
  1831. {
  1832. "name": "aten::convolution_backward(Tensor grad_output, Tensor input, Tensor weight, SymInt[]? bias_sizes, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor)"
  1833. },
  1834. {
  1835. "name": "aten::convolution_backward.out(Tensor grad_output, Tensor input, Tensor weight, SymInt[]? bias_sizes, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))"
  1836. },
  1837. {
  1838. "name": "aten::convolution_backward_overrideable(Tensor grad_output, Tensor input, Tensor weight, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias)"
  1839. },
  1840. {
  1841. "name": "aten::convolution_backward_overrideable.out(Tensor grad_output, Tensor input, Tensor weight, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))"
  1842. },
  1843. {
  1844. "name": "aten::convolution_overrideable(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups) -> Tensor"
  1845. },
  1846. {
  1847. "name": "aten::convolution_overrideable.out(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, *, Tensor(a!) out) -> Tensor(a!)"
  1848. },
  1849. {
  1850. "name": "aten::copy(Tensor self, Tensor src, bool non_blocking=False) -> Tensor"
  1851. },
  1852. {
  1853. "name": "aten::copy.out(Tensor self, Tensor src, bool non_blocking=False, *, Tensor(a!) out) -> Tensor(a!)"
  1854. },
  1855. {
  1856. "name": "aten::copy.t(t[](a) self) -> t[]"
  1857. },
  1858. {
  1859. "name": "aten::copy.Dict_str(Dict(str, t)(a) self) -> Dict(str, t)"
  1860. },
  1861. {
  1862. "name": "aten::copy.Dict_int(Dict(int, t)(a) self) -> Dict(int, t)"
  1863. },
  1864. {
  1865. "name": "aten::copy.Dict_bool(Dict(bool, t)(a) self) -> Dict(bool, t)"
  1866. },
  1867. {
  1868. "name": "aten::copy.Dict_float(Dict(float, t)(a) self) -> Dict(float, t)"
  1869. },
  1870. {
  1871. "name": "aten::copy.Dict_complex(Dict(complex, t)(a) self) -> Dict(complex, t)"
  1872. },
  1873. {
  1874. "name": "aten::copy.Dict_Tensor(Dict(Tensor, t)(a) self) -> Dict(Tensor, t)"
  1875. },
  1876. {
  1877. "name": "aten::copy_(Tensor(a!) self, Tensor src, bool non_blocking=False) -> Tensor(a!)"
  1878. },
  1879. {
  1880. "name": "aten::copy_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)"
  1881. },
  1882. {
  1883. "name": "aten::copy_.int(Tensor(a!) self, int other) -> Tensor(a!)"
  1884. },
  1885. {
  1886. "name": "aten::copy_.float(Tensor(a!) self, float other) -> Tensor(a!)"
  1887. },
  1888. {
  1889. "name": "aten::cos(Tensor self) -> Tensor"
  1890. },
  1891. {
  1892. "name": "aten::cos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  1893. },
  1894. {
  1895. "name": "aten::cos.int(int a) -> float"
  1896. },
  1897. {
  1898. "name": "aten::cos.float(float a) -> float"
  1899. },
  1900. {
  1901. "name": "aten::cos.complex(complex a) -> complex"
  1902. },
  1903. {
  1904. "name": "aten::cos.Scalar(Scalar a) -> Scalar"
  1905. },
  1906. {
  1907. "name": "aten::cosh(Tensor self) -> Tensor"
  1908. },
  1909. {
  1910. "name": "aten::cosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  1911. },
  1912. {
  1913. "name": "aten::cosh.int(int a) -> float"
  1914. },
  1915. {
  1916. "name": "aten::cosh.float(float a) -> float"
  1917. },
  1918. {
  1919. "name": "aten::cosh.complex(complex a) -> complex"
  1920. },
  1921. {
  1922. "name": "aten::cosh.Scalar(Scalar a) -> Scalar"
  1923. },
  1924. {
  1925. "name": "aten::cosine_similarity(Tensor x1, Tensor x2, int dim=1, float eps=1e-08) -> Tensor"
  1926. },
  1927. {
  1928. "name": "aten::count(str self, str substr, int start=0, int end=-1) -> int"
  1929. },
  1930. {
  1931. "name": "aten::count.int(int[] self, int el) -> int"
  1932. },
  1933. {
  1934. "name": "aten::count.float(float[] self, float el) -> int"
  1935. },
  1936. {
  1937. "name": "aten::count.bool(bool[] self, bool el) -> int"
  1938. },
  1939. {
  1940. "name": "aten::count.Tensor(Tensor[] self, Tensor el) -> int"
  1941. },
  1942. {
  1943. "name": "aten::count.str(str[] self, str el) -> int"
  1944. },
  1945. {
  1946. "name": "aten::count_nonzero.dim_IntList(Tensor self, int[] dim) -> Tensor"
  1947. },
  1948. {
  1949. "name": "aten::count_nonzero.dim_IntList_out(Tensor self, int[] dim, *, Tensor(a!) out) -> Tensor(a!)"
  1950. },
  1951. {
  1952. "name": "aten::count_nonzero(Tensor self, int? dim=None) -> Tensor"
  1953. },
  1954. {
  1955. "name": "aten::count_nonzero.out(Tensor self, int? dim=None, *, Tensor(a!) out) -> Tensor(a!)"
  1956. },
  1957. {
  1958. "name": "aten::cpu(Tensor(a) self) -> Tensor(a|b)"
  1959. },
  1960. {
  1961. "name": "aten::cross(Tensor self, Tensor other, int? dim=None) -> Tensor"
  1962. },
  1963. {
  1964. "name": "aten::cross.out(Tensor self, Tensor other, int? dim=None, *, Tensor(a!) out) -> Tensor(a!)"
  1965. },
  1966. {
  1967. "name": "aten::cross_entropy_loss(Tensor self, Tensor target, Tensor? weight=None, int reduction=1, SymInt ignore_index=-100, float label_smoothing=0.) -> Tensor"
  1968. },
  1969. {
  1970. "name": "aten::crow_indices(Tensor(a) self) -> Tensor(a)"
  1971. },
  1972. {
  1973. "name": "aten::ctc_loss.IntList(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank=0, int reduction=1, bool zero_infinity=False) -> Tensor"
  1974. },
  1975. {
  1976. "name": "aten::ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank=0, int reduction=1, bool zero_infinity=False) -> Tensor"
  1977. },
  1978. {
  1979. "name": "aten::cuda(Tensor(a) self) -> Tensor(a|b)"
  1980. },
  1981. {
  1982. "name": "aten::cudnn_batch_norm(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon) -> (Tensor, Tensor, Tensor, Tensor)"
  1983. },
  1984. {
  1985. "name": "aten::cudnn_batch_norm.out(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))"
  1986. },
  1987. {
  1988. "name": "aten::cudnn_convolution_add_relu.out(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups, *, Tensor(a!) out) -> Tensor(a!)"
  1989. },
  1990. {
  1991. "name": "aten::cudnn_convolution_add_relu(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups) -> Tensor"
  1992. },
  1993. {
  1994. "name": "aten::cudnn_convolution_relu.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups, *, Tensor(a!) out) -> Tensor(a!)"
  1995. },
  1996. {
  1997. "name": "aten::cudnn_convolution_relu(Tensor self, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups) -> Tensor"
  1998. },
  1999. {
  2000. "name": "aten::cummax(Tensor self, int dim) -> (Tensor values, Tensor indices)"
  2001. },
  2002. {
  2003. "name": "aten::cummax.dimname(Tensor self, str dim) -> (Tensor values, Tensor indices)"
  2004. },
  2005. {
  2006. "name": "aten::cummax.dimname_out(Tensor self, str dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)"
  2007. },
  2008. {
  2009. "name": "aten::cummax.out(Tensor self, int dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)"
  2010. },
  2011. {
  2012. "name": "aten::cummaxmin_backward(Tensor grad, Tensor input, Tensor indices, int dim) -> Tensor"
  2013. },
  2014. {
  2015. "name": "aten::cumprod(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor"
  2016. },
  2017. {
  2018. "name": "aten::cumprod.dimname(Tensor self, str dim, *, ScalarType? dtype=None) -> Tensor"
  2019. },
  2020. {
  2021. "name": "aten::cumprod.dimname_out(Tensor self, str dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)"
  2022. },
  2023. {
  2024. "name": "aten::cumprod.out(Tensor self, int dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)"
  2025. },
  2026. {
  2027. "name": "aten::cumsum(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor"
  2028. },
  2029. {
  2030. "name": "aten::cumsum.dimname(Tensor self, str dim, *, ScalarType? dtype=None) -> Tensor"
  2031. },
  2032. {
  2033. "name": "aten::cumsum.dimname_out(Tensor self, str dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)"
  2034. },
  2035. {
  2036. "name": "aten::cumsum.out(Tensor self, int dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)"
  2037. },
  2038. {
  2039. "name": "aten::cumsum_(Tensor(a!) self, int dim, *, ScalarType? dtype=None) -> Tensor(a!)"
  2040. },
  2041. {
  2042. "name": "aten::cumsum_.dimname(Tensor(a!) self, str dim, *, ScalarType? dtype=None) -> Tensor(a!)"
  2043. },
  2044. {
  2045. "name": "aten::dequantize.self(Tensor self) -> Tensor",
  2046. "category": "Quantization"
  2047. },
  2048. {
  2049. "name": "aten::dequantize.self_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  2050. },
  2051. {
  2052. "name": "aten::dequantize.tensors_out(Tensor[] tensors, *, Tensor(a!)[] out) -> ()"
  2053. },
  2054. {
  2055. "name": "aten::dequantize.tensors(Tensor[] tensors) -> Tensor[]",
  2056. "category": "Quantization"
  2057. },
  2058. {
  2059. "name": "aten::dequantize.tensor(Tensor qtensor) -> Tensor",
  2060. "category": "Quantization"
  2061. },
  2062. {
  2063. "name": "aten::dequantize.list(Tensor[] qtensors) -> Tensor[]",
  2064. "category": "Quantization"
  2065. },
  2066. {
  2067. "name": "aten::dequantize.any(Any tensors) -> Any",
  2068. "category": "Quantization"
  2069. },
  2070. {
  2071. "name": "aten::detach(Tensor(a) self) -> Tensor(a)"
  2072. },
  2073. {
  2074. "name": "aten::detach_(Tensor(a!) self) -> Tensor(a!)"
  2075. },
  2076. {
  2077. "name": "aten::detach_copy(Tensor self) -> Tensor"
  2078. },
  2079. {
  2080. "name": "aten::detach_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  2081. },
  2082. {
  2083. "name": "aten::device(str a) -> Device"
  2084. },
  2085. {
  2086. "name": "aten::device.with_index(str type, int index) -> Device"
  2087. },
  2088. {
  2089. "name": "aten::diag(Tensor self, int diagonal=0) -> Tensor"
  2090. },
  2091. {
  2092. "name": "aten::diag.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!)"
  2093. },
  2094. {
  2095. "name": "aten::diag_embed(Tensor self, int offset=0, int dim1=-2, int dim2=-1) -> Tensor"
  2096. },
  2097. {
  2098. "name": "aten::diag_embed.out(Tensor self, int offset=0, int dim1=-2, int dim2=-1, *, Tensor(a!) out) -> Tensor(a!)"
  2099. },
  2100. {
  2101. "name": "aten::diagflat(Tensor self, int offset=0) -> Tensor"
  2102. },
  2103. {
  2104. "name": "aten::diagonal(Tensor(a) self, int offset=0, int dim1=0, int dim2=1) -> Tensor(a)"
  2105. },
  2106. {
  2107. "name": "aten::diagonal.Dimname(Tensor(a) self, *, str outdim, str dim1, str dim2, int offset=0) -> Tensor(a)"
  2108. },
  2109. {
  2110. "name": "aten::diagonal_backward(Tensor grad_output, SymInt[] input_sizes, int offset, int dim1, int dim2) -> Tensor"
  2111. },
  2112. {
  2113. "name": "aten::diagonal_backward.out(Tensor grad_output, SymInt[] input_sizes, int offset, int dim1, int dim2, *, Tensor(a!) out) -> Tensor(a!)"
  2114. },
  2115. {
  2116. "name": "aten::diagonal_copy(Tensor self, int offset=0, int dim1=0, int dim2=1) -> Tensor"
  2117. },
  2118. {
  2119. "name": "aten::diagonal_copy.out(Tensor self, int offset=0, int dim1=0, int dim2=1, *, Tensor(a!) out) -> Tensor(a!)"
  2120. },
  2121. {
  2122. "name": "aten::diagonal_scatter(Tensor self, Tensor src, int offset=0, int dim1=0, int dim2=1) -> Tensor"
  2123. },
  2124. {
  2125. "name": "aten::diagonal_scatter.out(Tensor self, Tensor src, int offset=0, int dim1=0, int dim2=1, *, Tensor(a!) out) -> Tensor(a!)"
  2126. },
  2127. {
  2128. "name": "aten::dict() -> Dict(str, Tensor)"
  2129. },
  2130. {
  2131. "name": "aten::dict.str((str, tVal)[] inputs) -> Dict(str, tVal)"
  2132. },
  2133. {
  2134. "name": "aten::dict.Dict_str(Dict(str, t)(a) self) -> Dict(str, t)"
  2135. },
  2136. {
  2137. "name": "aten::dict.int((int, tVal)[] inputs) -> Dict(int, tVal)"
  2138. },
  2139. {
  2140. "name": "aten::dict.Dict_int(Dict(int, t)(a) self) -> Dict(int, t)"
  2141. },
  2142. {
  2143. "name": "aten::dict.bool((bool, tVal)[] inputs) -> Dict(bool, tVal)"
  2144. },
  2145. {
  2146. "name": "aten::dict.Dict_bool(Dict(bool, t)(a) self) -> Dict(bool, t)"
  2147. },
  2148. {
  2149. "name": "aten::dict.float((float, tVal)[] inputs) -> Dict(float, tVal)"
  2150. },
  2151. {
  2152. "name": "aten::dict.Dict_float(Dict(float, t)(a) self) -> Dict(float, t)"
  2153. },
  2154. {
  2155. "name": "aten::dict.complex((complex, tVal)[] inputs) -> Dict(complex, tVal)"
  2156. },
  2157. {
  2158. "name": "aten::dict.Dict_complex(Dict(complex, t)(a) self) -> Dict(complex, t)"
  2159. },
  2160. {
  2161. "name": "aten::dict.Tensor((Tensor, tVal)[] inputs) -> Dict(Tensor, tVal)"
  2162. },
  2163. {
  2164. "name": "aten::dict.Dict_Tensor(Dict(Tensor, t)(a) self) -> Dict(Tensor, t)"
  2165. },
  2166. {
  2167. "name": "aten::diff(Tensor self, int n=1, int dim=-1, Tensor? prepend=None, Tensor? append=None) -> Tensor"
  2168. },
  2169. {
  2170. "name": "aten::diff.out(Tensor self, int n=1, int dim=-1, Tensor? prepend=None, Tensor? append=None, *, Tensor(a!) out) -> Tensor(a!)"
  2171. },
  2172. {
  2173. "name": "aten::dim(Tensor self) -> int"
  2174. },
  2175. {
  2176. "name": "aten::dist(Tensor self, Tensor other, Scalar p=2) -> Tensor"
  2177. },
  2178. {
  2179. "name": "aten::dist.out(Tensor self, Tensor other, Scalar p=2, *, Tensor(a!) out) -> Tensor(a!)"
  2180. },
  2181. {
  2182. "name": "aten::div.Tensor(Tensor self, Tensor other) -> Tensor"
  2183. },
  2184. {
  2185. "name": "aten::div.Scalar(Tensor self, Scalar other) -> Tensor"
  2186. },
  2187. {
  2188. "name": "aten::div.Tensor_mode(Tensor self, Tensor other, *, str? rounding_mode) -> Tensor"
  2189. },
  2190. {
  2191. "name": "aten::div.Scalar_mode(Tensor self, Scalar other, *, str? rounding_mode) -> Tensor"
  2192. },
  2193. {
  2194. "name": "aten::div.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  2195. },
  2196. {
  2197. "name": "aten::div.out_mode(Tensor self, Tensor other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!)"
  2198. },
  2199. {
  2200. "name": "aten::div.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)"
  2201. },
  2202. {
  2203. "name": "aten::div.Scalar_mode_out(Tensor self, Scalar other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!)"
  2204. },
  2205. {
  2206. "name": "aten::div.int(int a, int b) -> float"
  2207. },
  2208. {
  2209. "name": "aten::div.complex(complex a, complex b) -> complex"
  2210. },
  2211. {
  2212. "name": "aten::div.float(float a, float b) -> float"
  2213. },
  2214. {
  2215. "name": "aten::div(Scalar a, Scalar b) -> float"
  2216. },
  2217. {
  2218. "name": "aten::div_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)"
  2219. },
  2220. {
  2221. "name": "aten::div_.Tensor_mode(Tensor(a!) self, Tensor other, *, str? rounding_mode) -> Tensor(a!)"
  2222. },
  2223. {
  2224. "name": "aten::div_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)"
  2225. },
  2226. {
  2227. "name": "aten::div_.Scalar_mode(Tensor(a!) self, Scalar other, *, str? rounding_mode) -> Tensor(a!)"
  2228. },
  2229. {
  2230. "name": "aten::divide.Tensor(Tensor self, Tensor other) -> Tensor"
  2231. },
  2232. {
  2233. "name": "aten::divide.Scalar(Tensor self, Scalar other) -> Tensor"
  2234. },
  2235. {
  2236. "name": "aten::divide.Tensor_mode(Tensor self, Tensor other, *, str? rounding_mode) -> Tensor"
  2237. },
  2238. {
  2239. "name": "aten::divide.Scalar_mode(Tensor self, Scalar other, *, str? rounding_mode) -> Tensor"
  2240. },
  2241. {
  2242. "name": "aten::divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  2243. },
  2244. {
  2245. "name": "aten::divide.out_mode(Tensor self, Tensor other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!)"
  2246. },
  2247. {
  2248. "name": "aten::divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)"
  2249. },
  2250. {
  2251. "name": "aten::divide_.Tensor_mode(Tensor(a!) self, Tensor other, *, str? rounding_mode) -> Tensor(a!)"
  2252. },
  2253. {
  2254. "name": "aten::divide_.Scalar_mode(Tensor(a!) self, Scalar other, *, str? rounding_mode) -> Tensor(a!)"
  2255. },
  2256. {
  2257. "name": "aten::divide_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)"
  2258. },
  2259. {
  2260. "name": "aten::divmod.int(int x, int y) -> (int, int)"
  2261. },
  2262. {
  2263. "name": "aten::divmod.float(float x, float y) -> (float, float)"
  2264. },
  2265. {
  2266. "name": "aten::divmod.int_float(int x, float y) -> (float, float)"
  2267. },
  2268. {
  2269. "name": "aten::divmod.float_int(float x, int y) -> (float, float)"
  2270. },
  2271. {
  2272. "name": "aten::dot(Tensor self, Tensor tensor) -> Tensor"
  2273. },
  2274. {
  2275. "name": "aten::dot.out(Tensor self, Tensor tensor, *, Tensor(a!) out) -> Tensor(a!)"
  2276. },
  2277. {
  2278. "name": "aten::dropout(Tensor input, float p, bool train) -> Tensor",
  2279. "category": "Dropout"
  2280. },
  2281. {
  2282. "name": "aten::dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)",
  2283. "category": "Dropout"
  2284. },
  2285. {
  2286. "name": "aten::einsum(str equation, Tensor[] tensors, *, int[]? path=None) -> Tensor"
  2287. },
  2288. {
  2289. "name": "aten::einsum.sublist(Tensor a, ...) -> Tensor"
  2290. },
  2291. {
  2292. "name": "aten::elu(Tensor self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1) -> Tensor",
  2293. "category": "Activation"
  2294. },
  2295. {
  2296. "name": "aten::elu.out(Tensor self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1, *, Tensor(a!) out) -> Tensor(a!)"
  2297. },
  2298. {
  2299. "name": "aten::elu_(Tensor(a!) self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1) -> Tensor(a!)",
  2300. "category": "Activation"
  2301. },
  2302. {
  2303. "name": "aten::embedding(Tensor weight, Tensor indices, SymInt padding_idx=-1, bool scale_grad_by_freq=False, bool sparse=False) -> Tensor",
  2304. "category": "Transform"
  2305. },
  2306. {
  2307. "name": "aten::embedding.out(Tensor weight, Tensor indices, SymInt padding_idx=-1, bool scale_grad_by_freq=False, bool sparse=False, *, Tensor(a!) out) -> Tensor(a!)",
  2308. "category": "Transform"
  2309. },
  2310. {
  2311. "name": "aten::embedding_bag(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False) -> (Tensor, Tensor, Tensor, Tensor)",
  2312. "category": "Transform"
  2313. },
  2314. {
  2315. "name": "aten::embedding_bag.padding_idx(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq, int mode, bool sparse, Tensor? per_sample_weights, bool include_last_offset, int? padding_idx) -> (Tensor, Tensor, Tensor, Tensor)",
  2316. "category": "Transform"
  2317. },
  2318. {
  2319. "name": "aten::embedding_renorm_(Tensor(a!) self, Tensor indices, float max_norm, float norm_type) -> Tensor(a!)",
  2320. "category": "Transform"
  2321. },
  2322. {
  2323. "name": "aten::empty.memory_format(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor"
  2324. },
  2325. {
  2326. "name": "aten::empty.out(SymInt[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)"
  2327. },
  2328. {
  2329. "name": "aten::empty.names(int[] size, *, str[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor"
  2330. },
  2331. {
  2332. "name": "aten::empty.names_out(int[] size, *, str[]? names, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)"
  2333. },
  2334. {
  2335. "name": "aten::empty_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor"
  2336. },
  2337. {
  2338. "name": "aten::empty_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)"
  2339. },
  2340. {
  2341. "name": "aten::empty_permuted(SymInt[] size, int[] physical_layout, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  2342. },
  2343. {
  2344. "name": "aten::empty_permuted.out(SymInt[] size, int[] physical_layout, *, Tensor(a!) out) -> Tensor(a!)"
  2345. },
  2346. {
  2347. "name": "aten::eq.Tensor(Tensor self, Tensor other) -> Tensor"
  2348. },
  2349. {
  2350. "name": "aten::eq.Scalar(Tensor self, Scalar other) -> Tensor"
  2351. },
  2352. {
  2353. "name": "aten::eq.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)"
  2354. },
  2355. {
  2356. "name": "aten::eq.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  2357. },
  2358. {
  2359. "name": "aten::eq.int_list(int[] a, int[] b) -> bool"
  2360. },
  2361. {
  2362. "name": "aten::eq.device(Device a, Device b) -> bool"
  2363. },
  2364. {
  2365. "name": "aten::eq.bool(bool a, bool b) -> bool"
  2366. },
  2367. {
  2368. "name": "aten::eq.enum(AnyEnumType a, AnyEnumType b) -> bool"
  2369. },
  2370. {
  2371. "name": "aten::eq.int(int a, int b) -> bool"
  2372. },
  2373. {
  2374. "name": "aten::eq.complex(complex a, complex b) -> bool"
  2375. },
  2376. {
  2377. "name": "aten::eq.float(float a, float b) -> bool"
  2378. },
  2379. {
  2380. "name": "aten::eq.int_float(int a, float b) -> bool"
  2381. },
  2382. {
  2383. "name": "aten::eq.float_int(float a, int b) -> bool"
  2384. },
  2385. {
  2386. "name": "aten::eq.float_complex(float a, complex b) -> bool"
  2387. },
  2388. {
  2389. "name": "aten::eq.complex_float(complex a, float b) -> bool"
  2390. },
  2391. {
  2392. "name": "aten::eq(Scalar a, Scalar b) -> bool"
  2393. },
  2394. {
  2395. "name": "aten::eq.str(str a, str b) -> bool"
  2396. },
  2397. {
  2398. "name": "aten::eq.float_list(float[] a, float[] b) -> bool"
  2399. },
  2400. {
  2401. "name": "aten::eq.Tensor_list(Tensor[] a, Tensor[] b) -> bool"
  2402. },
  2403. {
  2404. "name": "aten::eq.bool_list(bool[] a, bool[] b) -> bool"
  2405. },
  2406. {
  2407. "name": "aten::eq.str_list(str[] a, str[] b) -> bool"
  2408. },
  2409. {
  2410. "name": "aten::equal(Tensor self, Tensor other) -> bool"
  2411. },
  2412. {
  2413. "name": "aten::erf(Tensor self) -> Tensor"
  2414. },
  2415. {
  2416. "name": "aten::erf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  2417. },
  2418. {
  2419. "name": "aten::erf.int(int a) -> float"
  2420. },
  2421. {
  2422. "name": "aten::erf.float(float a) -> float"
  2423. },
  2424. {
  2425. "name": "aten::erf.Scalar(Scalar a) -> Scalar"
  2426. },
  2427. {
  2428. "name": "aten::erfc(Tensor self) -> Tensor"
  2429. },
  2430. {
  2431. "name": "aten::erfc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  2432. },
  2433. {
  2434. "name": "aten::erfc.int(int a) -> float"
  2435. },
  2436. {
  2437. "name": "aten::erfc.float(float a) -> float"
  2438. },
  2439. {
  2440. "name": "aten::erfc.Scalar(Scalar a) -> Scalar"
  2441. },
  2442. {
  2443. "name": "aten::erfinv(Tensor self) -> Tensor"
  2444. },
  2445. {
  2446. "name": "aten::erfinv.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  2447. },
  2448. {
  2449. "name": "aten::exp(Tensor self) -> Tensor"
  2450. },
  2451. {
  2452. "name": "aten::exp.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  2453. },
  2454. {
  2455. "name": "aten::exp.int(int a) -> float"
  2456. },
  2457. {
  2458. "name": "aten::exp.float(float a) -> float"
  2459. },
  2460. {
  2461. "name": "aten::exp.complex(complex a) -> complex"
  2462. },
  2463. {
  2464. "name": "aten::exp.Scalar(Scalar a) -> Scalar"
  2465. },
  2466. {
  2467. "name": "aten::exp2(Tensor self) -> Tensor"
  2468. },
  2469. {
  2470. "name": "aten::exp2.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  2471. },
  2472. {
  2473. "name": "aten::exp_(Tensor(a!) self) -> Tensor(a!)"
  2474. },
  2475. {
  2476. "name": "aten::expand(Tensor(a) self, SymInt[] size, *, bool implicit=False) -> Tensor(a)"
  2477. },
  2478. {
  2479. "name": "aten::expand_as(Tensor(a) self, Tensor other) -> Tensor(a)"
  2480. },
  2481. {
  2482. "name": "aten::expand_copy(Tensor self, SymInt[] size, *, bool implicit=False) -> Tensor"
  2483. },
  2484. {
  2485. "name": "aten::expand_copy.out(Tensor self, SymInt[] size, *, bool implicit=False, Tensor(a!) out) -> Tensor(a!)"
  2486. },
  2487. {
  2488. "name": "aten::expm1(Tensor self) -> Tensor"
  2489. },
  2490. {
  2491. "name": "aten::expm1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  2492. },
  2493. {
  2494. "name": "aten::expm1.int(int a) -> float"
  2495. },
  2496. {
  2497. "name": "aten::expm1.float(float a) -> float"
  2498. },
  2499. {
  2500. "name": "aten::expm1.Scalar(Scalar a) -> Scalar"
  2501. },
  2502. {
  2503. "name": "aten::expm1_(Tensor(a!) self) -> Tensor(a!)"
  2504. },
  2505. {
  2506. "name": "aten::exponential_(Tensor(a!) self, float lambd=1., *, Generator? generator=None) -> Tensor(a!)"
  2507. },
  2508. {
  2509. "name": "aten::extend.t(t[](a!) self, t[] other) -> ()"
  2510. },
  2511. {
  2512. "name": "aten::eye(SymInt n, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  2513. },
  2514. {
  2515. "name": "aten::eye.m(SymInt n, SymInt m, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  2516. },
  2517. {
  2518. "name": "aten::eye.out(SymInt n, *, Tensor(a!) out) -> Tensor(a!)"
  2519. },
  2520. {
  2521. "name": "aten::eye.m_out(SymInt n, SymInt m, *, Tensor(a!) out) -> Tensor(a!)"
  2522. },
  2523. {
  2524. "name": "aten::fake_quantize_per_channel_affine(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max) -> Tensor",
  2525. "category": "Quantization"
  2526. },
  2527. {
  2528. "name": "aten::fake_quantize_per_tensor_affine(Tensor self, float scale, int zero_point, int quant_min, int quant_max) -> Tensor",
  2529. "category": "Quantization"
  2530. },
  2531. {
  2532. "name": "aten::fake_quantize_per_tensor_affine.tensor_qparams(Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max) -> Tensor",
  2533. "category": "Quantization"
  2534. },
  2535. {
  2536. "name": "aten::fake_quantize_per_tensor_affine_cachemask(Tensor self, float scale, int zero_point, int quant_min, int quant_max) -> (Tensor output, Tensor mask)",
  2537. "category": "Quantization"
  2538. },
  2539. {
  2540. "name": "aten::fake_quantize_per_tensor_affine_cachemask.out(Tensor self, float scale, int zero_point, int quant_min, int quant_max, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))",
  2541. "category": "Quantization"
  2542. },
  2543. {
  2544. "name": "aten::fake_quantize_per_tensor_affine_cachemask_backward(Tensor grad, Tensor mask) -> Tensor",
  2545. "category": "Quantization"
  2546. },
  2547. {
  2548. "name": "aten::feature_alpha_dropout(Tensor input, float p, bool train) -> Tensor",
  2549. "category": "Dropout"
  2550. },
  2551. {
  2552. "name": "aten::feature_alpha_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)",
  2553. "category": "Dropout"
  2554. },
  2555. {
  2556. "name": "aten::feature_dropout(Tensor input, float p, bool train) -> Tensor",
  2557. "category": "Dropout"
  2558. },
  2559. {
  2560. "name": "aten::feature_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)",
  2561. "category": "Dropout"
  2562. },
  2563. {
  2564. "name": "aten::fft(Tensor self, int signal_ndim, bool normalized=False) -> Tensor"
  2565. },
  2566. {
  2567. "name": "aten::fft_fft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor"
  2568. },
  2569. {
  2570. "name": "aten::fft_fft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)"
  2571. },
  2572. {
  2573. "name": "aten::fft_fft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2, -1], str? norm=None) -> Tensor"
  2574. },
  2575. {
  2576. "name": "aten::fft_fft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2, -1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)"
  2577. },
  2578. {
  2579. "name": "aten::fft_fftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor"
  2580. },
  2581. {
  2582. "name": "aten::fft_fftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)"
  2583. },
  2584. {
  2585. "name": "aten::fft_fftshift(Tensor self, int[1]? dim=None) -> Tensor"
  2586. },
  2587. {
  2588. "name": "aten::fft_hfft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2, -1], str? norm=None) -> Tensor"
  2589. },
  2590. {
  2591. "name": "aten::fft_hfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2, -1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)"
  2592. },
  2593. {
  2594. "name": "aten::fft_hfftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor"
  2595. },
  2596. {
  2597. "name": "aten::fft_hfftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)"
  2598. },
  2599. {
  2600. "name": "aten::fft_ifft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor"
  2601. },
  2602. {
  2603. "name": "aten::fft_ifft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)"
  2604. },
  2605. {
  2606. "name": "aten::fft_ifft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2, -1], str? norm=None) -> Tensor"
  2607. },
  2608. {
  2609. "name": "aten::fft_ifft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2, -1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)"
  2610. },
  2611. {
  2612. "name": "aten::fft_ifftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor"
  2613. },
  2614. {
  2615. "name": "aten::fft_ifftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)"
  2616. },
  2617. {
  2618. "name": "aten::fft_ifftshift(Tensor self, int[1]? dim=None) -> Tensor"
  2619. },
  2620. {
  2621. "name": "aten::fft_ihfft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2, -1], str? norm=None) -> Tensor"
  2622. },
  2623. {
  2624. "name": "aten::fft_ihfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2, -1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)"
  2625. },
  2626. {
  2627. "name": "aten::fft_ihfftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor"
  2628. },
  2629. {
  2630. "name": "aten::fft_ihfftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)"
  2631. },
  2632. {
  2633. "name": "aten::fft_irfft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor"
  2634. },
  2635. {
  2636. "name": "aten::fft_irfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)"
  2637. },
  2638. {
  2639. "name": "aten::fft_irfft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2, -1], str? norm=None) -> Tensor"
  2640. },
  2641. {
  2642. "name": "aten::fft_irfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2, -1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)"
  2643. },
  2644. {
  2645. "name": "aten::fft_irfftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor"
  2646. },
  2647. {
  2648. "name": "aten::fft_irfftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)"
  2649. },
  2650. {
  2651. "name": "aten::fft_rfft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor"
  2652. },
  2653. {
  2654. "name": "aten::fft_rfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)"
  2655. },
  2656. {
  2657. "name": "aten::fft_rfft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2, -1], str? norm=None) -> Tensor"
  2658. },
  2659. {
  2660. "name": "aten::fft_rfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2, -1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)"
  2661. },
  2662. {
  2663. "name": "aten::fft_rfftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor"
  2664. },
  2665. {
  2666. "name": "aten::fft_rfftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)"
  2667. },
  2668. {
  2669. "name": "aten::fill.Scalar(Tensor self, Scalar value) -> Tensor"
  2670. },
  2671. {
  2672. "name": "aten::fill.Scalar_out(Tensor self, Scalar value, *, Tensor(a!) out) -> Tensor(a!)"
  2673. },
  2674. {
  2675. "name": "aten::fill.Tensor(Tensor self, Tensor value) -> Tensor"
  2676. },
  2677. {
  2678. "name": "aten::fill.Tensor_out(Tensor self, Tensor value, *, Tensor(a!) out) -> Tensor(a!)"
  2679. },
  2680. {
  2681. "name": "aten::fill_.Scalar(Tensor(a!) self, Scalar value) -> Tensor(a!)"
  2682. },
  2683. {
  2684. "name": "aten::fill_.Tensor(Tensor(a!) self, Tensor value) -> Tensor(a!)"
  2685. },
  2686. {
  2687. "name": "aten::find(str self, str substr, int start=0, int end=-1) -> int"
  2688. },
  2689. {
  2690. "name": "aten::flatten.using_ints(Tensor(a) self, int start_dim=0, int end_dim=-1) -> Tensor(a)",
  2691. "category": "Shape"
  2692. },
  2693. {
  2694. "name": "aten::flatten.DimnameList(Tensor(a) self, str[] dims, str out_dim) -> Tensor(a)",
  2695. "category": "Shape"
  2696. },
  2697. {
  2698. "name": "aten::flatten.named_out_dim(Tensor(a) self, int start_dim, int end_dim, str out_dim) -> Tensor(a)",
  2699. "category": "Shape"
  2700. },
  2701. {
  2702. "name": "aten::flatten.using_names(Tensor(a) self, str start_dim, str end_dim, str out_dim) -> Tensor(a)",
  2703. "category": "Shape"
  2704. },
  2705. {
  2706. "name": "aten::flip(Tensor self, int[] dims) -> Tensor"
  2707. },
  2708. {
  2709. "name": "aten::flip.out(Tensor self, int[] dims, *, Tensor(a!) out) -> Tensor(a!)"
  2710. },
  2711. {
  2712. "name": "aten::floor(Tensor self) -> Tensor"
  2713. },
  2714. {
  2715. "name": "aten::floor.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  2716. },
  2717. {
  2718. "name": "aten::floor.int(int a) -> int"
  2719. },
  2720. {
  2721. "name": "aten::floor.float(float a) -> int"
  2722. },
  2723. {
  2724. "name": "aten::floor.Scalar(Scalar a) -> Scalar"
  2725. },
  2726. {
  2727. "name": "aten::floor_(Tensor(a!) self) -> Tensor(a!)"
  2728. },
  2729. {
  2730. "name": "aten::floor_divide(Tensor self, Tensor other) -> Tensor"
  2731. },
  2732. {
  2733. "name": "aten::floor_divide.Scalar(Tensor self, Scalar other) -> Tensor"
  2734. },
  2735. {
  2736. "name": "aten::floor_divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  2737. },
  2738. {
  2739. "name": "aten::floor_divide.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)"
  2740. },
  2741. {
  2742. "name": "aten::floor_divide_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)"
  2743. },
  2744. {
  2745. "name": "aten::floor_divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)"
  2746. },
  2747. {
  2748. "name": "aten::floordiv.int(int a, int b) -> int"
  2749. },
  2750. {
  2751. "name": "aten::floordiv.float(float a, float b) -> float"
  2752. },
  2753. {
  2754. "name": "aten::floordiv.int_float(int a, float b) -> float"
  2755. },
  2756. {
  2757. "name": "aten::floordiv.float_int(float a, int b) -> float"
  2758. },
  2759. {
  2760. "name": "aten::floordiv(Scalar a, Scalar b) -> Scalar"
  2761. },
  2762. {
  2763. "name": "aten::fmod.Tensor(Tensor self, Tensor other) -> Tensor"
  2764. },
  2765. {
  2766. "name": "aten::fmod.Scalar(Tensor self, Scalar other) -> Tensor"
  2767. },
  2768. {
  2769. "name": "aten::fmod.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  2770. },
  2771. {
  2772. "name": "aten::fmod.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)"
  2773. },
  2774. {
  2775. "name": "aten::fmod.int(int a, int b) -> float"
  2776. },
  2777. {
  2778. "name": "aten::fmod.float(float a, float b) -> float"
  2779. },
  2780. {
  2781. "name": "aten::fmod.int_float(int a, float b) -> float"
  2782. },
  2783. {
  2784. "name": "aten::fmod.float_int(float a, int b) -> float"
  2785. },
  2786. {
  2787. "name": "aten::fmod(Scalar a, Scalar b) -> float"
  2788. },
  2789. {
  2790. "name": "aten::format(str self, ...) -> str",
  2791. "is_vararg": true
  2792. },
  2793. {
  2794. "name": "aten::frexp.Tensor(Tensor self) -> (Tensor mantissa, Tensor exponent)"
  2795. },
  2796. {
  2797. "name": "aten::frexp.Tensor_out(Tensor self, *, Tensor(a!) mantissa, Tensor(b!) exponent) -> (Tensor(a!) mantissa, Tensor(b!) exponent)"
  2798. },
  2799. {
  2800. "name": "aten::frexp(float a) -> (float, int)"
  2801. },
  2802. {
  2803. "name": "aten::frobenius_norm.dim(Tensor self, int[1] dim, bool keepdim=False) -> Tensor",
  2804. "category": "Normalization"
  2805. },
  2806. {
  2807. "name": "aten::frobenius_norm.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)"
  2808. },
  2809. {
  2810. "name": "aten::full.names(int[] size, Scalar fill_value, *, str[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  2811. },
  2812. {
  2813. "name": "aten::full(SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  2814. },
  2815. {
  2816. "name": "aten::full.names_out(int[] size, Scalar fill_value, *, str[]? names, Tensor(a!) out) -> Tensor(a!)"
  2817. },
  2818. {
  2819. "name": "aten::full.out(SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!)"
  2820. },
  2821. {
  2822. "name": "aten::full_like(Tensor self, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor"
  2823. },
  2824. {
  2825. "name": "aten::full_like.out(Tensor self, Scalar fill_value, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)"
  2826. },
  2827. {
  2828. "name": "aten::fused_moving_avg_obs_fake_quant(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor(a!) running_min, Tensor(b!) running_max, Tensor(c!) scale, Tensor(d!) zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False) -> Tensor"
  2829. },
  2830. {
  2831. "name": "aten::gather(Tensor self, int dim, Tensor index, *, bool sparse_grad=False) -> Tensor",
  2832. "category": "Transform"
  2833. },
  2834. {
  2835. "name": "aten::gather.out(Tensor self, int dim, Tensor index, *, bool sparse_grad=False, Tensor(a!) out) -> Tensor(a!)",
  2836. "category": "Transform"
  2837. },
  2838. {
  2839. "name": "aten::gather.dimname(Tensor self, str dim, Tensor index, *, bool sparse_grad=False) -> Tensor",
  2840. "category": "Transform"
  2841. },
  2842. {
  2843. "name": "aten::gather.dimname_out(Tensor self, str dim, Tensor index, *, bool sparse_grad=False, Tensor(a!) out) -> Tensor(a!)",
  2844. "category": "Transform"
  2845. },
  2846. {
  2847. "name": "aten::gather_backward(Tensor grad, Tensor self, int dim, Tensor index, bool sparse_grad) -> Tensor"
  2848. },
  2849. {
  2850. "name": "aten::gcd(Tensor self, Tensor other) -> Tensor"
  2851. },
  2852. {
  2853. "name": "aten::gcd.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  2854. },
  2855. {
  2856. "name": "aten::gcd.int(int a, int b) -> int"
  2857. },
  2858. {
  2859. "name": "aten::gcd_(Tensor(a!) self, Tensor other) -> Tensor(a!)"
  2860. },
  2861. {
  2862. "name": "aten::ge.Tensor(Tensor self, Tensor other) -> Tensor"
  2863. },
  2864. {
  2865. "name": "aten::ge.Scalar(Tensor self, Scalar other) -> Tensor"
  2866. },
  2867. {
  2868. "name": "aten::ge.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)"
  2869. },
  2870. {
  2871. "name": "aten::ge.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  2872. },
  2873. {
  2874. "name": "aten::ge.int(int a, int b) -> bool"
  2875. },
  2876. {
  2877. "name": "aten::ge.float(float a, float b) -> bool"
  2878. },
  2879. {
  2880. "name": "aten::ge.int_float(int a, float b) -> bool"
  2881. },
  2882. {
  2883. "name": "aten::ge.float_int(float a, int b) -> bool"
  2884. },
  2885. {
  2886. "name": "aten::ge(Scalar a, Scalar b) -> bool"
  2887. },
  2888. {
  2889. "name": "aten::ge.str(str a, str b) -> bool"
  2890. },
  2891. {
  2892. "name": "aten::ge_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)"
  2893. },
  2894. {
  2895. "name": "aten::ge_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)"
  2896. },
  2897. {
  2898. "name": "aten::gelu(Tensor self, *, str approximate=\"none\") -> Tensor",
  2899. "category": "Activation"
  2900. },
  2901. {
  2902. "name": "aten::gelu.out(Tensor self, *, str approximate=\"none\", Tensor(a!) out) -> Tensor(a!)",
  2903. "category": "Activation"
  2904. },
  2905. {
  2906. "name": "aten::gelu_(Tensor(a!) self, *, str approximate=\"none\") -> Tensor(a!)",
  2907. "category": "Activation"
  2908. },
  2909. {
  2910. "name": "aten::gelu_backward(Tensor grad_output, Tensor self, *, str approximate=\"none\") -> Tensor",
  2911. "category": "Activation"
  2912. },
  2913. {
  2914. "name": "aten::gelu_backward.grad_input(Tensor grad_output, Tensor self, *, str approximate=\"none\", Tensor(a!) grad_input) -> Tensor(a!)",
  2915. "category": "Activation"
  2916. },
  2917. {
  2918. "name": "aten::geometric_(Tensor(a!) self, float p, *, Generator? generator=None) -> Tensor(a!)"
  2919. },
  2920. {
  2921. "name": "aten::geqrf(Tensor self) -> (Tensor a, Tensor tau)"
  2922. },
  2923. {
  2924. "name": "aten::geqrf.a(Tensor self, *, Tensor(a!) a, Tensor(b!) tau) -> (Tensor(a!) a, Tensor(b!) tau)"
  2925. },
  2926. {
  2927. "name": "aten::ger(Tensor self, Tensor vec2) -> Tensor"
  2928. },
  2929. {
  2930. "name": "aten::ger.out(Tensor self, Tensor vec2, *, Tensor(a!) out) -> Tensor(a!)"
  2931. },
  2932. {
  2933. "name": "aten::get.str(Dict(str, t) self, str key) -> t(*)?"
  2934. },
  2935. {
  2936. "name": "aten::get.default_str(Dict(str, t) self, str key, t default_value) -> t(*)"
  2937. },
  2938. {
  2939. "name": "aten::get.int(Dict(int, t) self, int key) -> t(*)?"
  2940. },
  2941. {
  2942. "name": "aten::get.default_int(Dict(int, t) self, int key, t default_value) -> t(*)"
  2943. },
  2944. {
  2945. "name": "aten::get.bool(Dict(bool, t) self, bool key) -> t(*)?"
  2946. },
  2947. {
  2948. "name": "aten::get.default_bool(Dict(bool, t) self, bool key, t default_value) -> t(*)"
  2949. },
  2950. {
  2951. "name": "aten::get.float(Dict(float, t) self, float key) -> t(*)?"
  2952. },
  2953. {
  2954. "name": "aten::get.default_float(Dict(float, t) self, float key, t default_value) -> t(*)"
  2955. },
  2956. {
  2957. "name": "aten::get.complex(Dict(complex, t) self, complex key) -> t(*)?"
  2958. },
  2959. {
  2960. "name": "aten::get.default_complex(Dict(complex, t) self, complex key, t default_value) -> t(*)"
  2961. },
  2962. {
  2963. "name": "aten::get.Tensor(Dict(Tensor, t) self, Tensor key) -> t(*)?"
  2964. },
  2965. {
  2966. "name": "aten::get.default_Tensor(Dict(Tensor, t) self, Tensor key, t default_value) -> t(*)"
  2967. },
  2968. {
  2969. "name": "aten::get_autocast_dtype(str device_type) -> ScalarType"
  2970. },
  2971. {
  2972. "name": "aten::get_device(Tensor self) -> int"
  2973. },
  2974. {
  2975. "name": "aten::get_num_threads() -> int"
  2976. },
  2977. {
  2978. "name": "aten::glu(Tensor self, int dim=-1) -> Tensor",
  2979. "category": "Activation"
  2980. },
  2981. {
  2982. "name": "aten::glu.out(Tensor self, int dim=-1, *, Tensor(a!) out) -> Tensor(a!)"
  2983. },
  2984. {
  2985. "name": "aten::grad(Tensor[] outputs, Tensor[] inputs, Tensor?[]? grad_outputs=None, bool? retain_graph=None, bool create_graph=False, bool allow_unused=False) -> Tensor?[]"
  2986. },
  2987. {
  2988. "name": "aten::greater.Tensor(Tensor self, Tensor other) -> Tensor"
  2989. },
  2990. {
  2991. "name": "aten::greater.Scalar(Tensor self, Scalar other) -> Tensor"
  2992. },
  2993. {
  2994. "name": "aten::greater.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)"
  2995. },
  2996. {
  2997. "name": "aten::greater.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  2998. },
  2999. {
  3000. "name": "aten::greater(Tensor self, Tensor other) -> Tensor"
  3001. },
  3002. {
  3003. "name": "aten::greater_equal.Tensor(Tensor self, Tensor other) -> Tensor"
  3004. },
  3005. {
  3006. "name": "aten::greater_equal.Scalar(Tensor self, Scalar other) -> Tensor"
  3007. },
  3008. {
  3009. "name": "aten::greater_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)"
  3010. },
  3011. {
  3012. "name": "aten::greater_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  3013. },
  3014. {
  3015. "name": "aten::greater_equal_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)"
  3016. },
  3017. {
  3018. "name": "aten::greater_equal_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)"
  3019. },
  3020. {
  3021. "name": "aten::grid_sampler(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor"
  3022. },
  3023. {
  3024. "name": "aten::grid_sampler.legacy(Tensor input, Tensor grid, int interpolation_mode, int padding_mode) -> Tensor"
  3025. },
  3026. {
  3027. "name": "aten::grid_sampler_2d(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor"
  3028. },
  3029. {
  3030. "name": "aten::grid_sampler_2d.out(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, *, Tensor(a!) out) -> Tensor(a!)"
  3031. },
  3032. {
  3033. "name": "aten::group_norm(Tensor input, int num_groups, Tensor? weight=None, Tensor? bias=None, float eps=1.0000000000000001e-05, bool cudnn_enabled=True) -> Tensor",
  3034. "category": "Normalization"
  3035. },
  3036. {
  3037. "name": "aten::gru.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor)",
  3038. "category": "Layer"
  3039. },
  3040. {
  3041. "name": "aten::gru.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor)",
  3042. "category": "Layer"
  3043. },
  3044. {
  3045. "name": "aten::gru_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> Tensor"
  3046. },
  3047. {
  3048. "name": "aten::gt.Tensor(Tensor self, Tensor other) -> Tensor"
  3049. },
  3050. {
  3051. "name": "aten::gt.Scalar(Tensor self, Scalar other) -> Tensor"
  3052. },
  3053. {
  3054. "name": "aten::gt.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)"
  3055. },
  3056. {
  3057. "name": "aten::gt.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  3058. },
  3059. {
  3060. "name": "aten::gt.int(int a, int b) -> bool"
  3061. },
  3062. {
  3063. "name": "aten::gt.float(float a, float b) -> bool"
  3064. },
  3065. {
  3066. "name": "aten::gt.int_float(int a, float b) -> bool"
  3067. },
  3068. {
  3069. "name": "aten::gt.float_int(float a, int b) -> bool"
  3070. },
  3071. {
  3072. "name": "aten::gt(Scalar a, Scalar b) -> bool"
  3073. },
  3074. {
  3075. "name": "aten::gt.str(str a, str b) -> bool"
  3076. },
  3077. {
  3078. "name": "aten::hamming_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  3079. },
  3080. {
  3081. "name": "aten::hamming_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  3082. },
  3083. {
  3084. "name": "aten::hamming_window.periodic_alpha(int window_length, bool periodic, float alpha, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  3085. },
  3086. {
  3087. "name": "aten::hamming_window.periodic_alpha_beta(int window_length, bool periodic, float alpha, float beta, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  3088. },
  3089. {
  3090. "name": "aten::hamming_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!)"
  3091. },
  3092. {
  3093. "name": "aten::hamming_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!)"
  3094. },
  3095. {
  3096. "name": "aten::hamming_window.periodic_alpha_out(int window_length, bool periodic, float alpha, *, Tensor(a!) out) -> Tensor(a!)"
  3097. },
  3098. {
  3099. "name": "aten::hamming_window.periodic_alpha_beta_out(int window_length, bool periodic, float alpha, float beta, *, Tensor(a!) out) -> Tensor(a!)"
  3100. },
  3101. {
  3102. "name": "aten::hann_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  3103. },
  3104. {
  3105. "name": "aten::hann_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  3106. },
  3107. {
  3108. "name": "aten::hann_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!)"
  3109. },
  3110. {
  3111. "name": "aten::hann_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!)"
  3112. },
  3113. {
  3114. "name": "aten::hardshrink(Tensor self, Scalar lambd=0.5) -> Tensor"
  3115. },
  3116. {
  3117. "name": "aten::hardshrink.out(Tensor self, Scalar lambd=0.5, *, Tensor(a!) out) -> Tensor(a!)"
  3118. },
  3119. {
  3120. "name": "aten::hardsigmoid(Tensor self) -> Tensor",
  3121. "category": "Activation"
  3122. },
  3123. {
  3124. "name": "aten::hardsigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  3125. },
  3126. {
  3127. "name": "aten::hardsigmoid_(Tensor(a!) self) -> Tensor(a!)",
  3128. "category": "Activation"
  3129. },
  3130. {
  3131. "name": "aten::hardswish(Tensor self) -> Tensor",
  3132. "category": "Activation"
  3133. },
  3134. {
  3135. "name": "aten::hardswish.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  3136. },
  3137. {
  3138. "name": "aten::hardswish_(Tensor(a!) self) -> Tensor(a!)",
  3139. "category": "Activation"
  3140. },
  3141. {
  3142. "name": "aten::hardswish_backward(Tensor grad_output, Tensor self) -> Tensor"
  3143. },
  3144. {
  3145. "name": "aten::hardswish_backward.out(Tensor grad_output, Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  3146. },
  3147. {
  3148. "name": "aten::hardtanh(Tensor self, Scalar min_val=-1, Scalar max_val=1) -> Tensor",
  3149. "category": "Activation"
  3150. },
  3151. {
  3152. "name": "aten::hardtanh.out(Tensor self, Scalar min_val=-1, Scalar max_val=1, *, Tensor(a!) out) -> Tensor(a!)",
  3153. "category": "Activation"
  3154. },
  3155. {
  3156. "name": "aten::hardtanh_(Tensor(a!) self, Scalar min_val=-1, Scalar max_val=1) -> Tensor(a!)",
  3157. "category": "Activation"
  3158. },
  3159. {
  3160. "name": "aten::hardtanh_backward(Tensor grad_output, Tensor self, Scalar min_val, Scalar max_val) -> Tensor"
  3161. },
  3162. {
  3163. "name": "aten::hardtanh_backward.grad_input(Tensor grad_output, Tensor self, Scalar min_val, Scalar max_val, *, Tensor(a!) grad_input) -> Tensor(a!)"
  3164. },
  3165. {
  3166. "name": "aten::has_torch_function(...) -> bool"
  3167. },
  3168. {
  3169. "name": "aten::histc(Tensor self, int bins=100, Scalar min=0, Scalar max=0) -> Tensor"
  3170. },
  3171. {
  3172. "name": "aten::histc.out(Tensor self, int bins=100, Scalar min=0, Scalar max=0, *, Tensor(a!) out) -> Tensor(a!)"
  3173. },
  3174. {
  3175. "name": "aten::hstack(Tensor[] tensors) -> Tensor"
  3176. },
  3177. {
  3178. "name": "aten::hstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)"
  3179. },
  3180. {
  3181. "name": "aten::huber_loss(Tensor self, Tensor target, int reduction=1, float delta=1.) -> Tensor"
  3182. },
  3183. {
  3184. "name": "aten::huber_loss.out(Tensor self, Tensor target, int reduction=1, float delta=1., *, Tensor(a!) out) -> Tensor(a!)"
  3185. },
  3186. {
  3187. "name": "aten::huber_loss_backward.out(Tensor grad_output, Tensor self, Tensor target, int reduction, float delta, *, Tensor(a!) grad_input) -> Tensor(a!)"
  3188. },
  3189. {
  3190. "name": "aten::huber_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, float delta) -> Tensor"
  3191. },
  3192. {
  3193. "name": "aten::i0(Tensor self) -> Tensor"
  3194. },
  3195. {
  3196. "name": "aten::i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  3197. },
  3198. {
  3199. "name": "aten::im2col(Tensor self, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor"
  3200. },
  3201. {
  3202. "name": "aten::im2col.out(Tensor self, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!)"
  3203. },
  3204. {
  3205. "name": "aten::imag(Tensor(a) self) -> Tensor(a)"
  3206. },
  3207. {
  3208. "name": "aten::index.Tensor(Tensor self, Tensor?[] indices) -> Tensor"
  3209. },
  3210. {
  3211. "name": "aten::index.Tensor_out(Tensor self, Tensor?[] indices, *, Tensor(a!) out) -> Tensor(a!)"
  3212. },
  3213. {
  3214. "name": "aten::index.Tensor_hacked_twin(Tensor self, Tensor[] indices) -> Tensor"
  3215. },
  3216. {
  3217. "name": "aten::index.str(str self, str substr, int start=0, int end=-1) -> int"
  3218. },
  3219. {
  3220. "name": "aten::index.list_int(int[] self, int el) -> int"
  3221. },
  3222. {
  3223. "name": "aten::index.list_float(float[] self, float el) -> int"
  3224. },
  3225. {
  3226. "name": "aten::index.list_bool(bool[] self, bool el) -> int"
  3227. },
  3228. {
  3229. "name": "aten::index.list_Tensor(Tensor[] self, Tensor el) -> int"
  3230. },
  3231. {
  3232. "name": "aten::index.list_str(str[] self, str el) -> int"
  3233. },
  3234. {
  3235. "name": "aten::index_add(Tensor self, int dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor"
  3236. },
  3237. {
  3238. "name": "aten::index_add.out(Tensor self, int dim, Tensor index, Tensor source, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)"
  3239. },
  3240. {
  3241. "name": "aten::index_add.dimname(Tensor self, str dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor"
  3242. },
  3243. {
  3244. "name": "aten::index_add_(Tensor(a!) self, int dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor(a!)"
  3245. },
  3246. {
  3247. "name": "aten::index_copy(Tensor self, int dim, Tensor index, Tensor source) -> Tensor"
  3248. },
  3249. {
  3250. "name": "aten::index_copy.dimname(Tensor self, str dim, Tensor index, Tensor source) -> Tensor"
  3251. },
  3252. {
  3253. "name": "aten::index_copy.out(Tensor self, int dim, Tensor index, Tensor source, *, Tensor(a!) out) -> Tensor(a!)"
  3254. },
  3255. {
  3256. "name": "aten::index_copy_(Tensor(a!) self, int dim, Tensor index, Tensor source) -> Tensor(a!)"
  3257. },
  3258. {
  3259. "name": "aten::index_copy_.dimname(Tensor(a!) self, str dim, Tensor index, Tensor source) -> Tensor(a!)"
  3260. },
  3261. {
  3262. "name": "aten::index_fill.int_Tensor(Tensor self, int dim, Tensor index, Tensor value) -> Tensor"
  3263. },
  3264. {
  3265. "name": "aten::index_fill.int_Scalar(Tensor self, int dim, Tensor index, Scalar value) -> Tensor"
  3266. },
  3267. {
  3268. "name": "aten::index_fill.Dimname_Scalar(Tensor self, str dim, Tensor index, Scalar value) -> Tensor"
  3269. },
  3270. {
  3271. "name": "aten::index_fill.Dimname_Tensor(Tensor self, str dim, Tensor index, Tensor value) -> Tensor"
  3272. },
  3273. {
  3274. "name": "aten::index_fill.int_Scalar_out(Tensor self, int dim, Tensor index, Scalar value, *, Tensor(a!) out) -> Tensor(a!)"
  3275. },
  3276. {
  3277. "name": "aten::index_fill.int_Tensor_out(Tensor self, int dim, Tensor index, Tensor value, *, Tensor(a!) out) -> Tensor(a!)"
  3278. },
  3279. {
  3280. "name": "aten::index_fill_.int_Tensor(Tensor(a!) self, int dim, Tensor index, Tensor value) -> Tensor(a!)"
  3281. },
  3282. {
  3283. "name": "aten::index_fill_.int_Scalar(Tensor(a!) self, int dim, Tensor index, Scalar value) -> Tensor(a!)"
  3284. },
  3285. {
  3286. "name": "aten::index_fill_.Dimname_Scalar(Tensor(a!) self, str dim, Tensor index, Scalar value) -> Tensor(a!)"
  3287. },
  3288. {
  3289. "name": "aten::index_fill_.Dimname_Tensor(Tensor(a!) self, str dim, Tensor index, Tensor value) -> Tensor(a!)"
  3290. },
  3291. {
  3292. "name": "aten::index_put(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor"
  3293. },
  3294. {
  3295. "name": "aten::index_put.out(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False, *, Tensor(a!) out) -> Tensor(a!)"
  3296. },
  3297. {
  3298. "name": "aten::index_put.hacked_twin(Tensor self, Tensor[] indices, Tensor values, bool accumulate=False) -> Tensor"
  3299. },
  3300. {
  3301. "name": "aten::index_put_(Tensor(a!) self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor(a!)"
  3302. },
  3303. {
  3304. "name": "aten::index_put_.hacked_twin(Tensor(a!) self, Tensor[] indices, Tensor values, bool accumulate=False) -> Tensor(a!)"
  3305. },
  3306. {
  3307. "name": "aten::index_reduce(Tensor self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True) -> Tensor"
  3308. },
  3309. {
  3310. "name": "aten::index_reduce.out(Tensor self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True, Tensor(a!) out) -> Tensor(a!)"
  3311. },
  3312. {
  3313. "name": "aten::index_reduce_(Tensor(a!) self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True) -> Tensor(a!)"
  3314. },
  3315. {
  3316. "name": "aten::index_select(Tensor self, int dim, Tensor index) -> Tensor"
  3317. },
  3318. {
  3319. "name": "aten::index_select.out(Tensor self, int dim, Tensor index, *, Tensor(a!) out) -> Tensor(a!)"
  3320. },
  3321. {
  3322. "name": "aten::index_select.dimname(Tensor self, str dim, Tensor index) -> Tensor"
  3323. },
  3324. {
  3325. "name": "aten::index_select.dimname_out(Tensor self, str dim, Tensor index, *, Tensor(a!) out) -> Tensor(a!)"
  3326. },
  3327. {
  3328. "name": "aten::index_select_backward(Tensor grad, SymInt[] self_sizes, int dim, Tensor index) -> Tensor"
  3329. },
  3330. {
  3331. "name": "aten::indices(Tensor(a) self) -> Tensor(a)"
  3332. },
  3333. {
  3334. "name": "aten::insert.t(t[](a!) self, int idx, t(b -> *) el) -> ()"
  3335. },
  3336. {
  3337. "name": "aten::instance_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool use_input_stats, float momentum, float eps, bool cudnn_enabled) -> Tensor",
  3338. "category": "Normalization"
  3339. },
  3340. {
  3341. "name": "aten::int_repr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  3342. },
  3343. {
  3344. "name": "aten::int_repr(Tensor self) -> Tensor"
  3345. },
  3346. {
  3347. "name": "aten::inverse(Tensor self) -> Tensor"
  3348. },
  3349. {
  3350. "name": "aten::inverse.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  3351. },
  3352. {
  3353. "name": "aten::is_autocast_enabled() -> bool"
  3354. },
  3355. {
  3356. "name": "aten::is_contiguous(Tensor self) -> bool"
  3357. },
  3358. {
  3359. "name": "aten::is_contiguous.memory_format(Tensor self, MemoryFormat memory_format) -> bool"
  3360. },
  3361. {
  3362. "name": "aten::is_floating_point(Tensor self) -> bool"
  3363. },
  3364. {
  3365. "name": "aten::is_grad_enabled() -> bool"
  3366. },
  3367. {
  3368. "name": "aten::is_pinned(Tensor self, Device? device=None) -> bool"
  3369. },
  3370. {
  3371. "name": "aten::is_scripting() -> bool"
  3372. },
  3373. {
  3374. "name": "aten::isdigit(str self) -> bool"
  3375. },
  3376. {
  3377. "name": "aten::isfinite(Tensor self) -> Tensor"
  3378. },
  3379. {
  3380. "name": "aten::isfinite.float(float a) -> bool"
  3381. },
  3382. {
  3383. "name": "aten::isfinite.complex(complex a) -> bool"
  3384. },
  3385. {
  3386. "name": "aten::isin.Tensor_Tensor(Tensor elements, Tensor test_elements, *, bool assume_unique=False, bool invert=False) -> Tensor"
  3387. },
  3388. {
  3389. "name": "aten::isin.Tensor_Tensor_out(Tensor elements, Tensor test_elements, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!)"
  3390. },
  3391. {
  3392. "name": "aten::isin.Tensor_Scalar(Tensor elements, Scalar test_element, *, bool assume_unique=False, bool invert=False) -> Tensor"
  3393. },
  3394. {
  3395. "name": "aten::isin.Tensor_Scalar_out(Tensor elements, Scalar test_element, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!)"
  3396. },
  3397. {
  3398. "name": "aten::isin.Scalar_Tensor(Scalar element, Tensor test_elements, *, bool assume_unique=False, bool invert=False) -> Tensor"
  3399. },
  3400. {
  3401. "name": "aten::isin.Scalar_Tensor_out(Scalar element, Tensor test_elements, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!)"
  3402. },
  3403. {
  3404. "name": "aten::isinf(Tensor self) -> Tensor"
  3405. },
  3406. {
  3407. "name": "aten::isinf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  3408. },
  3409. {
  3410. "name": "aten::isinf.float(float a) -> bool"
  3411. },
  3412. {
  3413. "name": "aten::isinf.complex(complex a) -> bool"
  3414. },
  3415. {
  3416. "name": "aten::isnan(Tensor self) -> Tensor"
  3417. },
  3418. {
  3419. "name": "aten::isnan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  3420. },
  3421. {
  3422. "name": "aten::isnan.float(float a) -> bool"
  3423. },
  3424. {
  3425. "name": "aten::isnan.complex(complex a) -> bool"
  3426. },
  3427. {
  3428. "name": "aten::istft(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool center=True, bool normalized=False, bool? onesided=None, int? length=None, bool return_complex=False) -> Tensor"
  3429. },
  3430. {
  3431. "name": "aten::item(Tensor self) -> Scalar"
  3432. },
  3433. {
  3434. "name": "aten::items.str(Dict(str, t) self) -> ((str, t)[])"
  3435. },
  3436. {
  3437. "name": "aten::items.int(Dict(int, t) self) -> ((int, t)[])"
  3438. },
  3439. {
  3440. "name": "aten::items.bool(Dict(bool, t) self) -> ((bool, t)[])"
  3441. },
  3442. {
  3443. "name": "aten::items.float(Dict(float, t) self) -> ((float, t)[])"
  3444. },
  3445. {
  3446. "name": "aten::items.complex(Dict(complex, t) self) -> ((complex, t)[])"
  3447. },
  3448. {
  3449. "name": "aten::items.Tensor(Dict(Tensor, t) self) -> ((Tensor, t)[])"
  3450. },
  3451. {
  3452. "name": "aten::join(str self, str[] values) -> str"
  3453. },
  3454. {
  3455. "name": "aten::keys.str(Dict(str, t) self) -> str[](*)"
  3456. },
  3457. {
  3458. "name": "aten::keys.int(Dict(int, t) self) -> int[](*)"
  3459. },
  3460. {
  3461. "name": "aten::keys.bool(Dict(bool, t) self) -> bool[](*)"
  3462. },
  3463. {
  3464. "name": "aten::keys.float(Dict(float, t) self) -> float[](*)"
  3465. },
  3466. {
  3467. "name": "aten::keys.complex(Dict(complex, t) self) -> complex[](*)"
  3468. },
  3469. {
  3470. "name": "aten::keys.Tensor(Dict(Tensor, t) self) -> Tensor[](*)"
  3471. },
  3472. {
  3473. "name": "aten::kl_div(Tensor self, Tensor target, int reduction=1, *, bool log_target=False) -> Tensor"
  3474. },
  3475. {
  3476. "name": "aten::kthvalue(Tensor self, SymInt k, int dim=-1, bool keepdim=False) -> (Tensor values, Tensor indices)"
  3477. },
  3478. {
  3479. "name": "aten::kthvalue.dimname(Tensor self, SymInt k, str dim, bool keepdim=False) -> (Tensor values, Tensor indices)"
  3480. },
  3481. {
  3482. "name": "aten::kthvalue.dimname_out(Tensor self, SymInt k, str dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)"
  3483. },
  3484. {
  3485. "name": "aten::kthvalue.values(Tensor self, SymInt k, int dim=-1, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)"
  3486. },
  3487. {
  3488. "name": "aten::l1_loss(Tensor self, Tensor target, int reduction=1) -> Tensor"
  3489. },
  3490. {
  3491. "name": "aten::layer_norm(Tensor input, SymInt[] normalized_shape, Tensor? weight=None, Tensor? bias=None, float eps=1.0000000000000001e-05, bool cudnn_enable=True) -> Tensor",
  3492. "category": "Normalization"
  3493. },
  3494. {
  3495. "name": "aten::le.Tensor(Tensor self, Tensor other) -> Tensor"
  3496. },
  3497. {
  3498. "name": "aten::le.Scalar(Tensor self, Scalar other) -> Tensor"
  3499. },
  3500. {
  3501. "name": "aten::le.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)"
  3502. },
  3503. {
  3504. "name": "aten::le.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  3505. },
  3506. {
  3507. "name": "aten::le.int(int a, int b) -> bool"
  3508. },
  3509. {
  3510. "name": "aten::le.float(float a, float b) -> bool"
  3511. },
  3512. {
  3513. "name": "aten::le.int_float(int a, float b) -> bool"
  3514. },
  3515. {
  3516. "name": "aten::le.float_int(float a, int b) -> bool"
  3517. },
  3518. {
  3519. "name": "aten::le(Scalar a, Scalar b) -> bool"
  3520. },
  3521. {
  3522. "name": "aten::le.str(str a, str b) -> bool"
  3523. },
  3524. {
  3525. "name": "aten::leaky_relu(Tensor self, Scalar negative_slope=0.01) -> Tensor",
  3526. "category": "Activation"
  3527. },
  3528. {
  3529. "name": "aten::leaky_relu.out(Tensor self, Scalar negative_slope=0.01, *, Tensor(a!) out) -> Tensor(a!)"
  3530. },
  3531. {
  3532. "name": "aten::leaky_relu_(Tensor(a!) self, Scalar negative_slope=0.01) -> Tensor(a!)",
  3533. "category": "Activation"
  3534. },
  3535. {
  3536. "name": "aten::len.t(t[] a) -> int"
  3537. },
  3538. {
  3539. "name": "aten::len.Tensor(Tensor t) -> int"
  3540. },
  3541. {
  3542. "name": "aten::len.str(str s) -> int"
  3543. },
  3544. {
  3545. "name": "aten::len.Dict_str(Dict(str, t) self) -> int"
  3546. },
  3547. {
  3548. "name": "aten::len.Dict_int(Dict(int, t) self) -> int"
  3549. },
  3550. {
  3551. "name": "aten::len.Dict_bool(Dict(bool, t) self) -> int"
  3552. },
  3553. {
  3554. "name": "aten::len.Dict_float(Dict(float, t) self) -> int"
  3555. },
  3556. {
  3557. "name": "aten::len.Dict_complex(Dict(complex, t) self) -> int"
  3558. },
  3559. {
  3560. "name": "aten::len.Dict_Tensor(Dict(Tensor, t) self) -> int"
  3561. },
  3562. {
  3563. "name": "aten::len.any(Any[] a) -> int"
  3564. },
  3565. {
  3566. "name": "aten::lerp.Scalar(Tensor self, Tensor end, Scalar weight) -> Tensor"
  3567. },
  3568. {
  3569. "name": "aten::lerp.Tensor(Tensor self, Tensor end, Tensor weight) -> Tensor"
  3570. },
  3571. {
  3572. "name": "aten::lerp.Scalar_out(Tensor self, Tensor end, Scalar weight, *, Tensor(a!) out) -> Tensor(a!)"
  3573. },
  3574. {
  3575. "name": "aten::lerp.Tensor_out(Tensor self, Tensor end, Tensor weight, *, Tensor(a!) out) -> Tensor(a!)"
  3576. },
  3577. {
  3578. "name": "aten::less.Tensor(Tensor self, Tensor other) -> Tensor"
  3579. },
  3580. {
  3581. "name": "aten::less.Scalar(Tensor self, Scalar other) -> Tensor"
  3582. },
  3583. {
  3584. "name": "aten::less.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)"
  3585. },
  3586. {
  3587. "name": "aten::less.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  3588. },
  3589. {
  3590. "name": "aten::less_equal.Tensor(Tensor self, Tensor other) -> Tensor"
  3591. },
  3592. {
  3593. "name": "aten::less_equal.Scalar(Tensor self, Scalar other) -> Tensor"
  3594. },
  3595. {
  3596. "name": "aten::less_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)"
  3597. },
  3598. {
  3599. "name": "aten::less_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  3600. },
  3601. {
  3602. "name": "aten::lift_fresh(Tensor(a) self) -> Tensor(a)"
  3603. },
  3604. {
  3605. "name": "aten::lift_fresh_copy(Tensor self) -> Tensor"
  3606. },
  3607. {
  3608. "name": "aten::lift_fresh_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  3609. },
  3610. {
  3611. "name": "aten::linalg_cholesky(Tensor self, *, bool upper=False) -> Tensor"
  3612. },
  3613. {
  3614. "name": "aten::linalg_cholesky.out(Tensor self, *, bool upper=False, Tensor(a!) out) -> Tensor(a!)"
  3615. },
  3616. {
  3617. "name": "aten::linalg_cholesky_ex(Tensor self, *, bool upper=False, bool check_errors=False) -> (Tensor L, Tensor info)"
  3618. },
  3619. {
  3620. "name": "aten::linalg_cholesky_ex.L(Tensor self, *, bool upper=False, bool check_errors=False, Tensor(a!) L, Tensor(b!) info) -> (Tensor(a!) L, Tensor(b!) info)"
  3621. },
  3622. {
  3623. "name": "aten::linalg_cross(Tensor self, Tensor other, *, int dim=-1) -> Tensor"
  3624. },
  3625. {
  3626. "name": "aten::linalg_cross.out(Tensor self, Tensor other, *, int dim=-1, Tensor(a!) out) -> Tensor(a!)"
  3627. },
  3628. {
  3629. "name": "aten::linalg_det(Tensor A) -> Tensor"
  3630. },
  3631. {
  3632. "name": "aten::linalg_det.out(Tensor A, *, Tensor(a!) out) -> Tensor(a!)"
  3633. },
  3634. {
  3635. "name": "aten::linalg_inv(Tensor A) -> Tensor"
  3636. },
  3637. {
  3638. "name": "aten::linalg_inv.out(Tensor A, *, Tensor(a!) out) -> Tensor(a!)"
  3639. },
  3640. {
  3641. "name": "aten::linalg_inv_ex(Tensor A, *, bool check_errors=False) -> (Tensor inverse, Tensor info)"
  3642. },
  3643. {
  3644. "name": "aten::linalg_inv_ex.inverse(Tensor A, *, bool check_errors=False, Tensor(a!) inverse, Tensor(b!) info) -> (Tensor(a!) inverse, Tensor(b!) info)"
  3645. },
  3646. {
  3647. "name": "aten::linalg_lstsq(Tensor self, Tensor b, float? rcond=None, *, str? driver=None) -> (Tensor solution, Tensor residuals, Tensor rank, Tensor singular_values)"
  3648. },
  3649. {
  3650. "name": "aten::linalg_lstsq.out(Tensor self, Tensor b, float? rcond=None, *, str? driver=None, Tensor(a!) solution, Tensor(b!) residuals, Tensor(c!) rank, Tensor(d!) singular_values) -> (Tensor(a!) solution, Tensor(b!) residuals, Tensor(c!) rank, Tensor(d!) singular_values)"
  3651. },
  3652. {
  3653. "name": "aten::linalg_matrix_norm(Tensor self, Scalar ord, int[] dim=[-2, -1], bool keepdim=False, *, ScalarType? dtype=None) -> Tensor"
  3654. },
  3655. {
  3656. "name": "aten::linalg_matrix_norm.str_ord(Tensor self, str ord=\"fro\", int[] dim=[-2, -1], bool keepdim=False, *, ScalarType? dtype=None) -> Tensor"
  3657. },
  3658. {
  3659. "name": "aten::linalg_matrix_norm.out(Tensor self, Scalar ord, int[] dim=[-2, -1], bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)"
  3660. },
  3661. {
  3662. "name": "aten::linalg_matrix_norm.str_ord_out(Tensor self, str ord=\"fro\", int[] dim=[-2, -1], bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)"
  3663. },
  3664. {
  3665. "name": "aten::linalg_norm(Tensor self, Scalar? ord=None, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor"
  3666. },
  3667. {
  3668. "name": "aten::linalg_norm.ord_str(Tensor self, str ord, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor"
  3669. },
  3670. {
  3671. "name": "aten::linalg_norm.out(Tensor self, Scalar? ord=None, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)"
  3672. },
  3673. {
  3674. "name": "aten::linalg_norm.ord_str_out(Tensor self, str ord, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)"
  3675. },
  3676. {
  3677. "name": "aten::linalg_qr(Tensor A, str mode=\"reduced\") -> (Tensor Q, Tensor R)"
  3678. },
  3679. {
  3680. "name": "aten::linalg_qr.out(Tensor A, str mode=\"reduced\", *, Tensor(a!) Q, Tensor(b!) R) -> (Tensor(a!) Q, Tensor(b!) R)"
  3681. },
  3682. {
  3683. "name": "aten::linalg_slogdet(Tensor A) -> (Tensor sign, Tensor logabsdet)"
  3684. },
  3685. {
  3686. "name": "aten::linalg_slogdet.out(Tensor A, *, Tensor(a!) sign, Tensor(b!) logabsdet) -> (Tensor(a!) sign, Tensor(b!) logabsdet)"
  3687. },
  3688. {
  3689. "name": "aten::linalg_solve(Tensor A, Tensor B, *, bool left=True) -> Tensor"
  3690. },
  3691. {
  3692. "name": "aten::linalg_solve.out(Tensor A, Tensor B, *, bool left=True, Tensor(a!) out) -> Tensor(a!)"
  3693. },
  3694. {
  3695. "name": "aten::linalg_solve_ex(Tensor A, Tensor B, *, bool left=True, bool check_errors=False) -> (Tensor result, Tensor info)"
  3696. },
  3697. {
  3698. "name": "aten::linalg_solve_ex.out(Tensor A, Tensor B, *, bool left=True, bool check_errors=False, Tensor(a!) result, Tensor(b!) info) -> (Tensor(a!) result, Tensor(b!) info)"
  3699. },
  3700. {
  3701. "name": "aten::linalg_solve_triangular(Tensor self, Tensor B, *, bool upper, bool left=True, bool unitriangular=False) -> Tensor"
  3702. },
  3703. {
  3704. "name": "aten::linalg_solve_triangular.out(Tensor self, Tensor B, *, bool upper, bool left=True, bool unitriangular=False, Tensor(a!) out) -> Tensor(a!)"
  3705. },
  3706. {
  3707. "name": "aten::linalg_svd(Tensor A, bool full_matrices=True, *, str? driver=None) -> (Tensor U, Tensor S, Tensor Vh)"
  3708. },
  3709. {
  3710. "name": "aten::linalg_svd.U(Tensor A, bool full_matrices=True, *, str? driver=None, Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh)"
  3711. },
  3712. {
  3713. "name": "aten::linalg_tensorinv(Tensor self, int ind=2) -> Tensor"
  3714. },
  3715. {
  3716. "name": "aten::linalg_tensorinv.out(Tensor self, int ind=2, *, Tensor(a!) out) -> Tensor(a!)"
  3717. },
  3718. {
  3719. "name": "aten::linalg_tensorsolve(Tensor self, Tensor other, int[]? dims=None) -> Tensor"
  3720. },
  3721. {
  3722. "name": "aten::linalg_tensorsolve.out(Tensor self, Tensor other, int[]? dims=None, *, Tensor(a!) out) -> Tensor(a!)"
  3723. },
  3724. {
  3725. "name": "aten::linalg_vector_norm(Tensor self, Scalar ord=2, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor"
  3726. },
  3727. {
  3728. "name": "aten::linalg_vector_norm.out(Tensor self, Scalar ord=2, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)"
  3729. },
  3730. {
  3731. "name": "aten::linear(Tensor input, Tensor weight, Tensor? bias=None) -> Tensor",
  3732. "category": "Layer"
  3733. },
  3734. {
  3735. "name": "aten::linear.out(Tensor input, Tensor weight, Tensor? bias=None, *, Tensor(a!) out) -> Tensor(a!)",
  3736. "category": "Layer"
  3737. },
  3738. {
  3739. "name": "aten::linear_backward.out(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))"
  3740. },
  3741. {
  3742. "name": "aten::linear_backward(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask) -> (Tensor, Tensor, Tensor)"
  3743. },
  3744. {
  3745. "name": "aten::linspace.Tensor_Tensor(Tensor start, Tensor end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  3746. },
  3747. {
  3748. "name": "aten::linspace.Tensor_Scalar(Tensor start, Scalar end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  3749. },
  3750. {
  3751. "name": "aten::linspace.Scalar_Tensor(Scalar start, Tensor end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  3752. },
  3753. {
  3754. "name": "aten::linspace(Scalar start, Scalar end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  3755. },
  3756. {
  3757. "name": "aten::linspace.out(Scalar start, Scalar end, int steps, *, Tensor(a!) out) -> Tensor(a!)"
  3758. },
  3759. {
  3760. "name": "aten::linspace.Tensor_Tensor_out(Tensor start, Tensor end, int steps, *, Tensor(a!) out) -> Tensor(a!)"
  3761. },
  3762. {
  3763. "name": "aten::linspace.Tensor_Scalar_out(Tensor start, Scalar end, int steps, *, Tensor(a!) out) -> Tensor(a!)"
  3764. },
  3765. {
  3766. "name": "aten::linspace.Scalar_Tensor_out(Scalar start, Tensor end, int steps, *, Tensor(a!) out) -> Tensor(a!)"
  3767. },
  3768. {
  3769. "name": "aten::list(str t) -> str[]"
  3770. },
  3771. {
  3772. "name": "aten::list.t(t[] l) -> t[]"
  3773. },
  3774. {
  3775. "name": "aten::list_with_default(int[] list, int[] defaults) -> int[]"
  3776. },
  3777. {
  3778. "name": "aten::log(Tensor self) -> Tensor"
  3779. },
  3780. {
  3781. "name": "aten::log.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  3782. },
  3783. {
  3784. "name": "aten::log.int(int a) -> float"
  3785. },
  3786. {
  3787. "name": "aten::log.float(float a) -> float"
  3788. },
  3789. {
  3790. "name": "aten::log.complex(complex a) -> complex"
  3791. },
  3792. {
  3793. "name": "aten::log.Scalar(Scalar a) -> Scalar"
  3794. },
  3795. {
  3796. "name": "aten::log.int_int(int a, int b) -> float"
  3797. },
  3798. {
  3799. "name": "aten::log.float_float(float a, float b) -> float"
  3800. },
  3801. {
  3802. "name": "aten::log.complex_complex(complex a, complex b) -> complex"
  3803. },
  3804. {
  3805. "name": "aten::log.int_float(int a, float b) -> float"
  3806. },
  3807. {
  3808. "name": "aten::log.float_int(float a, int b) -> float"
  3809. },
  3810. {
  3811. "name": "aten::log.int_complex(int a, complex b) -> complex"
  3812. },
  3813. {
  3814. "name": "aten::log.complex_int(complex a, int b) -> complex"
  3815. },
  3816. {
  3817. "name": "aten::log.float_complex(float a, complex b) -> complex"
  3818. },
  3819. {
  3820. "name": "aten::log.complex_float(complex a, float b) -> complex"
  3821. },
  3822. {
  3823. "name": "aten::log.Scalar_Scalar(Scalar a, Scalar b) -> float"
  3824. },
  3825. {
  3826. "name": "aten::log10(Tensor self) -> Tensor"
  3827. },
  3828. {
  3829. "name": "aten::log10.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  3830. },
  3831. {
  3832. "name": "aten::log10.int(int a) -> float"
  3833. },
  3834. {
  3835. "name": "aten::log10.float(float a) -> float"
  3836. },
  3837. {
  3838. "name": "aten::log10.complex(complex a) -> complex"
  3839. },
  3840. {
  3841. "name": "aten::log10.Scalar(Scalar a) -> Scalar"
  3842. },
  3843. {
  3844. "name": "aten::log10_(Tensor(a!) self) -> Tensor(a!)"
  3845. },
  3846. {
  3847. "name": "aten::log1p(Tensor self) -> Tensor"
  3848. },
  3849. {
  3850. "name": "aten::log1p.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  3851. },
  3852. {
  3853. "name": "aten::log1p.int(int a) -> float"
  3854. },
  3855. {
  3856. "name": "aten::log1p.float(float a) -> float"
  3857. },
  3858. {
  3859. "name": "aten::log1p.Scalar(Scalar a) -> Scalar"
  3860. },
  3861. {
  3862. "name": "aten::log1p_(Tensor(a!) self) -> Tensor(a!)"
  3863. },
  3864. {
  3865. "name": "aten::log2(Tensor self) -> Tensor"
  3866. },
  3867. {
  3868. "name": "aten::log2.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  3869. },
  3870. {
  3871. "name": "aten::log2_(Tensor(a!) self) -> Tensor(a!)"
  3872. },
  3873. {
  3874. "name": "aten::log_(Tensor(a!) self) -> Tensor(a!)"
  3875. },
  3876. {
  3877. "name": "aten::log_normal_(Tensor(a!) self, float mean=1., float std=2., *, Generator? generator=None) -> Tensor(a!)"
  3878. },
  3879. {
  3880. "name": "aten::log_sigmoid(Tensor self) -> Tensor"
  3881. },
  3882. {
  3883. "name": "aten::log_sigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  3884. },
  3885. {
  3886. "name": "aten::log_sigmoid_backward(Tensor grad_output, Tensor self, Tensor buffer) -> Tensor"
  3887. },
  3888. {
  3889. "name": "aten::log_sigmoid_backward.grad_input(Tensor grad_output, Tensor self, Tensor buffer, *, Tensor(a!) grad_input) -> Tensor(a!)"
  3890. },
  3891. {
  3892. "name": "aten::log_sigmoid_forward(Tensor self) -> (Tensor output, Tensor buffer)"
  3893. },
  3894. {
  3895. "name": "aten::log_sigmoid_forward.output(Tensor self, *, Tensor(a!) output, Tensor(b!) buffer) -> (Tensor(a!), Tensor(b!))"
  3896. },
  3897. {
  3898. "name": "aten::log_softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor",
  3899. "category": "Activation"
  3900. },
  3901. {
  3902. "name": "aten::log_softmax.Dimname(Tensor self, str dim, *, ScalarType? dtype=None) -> Tensor",
  3903. "category": "Activation"
  3904. },
  3905. {
  3906. "name": "aten::log_softmax.int_out(Tensor self, int dim, ScalarType? dtype=None, *, Tensor(a!) out) -> Tensor(a!)"
  3907. },
  3908. {
  3909. "name": "aten::logaddexp(Tensor self, Tensor other) -> Tensor"
  3910. },
  3911. {
  3912. "name": "aten::logaddexp.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  3913. },
  3914. {
  3915. "name": "aten::logaddexp2(Tensor self, Tensor other) -> Tensor"
  3916. },
  3917. {
  3918. "name": "aten::logaddexp2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  3919. },
  3920. {
  3921. "name": "aten::logcumsumexp(Tensor self, int dim) -> Tensor"
  3922. },
  3923. {
  3924. "name": "aten::logcumsumexp.dimname(Tensor self, str dim) -> Tensor"
  3925. },
  3926. {
  3927. "name": "aten::logcumsumexp.dimname_out(Tensor self, str dim, *, Tensor(a!) out) -> Tensor(a!)"
  3928. },
  3929. {
  3930. "name": "aten::logcumsumexp.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)"
  3931. },
  3932. {
  3933. "name": "aten::logdet(Tensor self) -> Tensor"
  3934. },
  3935. {
  3936. "name": "aten::logical_and(Tensor self, Tensor other) -> Tensor"
  3937. },
  3938. {
  3939. "name": "aten::logical_and.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  3940. },
  3941. {
  3942. "name": "aten::logical_and_(Tensor(a!) self, Tensor other) -> Tensor(a!)"
  3943. },
  3944. {
  3945. "name": "aten::logical_not(Tensor self) -> Tensor"
  3946. },
  3947. {
  3948. "name": "aten::logical_not.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  3949. },
  3950. {
  3951. "name": "aten::logical_not_(Tensor(a!) self) -> Tensor(a!)"
  3952. },
  3953. {
  3954. "name": "aten::logical_or(Tensor self, Tensor other) -> Tensor"
  3955. },
  3956. {
  3957. "name": "aten::logical_or.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  3958. },
  3959. {
  3960. "name": "aten::logical_or_(Tensor(a!) self, Tensor other) -> Tensor(a!)"
  3961. },
  3962. {
  3963. "name": "aten::logical_xor(Tensor self, Tensor other) -> Tensor"
  3964. },
  3965. {
  3966. "name": "aten::logical_xor.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  3967. },
  3968. {
  3969. "name": "aten::logical_xor_(Tensor(a!) self, Tensor other) -> Tensor(a!)"
  3970. },
  3971. {
  3972. "name": "aten::logit(Tensor self, float? eps=None) -> Tensor"
  3973. },
  3974. {
  3975. "name": "aten::logit.out(Tensor self, float? eps=None, *, Tensor(a!) out) -> Tensor(a!)"
  3976. },
  3977. {
  3978. "name": "aten::logit_(Tensor(a!) self, float? eps=None) -> Tensor(a!)"
  3979. },
  3980. {
  3981. "name": "aten::logit_backward(Tensor grad_output, Tensor self, float? eps=None) -> Tensor"
  3982. },
  3983. {
  3984. "name": "aten::logit_backward.grad_input(Tensor grad_output, Tensor self, float? eps=None, *, Tensor(a!) grad_input) -> Tensor(a!)"
  3985. },
  3986. {
  3987. "name": "aten::logspace.Tensor_Tensor(Tensor start, Tensor end, int steps, float base=10., *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  3988. },
  3989. {
  3990. "name": "aten::logspace.Tensor_Scalar(Tensor start, Scalar end, int steps, float base=10., *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  3991. },
  3992. {
  3993. "name": "aten::logspace.Scalar_Tensor(Scalar start, Tensor end, int steps, float base=10., *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  3994. },
  3995. {
  3996. "name": "aten::logspace(Scalar start, Scalar end, int steps, float base=10., *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  3997. },
  3998. {
  3999. "name": "aten::logspace.out(Scalar start, Scalar end, int steps, float base=10., *, Tensor(a!) out) -> Tensor(a!)"
  4000. },
  4001. {
  4002. "name": "aten::logspace.Tensor_Tensor_out(Tensor start, Tensor end, int steps, float base=10., *, Tensor(a!) out) -> Tensor(a!)"
  4003. },
  4004. {
  4005. "name": "aten::logspace.Tensor_Scalar_out(Tensor start, Scalar end, int steps, float base=10., *, Tensor(a!) out) -> Tensor(a!)"
  4006. },
  4007. {
  4008. "name": "aten::logspace.Scalar_Tensor_out(Scalar start, Tensor end, int steps, float base=10., *, Tensor(a!) out) -> Tensor(a!)"
  4009. },
  4010. {
  4011. "name": "aten::logsumexp(Tensor self, int[1] dim, bool keepdim=False) -> Tensor"
  4012. },
  4013. {
  4014. "name": "aten::logsumexp.names(Tensor self, str[1] dim, bool keepdim=False) -> Tensor"
  4015. },
  4016. {
  4017. "name": "aten::logsumexp.names_out(Tensor self, str[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)"
  4018. },
  4019. {
  4020. "name": "aten::logsumexp.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)"
  4021. },
  4022. {
  4023. "name": "aten::lower(str self) -> str"
  4024. },
  4025. {
  4026. "name": "aten::lstm.input(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor, Tensor)",
  4027. "category": "Layer"
  4028. },
  4029. {
  4030. "name": "aten::lstm.data(Tensor data, Tensor batch_sizes, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor, Tensor)",
  4031. "category": "Layer"
  4032. },
  4033. {
  4034. "name": "aten::lstm_cell(Tensor input, Tensor[] hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> (Tensor, Tensor)",
  4035. "category": "Layer"
  4036. },
  4037. {
  4038. "name": "aten::lstrip(str self, str chars=\" \\n\\t\\f\\v\") -> str"
  4039. },
  4040. {
  4041. "name": "aten::lt.Tensor(Tensor self, Tensor other) -> Tensor"
  4042. },
  4043. {
  4044. "name": "aten::lt.Scalar(Tensor self, Scalar other) -> Tensor"
  4045. },
  4046. {
  4047. "name": "aten::lt.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)"
  4048. },
  4049. {
  4050. "name": "aten::lt.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  4051. },
  4052. {
  4053. "name": "aten::lt.int(int a, int b) -> bool"
  4054. },
  4055. {
  4056. "name": "aten::lt.float(float a, float b) -> bool"
  4057. },
  4058. {
  4059. "name": "aten::lt.int_float(int a, float b) -> bool"
  4060. },
  4061. {
  4062. "name": "aten::lt.float_int(float a, int b) -> bool"
  4063. },
  4064. {
  4065. "name": "aten::lt(Scalar a, Scalar b) -> bool"
  4066. },
  4067. {
  4068. "name": "aten::lt.str(str a, str b) -> bool"
  4069. },
  4070. {
  4071. "name": "aten::mT(Tensor(a) self) -> Tensor(a)"
  4072. },
  4073. {
  4074. "name": "aten::mT.a(Tensor(a) self) -> Tensor(a)"
  4075. },
  4076. {
  4077. "name": "aten::manual_seed(int seed) -> ()"
  4078. },
  4079. {
  4080. "name": "aten::manual_seed.generator(Generator(a!) self, int seed) -> Generator(a!)"
  4081. },
  4082. {
  4083. "name": "aten::masked_fill.Scalar(Tensor self, Tensor mask, Scalar value) -> Tensor"
  4084. },
  4085. {
  4086. "name": "aten::masked_fill.Tensor(Tensor self, Tensor mask, Tensor value) -> Tensor"
  4087. },
  4088. {
  4089. "name": "aten::masked_fill.Scalar_out(Tensor self, Tensor mask, Scalar value, *, Tensor(a!) out) -> Tensor(a!)"
  4090. },
  4091. {
  4092. "name": "aten::masked_fill.Tensor_out(Tensor self, Tensor mask, Tensor value, *, Tensor(a!) out) -> Tensor(a!)"
  4093. },
  4094. {
  4095. "name": "aten::masked_fill_.Scalar(Tensor(a!) self, Tensor mask, Scalar value) -> Tensor(a!)"
  4096. },
  4097. {
  4098. "name": "aten::masked_fill_.Tensor(Tensor(a!) self, Tensor mask, Tensor value) -> Tensor(a!)"
  4099. },
  4100. {
  4101. "name": "aten::masked_scatter(Tensor self, Tensor mask, Tensor source) -> Tensor"
  4102. },
  4103. {
  4104. "name": "aten::masked_scatter.out(Tensor self, Tensor mask, Tensor source, *, Tensor(a!) out) -> Tensor(a!)"
  4105. },
  4106. {
  4107. "name": "aten::masked_scatter_(Tensor(a!) self, Tensor mask, Tensor source) -> Tensor(a!)"
  4108. },
  4109. {
  4110. "name": "aten::masked_select(Tensor self, Tensor mask) -> Tensor"
  4111. },
  4112. {
  4113. "name": "aten::masked_select.out(Tensor self, Tensor mask, *, Tensor(a!) out) -> Tensor(a!)"
  4114. },
  4115. {
  4116. "name": "aten::matmul(Tensor self, Tensor other) -> Tensor"
  4117. },
  4118. {
  4119. "name": "aten::matmul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  4120. },
  4121. {
  4122. "name": "aten::max.other(Tensor self, Tensor other) -> Tensor"
  4123. },
  4124. {
  4125. "name": "aten::max(Tensor self) -> Tensor"
  4126. },
  4127. {
  4128. "name": "aten::max.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)"
  4129. },
  4130. {
  4131. "name": "aten::max.dim_max(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!) indices)"
  4132. },
  4133. {
  4134. "name": "aten::max.names_dim(Tensor self, str dim, bool keepdim=False) -> (Tensor values, Tensor indices)"
  4135. },
  4136. {
  4137. "name": "aten::max.names_dim_max(Tensor self, str dim, bool keepdim=False, *, Tensor(a!) max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!) indices)"
  4138. },
  4139. {
  4140. "name": "aten::max.unary_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  4141. },
  4142. {
  4143. "name": "aten::max.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  4144. },
  4145. {
  4146. "name": "aten::max_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=[0], int[1] dilation=[1], bool ceil_mode=False) -> Tensor",
  4147. "category": "Pool"
  4148. },
  4149. {
  4150. "name": "aten::max_pool1d_with_indices(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=[0], int[1] dilation=[1], bool ceil_mode=False) -> (Tensor, Tensor)",
  4151. "category": "Pool"
  4152. },
  4153. {
  4154. "name": "aten::max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor",
  4155. "category": "Pool"
  4156. },
  4157. {
  4158. "name": "aten::max_pool2d_with_indices(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor)",
  4159. "category": "Pool"
  4160. },
  4161. {
  4162. "name": "aten::max_pool2d_with_indices.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))"
  4163. },
  4164. {
  4165. "name": "aten::max_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor",
  4166. "category": "Pool"
  4167. },
  4168. {
  4169. "name": "aten::max_pool3d_with_indices(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor)"
  4170. },
  4171. {
  4172. "name": "aten::max_pool3d_with_indices.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))"
  4173. },
  4174. {
  4175. "name": "aten::max_pool3d_with_indices_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, int[3] dilation, bool ceil_mode, Tensor indices) -> Tensor"
  4176. },
  4177. {
  4178. "name": "aten::max_pool3d_with_indices_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, int[3] dilation, bool ceil_mode, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)"
  4179. },
  4180. {
  4181. "name": "aten::max_unpool2d(Tensor self, Tensor indices, SymInt[2] output_size) -> Tensor",
  4182. "category": "Pool"
  4183. },
  4184. {
  4185. "name": "aten::max_unpool2d.out(Tensor self, Tensor indices, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!)"
  4186. },
  4187. {
  4188. "name": "aten::max_unpool3d(Tensor self, Tensor indices, SymInt[3] output_size, int[3] stride, int[3] padding) -> Tensor",
  4189. "category": "Pool"
  4190. },
  4191. {
  4192. "name": "aten::max_unpool3d.out(Tensor self, Tensor indices, SymInt[3] output_size, int[3] stride, int[3] padding, *, Tensor(a!) out) -> Tensor(a!)"
  4193. },
  4194. {
  4195. "name": "aten::maximum(Tensor self, Tensor other) -> Tensor"
  4196. },
  4197. {
  4198. "name": "aten::maximum.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  4199. },
  4200. {
  4201. "name": "aten::mean(Tensor self, *, ScalarType? dtype=None) -> Tensor"
  4202. },
  4203. {
  4204. "name": "aten::mean.dim(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor"
  4205. },
  4206. {
  4207. "name": "aten::mean.names_dim(Tensor self, str[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor"
  4208. },
  4209. {
  4210. "name": "aten::mean.names_out(Tensor self, str[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)"
  4211. },
  4212. {
  4213. "name": "aten::mean.out(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)"
  4214. },
  4215. {
  4216. "name": "aten::mean.dtype_out(Tensor self, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)"
  4217. },
  4218. {
  4219. "name": "aten::median(Tensor self) -> Tensor"
  4220. },
  4221. {
  4222. "name": "aten::median.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)"
  4223. },
  4224. {
  4225. "name": "aten::median.dim_values(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)"
  4226. },
  4227. {
  4228. "name": "aten::median.names_dim(Tensor self, str dim, bool keepdim=False) -> (Tensor values, Tensor indices)"
  4229. },
  4230. {
  4231. "name": "aten::median.names_dim_values(Tensor self, str dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)"
  4232. },
  4233. {
  4234. "name": "aten::median.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  4235. },
  4236. {
  4237. "name": "aten::meshgrid(Tensor[] tensors) -> Tensor[]",
  4238. "category": "Tensor"
  4239. },
  4240. {
  4241. "name": "aten::meshgrid.indexing(Tensor[] tensors, *, str indexing) -> Tensor[]",
  4242. "category": "Tensor"
  4243. },
  4244. {
  4245. "name": "aten::min.other(Tensor self, Tensor other) -> Tensor"
  4246. },
  4247. {
  4248. "name": "aten::min(Tensor self) -> Tensor"
  4249. },
  4250. {
  4251. "name": "aten::min.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)"
  4252. },
  4253. {
  4254. "name": "aten::min.dim_min(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) min, Tensor(b!) min_indices) -> (Tensor(a!) values, Tensor(b!) indices)"
  4255. },
  4256. {
  4257. "name": "aten::min.names_dim(Tensor self, str dim, bool keepdim=False) -> (Tensor values, Tensor indices)"
  4258. },
  4259. {
  4260. "name": "aten::min.names_dim_min(Tensor self, str dim, bool keepdim=False, *, Tensor(a!) min, Tensor(b!) min_indices) -> (Tensor(a!) values, Tensor(b!) indices)"
  4261. },
  4262. {
  4263. "name": "aten::min.unary_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  4264. },
  4265. {
  4266. "name": "aten::min.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  4267. },
  4268. {
  4269. "name": "aten::minimum(Tensor self, Tensor other) -> Tensor"
  4270. },
  4271. {
  4272. "name": "aten::minimum.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  4273. },
  4274. {
  4275. "name": "aten::mish(Tensor self) -> Tensor",
  4276. "category": "Activation"
  4277. },
  4278. {
  4279. "name": "aten::mish.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  4280. },
  4281. {
  4282. "name": "aten::mish_(Tensor(a!) self) -> Tensor(a!)",
  4283. "category": "Activation"
  4284. },
  4285. {
  4286. "name": "aten::mkldnn_reorder_conv2d_weight.out(Tensor self, SymInt[2] padding=[0, 0], SymInt[2] stride=[1, 1], SymInt[2] dilation=[1, 1], SymInt groups=1, SymInt[]? input_size=None, *, Tensor(a!) out) -> Tensor(a!)"
  4287. },
  4288. {
  4289. "name": "aten::mkldnn_reorder_conv2d_weight(Tensor self, SymInt[2] padding=[0, 0], SymInt[2] stride=[1, 1], SymInt[2] dilation=[1, 1], SymInt groups=1, SymInt[]? input_size=None) -> Tensor"
  4290. },
  4291. {
  4292. "name": "aten::mkldnn_rnn_layer(Tensor input, Tensor weight0, Tensor weight1, Tensor weight2, Tensor weight3, Tensor hx_, Tensor cx_, bool reverse, int[] batch_sizes, int mode, int hidden_size, int num_layers, bool has_biases, bool bidirectional, bool batch_first, bool train) -> (Tensor, Tensor, Tensor, Tensor)"
  4293. },
  4294. {
  4295. "name": "aten::mkldnn_rnn_layer.out(Tensor input, Tensor weight0, Tensor weight1, Tensor weight2, Tensor weight3, Tensor hx_, Tensor cx_, bool reverse, int[] batch_sizes, int mode, int hidden_size, int num_layers, bool has_biases, bool bidirectional, bool batch_first, bool train, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))"
  4296. },
  4297. {
  4298. "name": "aten::mm(Tensor self, Tensor mat2) -> Tensor"
  4299. },
  4300. {
  4301. "name": "aten::mm.out(Tensor self, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!)"
  4302. },
  4303. {
  4304. "name": "aten::mm.dtype_out(Tensor self, Tensor mat2, ScalarType out_dtype, *, Tensor(a!) out) -> Tensor(a!)"
  4305. },
  4306. {
  4307. "name": "aten::mm.dtype(Tensor self, Tensor mat2, ScalarType out_dtype) -> Tensor"
  4308. },
  4309. {
  4310. "name": "aten::mode(Tensor self, int dim=-1, bool keepdim=False) -> (Tensor values, Tensor indices)"
  4311. },
  4312. {
  4313. "name": "aten::mode.dimname(Tensor self, str dim, bool keepdim=False) -> (Tensor values, Tensor indices)"
  4314. },
  4315. {
  4316. "name": "aten::mode.dimname_out(Tensor self, str dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)"
  4317. },
  4318. {
  4319. "name": "aten::mode.values(Tensor self, int dim=-1, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)"
  4320. },
  4321. {
  4322. "name": "aten::moveaxis.intlist(Tensor(a) self, int[] source, int[] destination) -> Tensor(a)"
  4323. },
  4324. {
  4325. "name": "aten::moveaxis.int(Tensor(a) self, int source, int destination) -> Tensor(a)"
  4326. },
  4327. {
  4328. "name": "aten::movedim.int(Tensor(a) self, int source, int destination) -> Tensor(a)"
  4329. },
  4330. {
  4331. "name": "aten::movedim.intlist(Tensor(a) self, int[] source, int[] destination) -> Tensor(a)"
  4332. },
  4333. {
  4334. "name": "aten::mse_loss(Tensor self, Tensor target, int reduction=1) -> Tensor"
  4335. },
  4336. {
  4337. "name": "aten::mse_loss.out(Tensor self, Tensor target, int reduction=1, *, Tensor(a!) out) -> Tensor(a!)"
  4338. },
  4339. {
  4340. "name": "aten::mse_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction) -> Tensor"
  4341. },
  4342. {
  4343. "name": "aten::mse_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, *, Tensor(a!) grad_input) -> Tensor(a!)"
  4344. },
  4345. {
  4346. "name": "aten::mul.Tensor(Tensor self, Tensor other) -> Tensor"
  4347. },
  4348. {
  4349. "name": "aten::mul.Scalar(Tensor self, Scalar other) -> Tensor"
  4350. },
  4351. {
  4352. "name": "aten::mul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  4353. },
  4354. {
  4355. "name": "aten::mul.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)"
  4356. },
  4357. {
  4358. "name": "aten::mul.left_t(t[] l, int n) -> t[]"
  4359. },
  4360. {
  4361. "name": "aten::mul.right_(int n, t[] l) -> t[]"
  4362. },
  4363. {
  4364. "name": "aten::mul.int(int a, int b) -> int"
  4365. },
  4366. {
  4367. "name": "aten::mul.complex(complex a, complex b) -> complex"
  4368. },
  4369. {
  4370. "name": "aten::mul.float(float a, float b) -> float"
  4371. },
  4372. {
  4373. "name": "aten::mul.int_complex(int a, complex b) -> complex"
  4374. },
  4375. {
  4376. "name": "aten::mul.complex_int(complex a, int b) -> complex"
  4377. },
  4378. {
  4379. "name": "aten::mul.float_complex(float a, complex b) -> complex"
  4380. },
  4381. {
  4382. "name": "aten::mul.complex_float(complex a, float b) -> complex"
  4383. },
  4384. {
  4385. "name": "aten::mul.int_float(int a, float b) -> float"
  4386. },
  4387. {
  4388. "name": "aten::mul.float_int(float a, int b) -> float"
  4389. },
  4390. {
  4391. "name": "aten::mul(Scalar a, Scalar b) -> Scalar"
  4392. },
  4393. {
  4394. "name": "aten::mul_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)"
  4395. },
  4396. {
  4397. "name": "aten::mul_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)"
  4398. },
  4399. {
  4400. "name": "aten::mul_.t(t[](a!) l, int n) -> t[](a!)"
  4401. },
  4402. {
  4403. "name": "aten::multinomial(Tensor self, SymInt num_samples, bool replacement=False, *, Generator? generator=None) -> Tensor"
  4404. },
  4405. {
  4406. "name": "aten::multinomial.out(Tensor self, SymInt num_samples, bool replacement=False, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)"
  4407. },
  4408. {
  4409. "name": "aten::multiply.Tensor(Tensor self, Tensor other) -> Tensor"
  4410. },
  4411. {
  4412. "name": "aten::multiply.Scalar(Tensor self, Scalar other) -> Tensor"
  4413. },
  4414. {
  4415. "name": "aten::multiply.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  4416. },
  4417. {
  4418. "name": "aten::multiply_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)"
  4419. },
  4420. {
  4421. "name": "aten::multiply_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)"
  4422. },
  4423. {
  4424. "name": "aten::mv(Tensor self, Tensor vec) -> Tensor"
  4425. },
  4426. {
  4427. "name": "aten::mv.out(Tensor self, Tensor vec, *, Tensor(a!) out) -> Tensor(a!)"
  4428. },
  4429. {
  4430. "name": "aten::mvlgamma(Tensor self, int p) -> Tensor"
  4431. },
  4432. {
  4433. "name": "aten::mvlgamma.out(Tensor self, int p, *, Tensor(a!) out) -> Tensor(a!)"
  4434. },
  4435. {
  4436. "name": "aten::mvlgamma_(Tensor(a!) self, int p) -> Tensor(a!)"
  4437. },
  4438. {
  4439. "name": "aten::nan_to_num(Tensor self, float? nan=None, float? posinf=None, float? neginf=None) -> Tensor"
  4440. },
  4441. {
  4442. "name": "aten::nan_to_num.out(Tensor self, float? nan=None, float? posinf=None, float? neginf=None, *, Tensor(a!) out) -> Tensor(a!)"
  4443. },
  4444. {
  4445. "name": "aten::nan_to_num_(Tensor(a!) self, float? nan=None, float? posinf=None, float? neginf=None) -> Tensor(a!)"
  4446. },
  4447. {
  4448. "name": "aten::narrow(Tensor(a) self, int dim, SymInt start, SymInt length) -> Tensor(a)"
  4449. },
  4450. {
  4451. "name": "aten::narrow.Tensor(Tensor(a) self, int dim, Tensor start, SymInt length) -> Tensor(a)"
  4452. },
  4453. {
  4454. "name": "aten::narrow_copy(Tensor self, int dim, SymInt start, SymInt length) -> Tensor"
  4455. },
  4456. {
  4457. "name": "aten::narrow_copy.out(Tensor self, int dim, SymInt start, SymInt length, *, Tensor(a!) out) -> Tensor(a!)"
  4458. },
  4459. {
  4460. "name": "aten::native_dropout(Tensor input, float p, bool? train) -> (Tensor, Tensor)"
  4461. },
  4462. {
  4463. "name": "aten::native_dropout.out(Tensor input, float p, bool? train, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))"
  4464. },
  4465. {
  4466. "name": "aten::native_group_norm(Tensor input, Tensor? weight, Tensor? bias, SymInt N, SymInt C, SymInt HxW, int group, float eps) -> (Tensor, Tensor, Tensor)"
  4467. },
  4468. {
  4469. "name": "aten::native_group_norm.out(Tensor input, Tensor? weight, Tensor? bias, SymInt N, SymInt C, SymInt HxW, int group, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))"
  4470. },
  4471. {
  4472. "name": "aten::native_layer_norm(Tensor input, SymInt[] normalized_shape, Tensor? weight, Tensor? bias, float eps) -> (Tensor, Tensor, Tensor)",
  4473. "category": "Normalization"
  4474. },
  4475. {
  4476. "name": "aten::native_layer_norm.out(Tensor input, SymInt[] normalized_shape, Tensor? weight, Tensor? bias, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))",
  4477. "category": "Normalization"
  4478. },
  4479. {
  4480. "name": "aten::ne.Tensor(Tensor self, Tensor other) -> Tensor"
  4481. },
  4482. {
  4483. "name": "aten::ne.Scalar(Tensor self, Scalar other) -> Tensor"
  4484. },
  4485. {
  4486. "name": "aten::ne.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)"
  4487. },
  4488. {
  4489. "name": "aten::ne.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  4490. },
  4491. {
  4492. "name": "aten::ne.int_list(int[] a, int[] b) -> bool"
  4493. },
  4494. {
  4495. "name": "aten::ne.device(Device a, Device b) -> bool"
  4496. },
  4497. {
  4498. "name": "aten::ne.bool(bool a, bool b) -> bool"
  4499. },
  4500. {
  4501. "name": "aten::ne.enum(AnyEnumType a, AnyEnumType b) -> bool"
  4502. },
  4503. {
  4504. "name": "aten::ne.int(int a, int b) -> bool"
  4505. },
  4506. {
  4507. "name": "aten::ne.complex(complex a, complex b) -> bool"
  4508. },
  4509. {
  4510. "name": "aten::ne.float(float a, float b) -> bool"
  4511. },
  4512. {
  4513. "name": "aten::ne.int_float(int a, float b) -> bool"
  4514. },
  4515. {
  4516. "name": "aten::ne.float_int(float a, int b) -> bool"
  4517. },
  4518. {
  4519. "name": "aten::ne.float_complex(float a, complex b) -> bool"
  4520. },
  4521. {
  4522. "name": "aten::ne.complex_float(complex a, float b) -> bool"
  4523. },
  4524. {
  4525. "name": "aten::ne(Scalar a, Scalar b) -> bool"
  4526. },
  4527. {
  4528. "name": "aten::ne.str(str a, str b) -> bool"
  4529. },
  4530. {
  4531. "name": "aten::ne.float_list(float[] a, float[] b) -> bool"
  4532. },
  4533. {
  4534. "name": "aten::ne.Tensor_list(Tensor[] a, Tensor[] b) -> bool"
  4535. },
  4536. {
  4537. "name": "aten::ne.bool_list(bool[] a, bool[] b) -> bool"
  4538. },
  4539. {
  4540. "name": "aten::ne.str_list(str[] a, str[] b) -> bool"
  4541. },
  4542. {
  4543. "name": "aten::neg(Tensor self) -> Tensor"
  4544. },
  4545. {
  4546. "name": "aten::neg.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  4547. },
  4548. {
  4549. "name": "aten::neg.int(int a) -> int"
  4550. },
  4551. {
  4552. "name": "aten::neg.float(float a) -> float"
  4553. },
  4554. {
  4555. "name": "aten::neg.complex(complex a) -> complex"
  4556. },
  4557. {
  4558. "name": "aten::neg.Scalar(Scalar a) -> Scalar"
  4559. },
  4560. {
  4561. "name": "aten::nested_to_padded_tensor(Tensor self, float padding, int[]? output_size=None) -> Tensor"
  4562. },
  4563. {
  4564. "name": "aten::new_empty(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  4565. },
  4566. {
  4567. "name": "aten::new_empty.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)"
  4568. },
  4569. {
  4570. "name": "aten::new_empty_strided(Tensor self, SymInt[] size, SymInt[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  4571. },
  4572. {
  4573. "name": "aten::new_empty_strided.out(Tensor self, SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!)"
  4574. },
  4575. {
  4576. "name": "aten::new_full(Tensor self, SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  4577. },
  4578. {
  4579. "name": "aten::new_full.out(Tensor self, SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!)"
  4580. },
  4581. {
  4582. "name": "aten::new_ones(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  4583. },
  4584. {
  4585. "name": "aten::new_ones.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)"
  4586. },
  4587. {
  4588. "name": "aten::new_zeros(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  4589. },
  4590. {
  4591. "name": "aten::new_zeros.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)"
  4592. },
  4593. {
  4594. "name": "aten::nll_loss(Tensor self, Tensor target, Tensor? weight=None, int reduction=1, SymInt ignore_index=-100) -> Tensor"
  4595. },
  4596. {
  4597. "name": "aten::nll_loss.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=1, SymInt ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!)"
  4598. },
  4599. {
  4600. "name": "aten::nll_loss2d(Tensor self, Tensor target, Tensor? weight=None, int reduction=1, SymInt ignore_index=-100) -> Tensor"
  4601. },
  4602. {
  4603. "name": "aten::nll_loss2d.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=1, SymInt ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!)"
  4604. },
  4605. {
  4606. "name": "aten::nll_loss_nd(Tensor self, Tensor target, Tensor? weight=None, int reduction=1, SymInt ignore_index=-100) -> Tensor"
  4607. },
  4608. {
  4609. "name": "aten::nonzero(Tensor self) -> Tensor"
  4610. },
  4611. {
  4612. "name": "aten::nonzero.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  4613. },
  4614. {
  4615. "name": "aten::nonzero_numpy(Tensor self) -> Tensor[]"
  4616. },
  4617. {
  4618. "name": "aten::norm.Scalar(Tensor self, Scalar p=2) -> Tensor"
  4619. },
  4620. {
  4621. "name": "aten::norm.ScalarOpt_dim(Tensor self, Scalar? p, int[1] dim, bool keepdim=False) -> Tensor"
  4622. },
  4623. {
  4624. "name": "aten::norm.names_ScalarOpt_dim(Tensor self, Scalar? p, str[1] dim, bool keepdim=False) -> Tensor"
  4625. },
  4626. {
  4627. "name": "aten::norm.ScalarOpt_dim_dtype(Tensor self, Scalar? p, int[1] dim, bool keepdim, *, ScalarType dtype) -> Tensor"
  4628. },
  4629. {
  4630. "name": "aten::norm.dtype_out(Tensor self, Scalar? p, int[1] dim, bool keepdim, *, ScalarType dtype, Tensor(a!) out) -> Tensor(a!)"
  4631. },
  4632. {
  4633. "name": "aten::norm.out(Tensor self, Scalar? p, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)"
  4634. },
  4635. {
  4636. "name": "aten::norm.ScalarOpt_dtype(Tensor self, Scalar? p, *, ScalarType dtype) -> Tensor"
  4637. },
  4638. {
  4639. "name": "aten::norm.ScalarOpt_dtype_out(Tensor self, Scalar? p, *, ScalarType dtype, Tensor(a!) out) -> Tensor(a!)"
  4640. },
  4641. {
  4642. "name": "aten::norm.Scalar_out(Tensor self, Scalar p=2, *, Tensor(a!) out) -> Tensor(a!)"
  4643. },
  4644. {
  4645. "name": "aten::norm.names_ScalarOpt_dim_dtype(Tensor self, Scalar? p, str[1] dim, bool keepdim, *, ScalarType dtype) -> Tensor"
  4646. },
  4647. {
  4648. "name": "aten::norm.names_dtype_out(Tensor self, Scalar? p, str[1] dim, bool keepdim, *, ScalarType dtype, Tensor(a!) out) -> Tensor(a!)"
  4649. },
  4650. {
  4651. "name": "aten::norm.names_out(Tensor self, Scalar? p, str[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)"
  4652. },
  4653. {
  4654. "name": "aten::normal.Tensor_float(Tensor mean, float std=1., *, Generator? generator=None) -> Tensor"
  4655. },
  4656. {
  4657. "name": "aten::normal.Tensor_float_out(Tensor mean, float std=1., *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)"
  4658. },
  4659. {
  4660. "name": "aten::normal.float_Tensor_out(float mean, Tensor std, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)"
  4661. },
  4662. {
  4663. "name": "aten::normal.float_Tensor(float mean, Tensor std, *, Generator? generator=None) -> Tensor"
  4664. },
  4665. {
  4666. "name": "aten::normal.Tensor_Tensor(Tensor mean, Tensor std, *, Generator? generator=None) -> Tensor"
  4667. },
  4668. {
  4669. "name": "aten::normal.Tensor_Tensor_out(Tensor mean, Tensor std, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)"
  4670. },
  4671. {
  4672. "name": "aten::normal.float_float(float mean, float std, SymInt[] size, *, Generator? generator=None, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  4673. },
  4674. {
  4675. "name": "aten::normal.float_float_out(float mean, float std, SymInt[] size, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)"
  4676. },
  4677. {
  4678. "name": "aten::normal.out(Tensor self, float mean=0., float std=1., *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)"
  4679. },
  4680. {
  4681. "name": "aten::normal_(Tensor(a!) self, float mean=0., float std=1., *, Generator? generator=None) -> Tensor(a!)"
  4682. },
  4683. {
  4684. "name": "aten::not_equal.Tensor(Tensor self, Tensor other) -> Tensor"
  4685. },
  4686. {
  4687. "name": "aten::not_equal.Scalar(Tensor self, Scalar other) -> Tensor"
  4688. },
  4689. {
  4690. "name": "aten::not_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)"
  4691. },
  4692. {
  4693. "name": "aten::not_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  4694. },
  4695. {
  4696. "name": "aten::nuclear_norm(Tensor self, bool keepdim=False) -> Tensor"
  4697. },
  4698. {
  4699. "name": "aten::nuclear_norm.dim(Tensor self, int[2] dim, bool keepdim=False) -> Tensor"
  4700. },
  4701. {
  4702. "name": "aten::nuclear_norm.out(Tensor self, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)"
  4703. },
  4704. {
  4705. "name": "aten::nuclear_norm.dim_out(Tensor self, int[2] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)"
  4706. },
  4707. {
  4708. "name": "aten::numel(Tensor self) -> int"
  4709. },
  4710. {
  4711. "name": "aten::numpy_T(Tensor(a) self) -> Tensor(a)"
  4712. },
  4713. {
  4714. "name": "aten::numpy_T.a(Tensor(a) self) -> Tensor(a)"
  4715. },
  4716. {
  4717. "name": "aten::one_hot(Tensor self, int num_classes=-1) -> Tensor"
  4718. },
  4719. {
  4720. "name": "aten::ones.names(int[] size, *, str[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  4721. },
  4722. {
  4723. "name": "aten::ones(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  4724. },
  4725. {
  4726. "name": "aten::ones.names_out(int[] size, *, str[]? names, Tensor(a!) out) -> Tensor(a!)"
  4727. },
  4728. {
  4729. "name": "aten::ones.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)"
  4730. },
  4731. {
  4732. "name": "aten::ones_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor"
  4733. },
  4734. {
  4735. "name": "aten::ones_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)"
  4736. },
  4737. {
  4738. "name": "aten::ord(str string) -> int"
  4739. },
  4740. {
  4741. "name": "aten::outer(Tensor self, Tensor vec2) -> Tensor"
  4742. },
  4743. {
  4744. "name": "aten::outer.out(Tensor self, Tensor vec2, *, Tensor(a!) out) -> Tensor(a!)"
  4745. },
  4746. {
  4747. "name": "aten::pad(Tensor self, SymInt[] pad, str mode=\"constant\", float? value=None) -> Tensor",
  4748. "category": "Tensor"
  4749. },
  4750. {
  4751. "name": "aten::pad_sequence(Tensor[] sequences, bool batch_first=False, float padding_value=0., str padding_side=\"right\") -> Tensor"
  4752. },
  4753. {
  4754. "name": "aten::pairwise_distance(Tensor x1, Tensor x2, float p=2., float eps=9.9999999999999995e-07, bool keepdim=False) -> Tensor"
  4755. },
  4756. {
  4757. "name": "aten::pdist(Tensor self, float p=2.) -> Tensor"
  4758. },
  4759. {
  4760. "name": "aten::percentFormat(str self, ...) -> str"
  4761. },
  4762. {
  4763. "name": "aten::permute(Tensor(a) self, int[] dims) -> Tensor(a)",
  4764. "category": "Shape"
  4765. },
  4766. {
  4767. "name": "aten::permute_copy(Tensor self, int[] dims) -> Tensor"
  4768. },
  4769. {
  4770. "name": "aten::permute_copy.out(Tensor self, int[] dims, *, Tensor(a!) out) -> Tensor(a!)"
  4771. },
  4772. {
  4773. "name": "aten::pin_memory(Tensor(a) self, Device? device=None) -> Tensor(a)"
  4774. },
  4775. {
  4776. "name": "aten::pinverse(Tensor self, float rcond=1.0000000000000001e-15) -> Tensor"
  4777. },
  4778. {
  4779. "name": "aten::pixel_shuffle(Tensor self, int upscale_factor) -> Tensor"
  4780. },
  4781. {
  4782. "name": "aten::pixel_shuffle.out(Tensor self, int upscale_factor, *, Tensor(a!) out) -> Tensor(a!)"
  4783. },
  4784. {
  4785. "name": "aten::pixel_unshuffle(Tensor self, int downscale_factor) -> Tensor"
  4786. },
  4787. {
  4788. "name": "aten::pixel_unshuffle.out(Tensor self, int downscale_factor, *, Tensor(a!) out) -> Tensor(a!)"
  4789. },
  4790. {
  4791. "name": "aten::poisson(Tensor self, Generator? generator=None) -> Tensor"
  4792. },
  4793. {
  4794. "name": "aten::poisson.out(Tensor self, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!)"
  4795. },
  4796. {
  4797. "name": "aten::polar(Tensor abs, Tensor angle) -> Tensor"
  4798. },
  4799. {
  4800. "name": "aten::polar.out(Tensor abs, Tensor angle, *, Tensor(a!) out) -> Tensor(a!)"
  4801. },
  4802. {
  4803. "name": "aten::polar.int(int a, int b) -> complex"
  4804. },
  4805. {
  4806. "name": "aten::polar.float(float a, float b) -> complex"
  4807. },
  4808. {
  4809. "name": "aten::polar.int_float(int a, float b) -> complex"
  4810. },
  4811. {
  4812. "name": "aten::polar.float_int(float a, int b) -> complex"
  4813. },
  4814. {
  4815. "name": "aten::polar.Scalar_Scalar(Scalar a, Scalar b) -> Scalar"
  4816. },
  4817. {
  4818. "name": "aten::pop.t(t[](a!) self, int idx=-1) -> t(*)"
  4819. },
  4820. {
  4821. "name": "aten::pop.Dict_str(Dict(str, t)(a!) self, str key) -> t(*)"
  4822. },
  4823. {
  4824. "name": "aten::pop.Dict_default_str(Dict(str, t)(a!) self, str key, t default_value) -> t(*)"
  4825. },
  4826. {
  4827. "name": "aten::pop.Dict_int(Dict(int, t)(a!) self, int key) -> t(*)"
  4828. },
  4829. {
  4830. "name": "aten::pop.Dict_default_int(Dict(int, t)(a!) self, int key, t default_value) -> t(*)"
  4831. },
  4832. {
  4833. "name": "aten::pop.Dict_bool(Dict(bool, t)(a!) self, bool key) -> t(*)"
  4834. },
  4835. {
  4836. "name": "aten::pop.Dict_default_bool(Dict(bool, t)(a!) self, bool key, t default_value) -> t(*)"
  4837. },
  4838. {
  4839. "name": "aten::pop.Dict_float(Dict(float, t)(a!) self, float key) -> t(*)"
  4840. },
  4841. {
  4842. "name": "aten::pop.Dict_default_float(Dict(float, t)(a!) self, float key, t default_value) -> t(*)"
  4843. },
  4844. {
  4845. "name": "aten::pop.Dict_complex(Dict(complex, t)(a!) self, complex key) -> t(*)"
  4846. },
  4847. {
  4848. "name": "aten::pop.Dict_default_complex(Dict(complex, t)(a!) self, complex key, t default_value) -> t(*)"
  4849. },
  4850. {
  4851. "name": "aten::pop.Dict_Tensor(Dict(Tensor, t)(a!) self, Tensor key) -> t(*)"
  4852. },
  4853. {
  4854. "name": "aten::pop.Dict_default_Tensor(Dict(Tensor, t)(a!) self, Tensor key, t default_value) -> t(*)"
  4855. },
  4856. {
  4857. "name": "aten::pow.Tensor_Tensor(Tensor self, Tensor exponent) -> Tensor"
  4858. },
  4859. {
  4860. "name": "aten::pow.Tensor_Scalar(Tensor self, Scalar exponent) -> Tensor"
  4861. },
  4862. {
  4863. "name": "aten::pow.Scalar(Scalar self, Tensor exponent) -> Tensor"
  4864. },
  4865. {
  4866. "name": "aten::pow.Scalar_out(Scalar self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!)"
  4867. },
  4868. {
  4869. "name": "aten::pow.Tensor_Scalar_out(Tensor self, Scalar exponent, *, Tensor(a!) out) -> Tensor(a!)"
  4870. },
  4871. {
  4872. "name": "aten::pow.Tensor_Tensor_out(Tensor self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!)"
  4873. },
  4874. {
  4875. "name": "aten::pow.int(int a, int b) -> float"
  4876. },
  4877. {
  4878. "name": "aten::pow.complex(complex a, complex b) -> complex"
  4879. },
  4880. {
  4881. "name": "aten::pow.float(float a, float b) -> float"
  4882. },
  4883. {
  4884. "name": "aten::pow.int_float(int a, float b) -> float"
  4885. },
  4886. {
  4887. "name": "aten::pow.float_int(float a, int b) -> float"
  4888. },
  4889. {
  4890. "name": "aten::pow.float_complex(float a, complex b) -> complex"
  4891. },
  4892. {
  4893. "name": "aten::pow.complex_float(complex a, float b) -> complex"
  4894. },
  4895. {
  4896. "name": "aten::pow.Scalar_Scalar(Scalar a, Scalar b) -> float"
  4897. },
  4898. {
  4899. "name": "aten::pow.int_to_int(int a, int b) -> int"
  4900. },
  4901. {
  4902. "name": "aten::pow_.Scalar(Tensor(a!) self, Scalar exponent) -> Tensor(a!)"
  4903. },
  4904. {
  4905. "name": "aten::pow_.Tensor(Tensor(a!) self, Tensor exponent) -> Tensor(a!)"
  4906. },
  4907. {
  4908. "name": "aten::prelu(Tensor self, Tensor weight) -> Tensor",
  4909. "category": "Activation"
  4910. },
  4911. {
  4912. "name": "aten::prod(Tensor self, *, ScalarType? dtype=None) -> Tensor"
  4913. },
  4914. {
  4915. "name": "aten::prod.dim_int(Tensor self, int dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor"
  4916. },
  4917. {
  4918. "name": "aten::prod.dim_Dimname(Tensor self, str dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor"
  4919. },
  4920. {
  4921. "name": "aten::prod.Dimname_out(Tensor self, str dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)"
  4922. },
  4923. {
  4924. "name": "aten::prod.int_out(Tensor self, int dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)"
  4925. },
  4926. {
  4927. "name": "aten::prod.out(Tensor self, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)"
  4928. },
  4929. {
  4930. "name": "aten::put_(Tensor(a!) self, Tensor index, Tensor source, bool accumulate=False) -> Tensor(a!)"
  4931. },
  4932. {
  4933. "name": "aten::quantile(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation=\"linear\") -> Tensor"
  4934. },
  4935. {
  4936. "name": "aten::quantile.scalar(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation=\"linear\") -> Tensor"
  4937. },
  4938. {
  4939. "name": "aten::quantile.out(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation=\"linear\", Tensor(a!) out) -> Tensor(a!)"
  4940. },
  4941. {
  4942. "name": "aten::quantile.scalar_out(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation=\"linear\", Tensor(a!) out) -> Tensor(a!)"
  4943. },
  4944. {
  4945. "name": "aten::quantize_per_channel(Tensor self, Tensor scales, Tensor zero_points, int axis, ScalarType dtype) -> Tensor",
  4946. "category": "Quantization"
  4947. },
  4948. {
  4949. "name": "aten::quantize_per_channel.out(Tensor self, Tensor scales, Tensor zero_points, int axis, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!)"
  4950. },
  4951. {
  4952. "name": "aten::quantize_per_tensor(Tensor self, float scale, int zero_point, ScalarType dtype) -> Tensor",
  4953. "category": "Quantization"
  4954. },
  4955. {
  4956. "name": "aten::quantize_per_tensor.tensor_qparams(Tensor self, Tensor scale, Tensor zero_point, ScalarType dtype) -> Tensor",
  4957. "category": "Quantization"
  4958. },
  4959. {
  4960. "name": "aten::quantize_per_tensor.tensors(Tensor[] tensors, Tensor scales, Tensor zero_points, ScalarType dtype) -> Tensor[]",
  4961. "category": "Quantization"
  4962. },
  4963. {
  4964. "name": "aten::quantize_per_tensor.out(Tensor self, float scale, int zero_point, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!)"
  4965. },
  4966. {
  4967. "name": "aten::quantize_per_tensor.tensor_qparams_out(Tensor self, Tensor scale, Tensor zero_point, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!)"
  4968. },
  4969. {
  4970. "name": "aten::quantize_per_tensor.tensors_out(Tensor[] tensors, Tensor scales, Tensor zero_points, ScalarType dtype, *, Tensor(a!)[] out) -> ()"
  4971. },
  4972. {
  4973. "name": "aten::quantize_per_tensor_dynamic(Tensor self, ScalarType dtype, bool reduce_range) -> Tensor",
  4974. "category": "Quantization"
  4975. },
  4976. {
  4977. "name": "aten::quantize_per_tensor_dynamic.out(Tensor self, ScalarType dtype, bool reduce_range, *, Tensor(a!) out) -> Tensor(a!)"
  4978. },
  4979. {
  4980. "name": "aten::quantized_gru.input(Tensor input, Tensor hx, __torch__.torch.classes.rnn.CellParamsBase[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor)",
  4981. "category": "Layer"
  4982. },
  4983. {
  4984. "name": "aten::quantized_gru.data(Tensor data, Tensor batch_sizes, Tensor hx, __torch__.torch.classes.rnn.CellParamsBase[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor)",
  4985. "category": "Layer"
  4986. },
  4987. {
  4988. "name": "aten::quantized_gru.input_legacy(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor)",
  4989. "category": "Layer"
  4990. },
  4991. {
  4992. "name": "aten::quantized_gru.data_legacy(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor)",
  4993. "category": "Layer"
  4994. },
  4995. {
  4996. "name": "aten::quantized_lstm.input(Tensor input, Tensor[] hx, __torch__.torch.classes.rnn.CellParamsBase[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first, *, ScalarType? dtype=None, bool use_dynamic=False) -> (Tensor, Tensor, Tensor)",
  4997. "category": "Layer"
  4998. },
  4999. {
  5000. "name": "aten::quantized_lstm.data(Tensor data, Tensor batch_sizes, Tensor[] hx, __torch__.torch.classes.rnn.CellParamsBase[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, *, ScalarType? dtype=None, bool use_dynamic=False) -> (Tensor, Tensor, Tensor)",
  5001. "category": "Layer"
  5002. },
  5003. {
  5004. "name": "aten::quantized_lstm.input_legacy(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first, *, ScalarType? dtype=None, bool use_dynamic=False) -> (Tensor, Tensor, Tensor)",
  5005. "category": "Layer"
  5006. },
  5007. {
  5008. "name": "aten::quantized_lstm.data_legacy(Tensor data, Tensor batch_sizes, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, *, ScalarType? dtype=None, bool use_dynamic=False) -> (Tensor, Tensor, Tensor)",
  5009. "category": "Layer"
  5010. },
  5011. {
  5012. "name": "aten::quantized_lstm_cell(Tensor input, Tensor[] hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> (Tensor, Tensor)"
  5013. },
  5014. {
  5015. "name": "aten::rand(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  5016. },
  5017. {
  5018. "name": "aten::rand.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  5019. },
  5020. {
  5021. "name": "aten::rand.names(SymInt[] size, *, str[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  5022. },
  5023. {
  5024. "name": "aten::rand.generator_with_names(SymInt[] size, *, Generator? generator, str[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  5025. },
  5026. {
  5027. "name": "aten::rand.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)"
  5028. },
  5029. {
  5030. "name": "aten::rand.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)"
  5031. },
  5032. {
  5033. "name": "aten::rand.names_out(SymInt[] size, *, str[]? names, Tensor(a!) out) -> Tensor(a!)"
  5034. },
  5035. {
  5036. "name": "aten::rand.generator_with_names_out(SymInt[] size, *, Generator? generator, str[]? names, Tensor(a!) out) -> Tensor(a!)"
  5037. },
  5038. {
  5039. "name": "aten::rand_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor"
  5040. },
  5041. {
  5042. "name": "aten::rand_like.generator(Tensor self, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor"
  5043. },
  5044. {
  5045. "name": "aten::rand_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)"
  5046. },
  5047. {
  5048. "name": "aten::rand_like.generator_out(Tensor self, *, Generator? generator, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)"
  5049. },
  5050. {
  5051. "name": "aten::randint(SymInt high, SymInt[] size, *, ScalarType? dtype=4, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  5052. },
  5053. {
  5054. "name": "aten::randint.generator(SymInt high, SymInt[] size, *, Generator? generator, ScalarType? dtype=4, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  5055. },
  5056. {
  5057. "name": "aten::randint.low(SymInt low, SymInt high, SymInt[] size, *, ScalarType? dtype=4, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  5058. },
  5059. {
  5060. "name": "aten::randint.low_generator(SymInt low, SymInt high, SymInt[] size, *, Generator? generator, ScalarType? dtype=4, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  5061. },
  5062. {
  5063. "name": "aten::randint.out(SymInt high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)"
  5064. },
  5065. {
  5066. "name": "aten::randint.generator_out(SymInt high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)"
  5067. },
  5068. {
  5069. "name": "aten::randint.low_out(SymInt low, SymInt high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)"
  5070. },
  5071. {
  5072. "name": "aten::randint.low_generator_out(SymInt low, SymInt high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)"
  5073. },
  5074. {
  5075. "name": "aten::randint_like(Tensor self, SymInt high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor"
  5076. },
  5077. {
  5078. "name": "aten::randint_like.Tensor(Tensor self, Tensor high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor"
  5079. },
  5080. {
  5081. "name": "aten::randint_like.low_dtype(Tensor self, SymInt low, SymInt high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor"
  5082. },
  5083. {
  5084. "name": "aten::randint_like.generator(Tensor self, SymInt high, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor"
  5085. },
  5086. {
  5087. "name": "aten::randint_like.Tensor_generator(Tensor self, Tensor high, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor"
  5088. },
  5089. {
  5090. "name": "aten::randint_like.low_generator_dtype(Tensor self, SymInt low, SymInt high, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor"
  5091. },
  5092. {
  5093. "name": "aten::randint_like.out(Tensor self, SymInt high, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)"
  5094. },
  5095. {
  5096. "name": "aten::randint_like.generator_out(Tensor self, SymInt high, *, Generator? generator, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)"
  5097. },
  5098. {
  5099. "name": "aten::randint_like.Tensor_out(Tensor self, Tensor high, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)"
  5100. },
  5101. {
  5102. "name": "aten::randint_like.Tensor_generator_out(Tensor self, Tensor high, *, Generator? generator, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)"
  5103. },
  5104. {
  5105. "name": "aten::randint_like.low_dtype_out(Tensor self, SymInt low, SymInt high, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)"
  5106. },
  5107. {
  5108. "name": "aten::randint_like.low_generator_dtype_out(Tensor self, SymInt low, SymInt high, *, Generator? generator, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)"
  5109. },
  5110. {
  5111. "name": "aten::randint_like.generator_with_low_dtype(Tensor self, SymInt low, SymInt high, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor"
  5112. },
  5113. {
  5114. "name": "aten::randint_like.generator_with_low_dtype_out(Tensor self, SymInt low, SymInt high, *, Generator? generator, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)"
  5115. },
  5116. {
  5117. "name": "aten::randn(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  5118. },
  5119. {
  5120. "name": "aten::randn.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  5121. },
  5122. {
  5123. "name": "aten::randn.names(SymInt[] size, *, str[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  5124. },
  5125. {
  5126. "name": "aten::randn.generator_with_names(SymInt[] size, *, Generator? generator, str[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  5127. },
  5128. {
  5129. "name": "aten::randn.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)"
  5130. },
  5131. {
  5132. "name": "aten::randn.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)"
  5133. },
  5134. {
  5135. "name": "aten::randn.names_out(SymInt[] size, *, str[]? names, Tensor(a!) out) -> Tensor(a!)"
  5136. },
  5137. {
  5138. "name": "aten::randn.generator_with_names_out(SymInt[] size, *, Generator? generator, str[]? names, Tensor(a!) out) -> Tensor(a!)"
  5139. },
  5140. {
  5141. "name": "aten::randn_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor"
  5142. },
  5143. {
  5144. "name": "aten::randn_like.generator(Tensor self, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor"
  5145. },
  5146. {
  5147. "name": "aten::randn_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)"
  5148. },
  5149. {
  5150. "name": "aten::randn_like.generator_out(Tensor self, *, Generator? generator, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)"
  5151. },
  5152. {
  5153. "name": "aten::random_.from(Tensor(a!) self, int from, int? to, *, Generator? generator=None) -> Tensor(a!)"
  5154. },
  5155. {
  5156. "name": "aten::random_.to(Tensor(a!) self, int to, *, Generator? generator=None) -> Tensor(a!)"
  5157. },
  5158. {
  5159. "name": "aten::random_(Tensor(a!) self, *, Generator? generator=None) -> Tensor(a!)"
  5160. },
  5161. {
  5162. "name": "aten::randperm(SymInt n, *, ScalarType? dtype=4, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  5163. },
  5164. {
  5165. "name": "aten::randperm.generator(SymInt n, *, Generator? generator, ScalarType? dtype=4, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  5166. },
  5167. {
  5168. "name": "aten::randperm.out(SymInt n, *, Tensor(a!) out) -> Tensor(a!)"
  5169. },
  5170. {
  5171. "name": "aten::randperm.generator_out(SymInt n, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)"
  5172. },
  5173. {
  5174. "name": "aten::range.step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  5175. },
  5176. {
  5177. "name": "aten::range(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  5178. },
  5179. {
  5180. "name": "aten::range.out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)"
  5181. },
  5182. {
  5183. "name": "aten::range.out_(Scalar start, Scalar end, *, Tensor(a!) out) -> Tensor(a!)"
  5184. },
  5185. {
  5186. "name": "aten::ravel(Tensor(a) self) -> Tensor(a)"
  5187. },
  5188. {
  5189. "name": "aten::real(Tensor(a) self) -> Tensor(a)"
  5190. },
  5191. {
  5192. "name": "aten::reciprocal(Tensor self) -> Tensor"
  5193. },
  5194. {
  5195. "name": "aten::reciprocal.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  5196. },
  5197. {
  5198. "name": "aten::reciprocal_(Tensor(a!) self) -> Tensor(a!)"
  5199. },
  5200. {
  5201. "name": "aten::reflection_pad1d(Tensor self, SymInt[2] padding) -> Tensor",
  5202. "category": "Tensor"
  5203. },
  5204. {
  5205. "name": "aten::reflection_pad1d.out(Tensor self, SymInt[2] padding, *, Tensor(a!) out) -> Tensor(a!)"
  5206. },
  5207. {
  5208. "name": "aten::reflection_pad2d(Tensor self, SymInt[4] padding) -> Tensor",
  5209. "category": "Tensor"
  5210. },
  5211. {
  5212. "name": "aten::reflection_pad2d.out(Tensor self, SymInt[4] padding, *, Tensor(a!) out) -> Tensor(a!)"
  5213. },
  5214. {
  5215. "name": "aten::reflection_pad3d(Tensor self, SymInt[6] padding) -> Tensor",
  5216. "category": "Tensor"
  5217. },
  5218. {
  5219. "name": "aten::reflection_pad3d.out(Tensor self, SymInt[6] padding, *, Tensor(a!) out) -> Tensor(a!)"
  5220. },
  5221. {
  5222. "name": "aten::relu(Tensor self) -> Tensor",
  5223. "category": "Activation"
  5224. },
  5225. {
  5226. "name": "aten::relu.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  5227. },
  5228. {
  5229. "name": "aten::relu6(Tensor self) -> Tensor",
  5230. "category": "Activation"
  5231. },
  5232. {
  5233. "name": "aten::relu6_(Tensor(a!) self) -> Tensor(a!)",
  5234. "category": "Activation"
  5235. },
  5236. {
  5237. "name": "aten::relu_(Tensor(a!) self) -> Tensor(a!)",
  5238. "category": "Activation"
  5239. },
  5240. {
  5241. "name": "aten::remainder.Tensor(Tensor self, Tensor other) -> Tensor"
  5242. },
  5243. {
  5244. "name": "aten::remainder.Scalar(Tensor self, Scalar other) -> Tensor"
  5245. },
  5246. {
  5247. "name": "aten::remainder.Scalar_Tensor(Scalar self, Tensor other) -> Tensor"
  5248. },
  5249. {
  5250. "name": "aten::remainder.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  5251. },
  5252. {
  5253. "name": "aten::remainder.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)"
  5254. },
  5255. {
  5256. "name": "aten::remainder.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  5257. },
  5258. {
  5259. "name": "aten::remainder.int(int a, int b) -> int"
  5260. },
  5261. {
  5262. "name": "aten::remainder.float(float a, float b) -> float"
  5263. },
  5264. {
  5265. "name": "aten::remainder.int_float(int a, float b) -> float"
  5266. },
  5267. {
  5268. "name": "aten::remainder.float_int(float a, int b) -> float"
  5269. },
  5270. {
  5271. "name": "aten::remainder(Scalar a, Scalar b) -> Scalar"
  5272. },
  5273. {
  5274. "name": "aten::remainder_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)"
  5275. },
  5276. {
  5277. "name": "aten::remainder_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)"
  5278. },
  5279. {
  5280. "name": "aten::remove.int(int[](a!) self, int el) -> ()"
  5281. },
  5282. {
  5283. "name": "aten::remove.float(float[](a!) self, float el) -> ()"
  5284. },
  5285. {
  5286. "name": "aten::remove.bool(bool[](a!) self, bool el) -> ()"
  5287. },
  5288. {
  5289. "name": "aten::remove.Tensor(Tensor[](a!) self, Tensor el) -> ()"
  5290. },
  5291. {
  5292. "name": "aten::remove.str(str[](a!) self, str el) -> ()"
  5293. },
  5294. {
  5295. "name": "aten::renorm(Tensor self, Scalar p, int dim, Scalar maxnorm) -> Tensor"
  5296. },
  5297. {
  5298. "name": "aten::renorm.out(Tensor self, Scalar p, int dim, Scalar maxnorm, *, Tensor(a!) out) -> Tensor(a!)"
  5299. },
  5300. {
  5301. "name": "aten::repeat(Tensor self, SymInt[] repeats) -> Tensor"
  5302. },
  5303. {
  5304. "name": "aten::repeat.out(Tensor self, SymInt[] repeats, *, Tensor(a!) out) -> Tensor(a!)"
  5305. },
  5306. {
  5307. "name": "aten::repeat_interleave.Tensor(Tensor repeats, *, SymInt? output_size=None) -> Tensor"
  5308. },
  5309. {
  5310. "name": "aten::repeat_interleave.self_Tensor(Tensor self, Tensor repeats, int? dim=None, *, SymInt? output_size=None) -> Tensor"
  5311. },
  5312. {
  5313. "name": "aten::repeat_interleave.self_int(Tensor self, SymInt repeats, int? dim=None, *, SymInt? output_size=None) -> Tensor"
  5314. },
  5315. {
  5316. "name": "aten::repeat_interleave.Tensor_out(Tensor repeats, *, SymInt? output_size=None, Tensor(a!) out) -> Tensor(a!)"
  5317. },
  5318. {
  5319. "name": "aten::replace(str self, str old, str new, int max=-1) -> str"
  5320. },
  5321. {
  5322. "name": "aten::replication_pad1d(Tensor self, SymInt[2] padding) -> Tensor",
  5323. "category": "Tensor"
  5324. },
  5325. {
  5326. "name": "aten::replication_pad1d.out(Tensor self, SymInt[2] padding, *, Tensor(a!) out) -> Tensor(a!)"
  5327. },
  5328. {
  5329. "name": "aten::replication_pad2d(Tensor self, SymInt[4] padding) -> Tensor",
  5330. "category": "Tensor"
  5331. },
  5332. {
  5333. "name": "aten::replication_pad2d.out(Tensor self, SymInt[4] padding, *, Tensor(a!) out) -> Tensor(a!)"
  5334. },
  5335. {
  5336. "name": "aten::replication_pad3d(Tensor self, SymInt[6] padding) -> Tensor",
  5337. "category": "Tensor"
  5338. },
  5339. {
  5340. "name": "aten::replication_pad3d.out(Tensor self, SymInt[6] padding, *, Tensor(a!) out) -> Tensor(a!)"
  5341. },
  5342. {
  5343. "name": "aten::requires_grad_(Tensor(a!) self, bool requires_grad=True) -> Tensor(a!)"
  5344. },
  5345. {
  5346. "name": "aten::reshape(Tensor(a) self, SymInt[] shape) -> Tensor(a)",
  5347. "category": "Shape"
  5348. },
  5349. {
  5350. "name": "aten::reshape_as(Tensor(a) self, Tensor other) -> Tensor(a)",
  5351. "category": "Shape"
  5352. },
  5353. {
  5354. "name": "aten::resize(Tensor self, SymInt[] size, *, MemoryFormat? memory_format=None) -> Tensor"
  5355. },
  5356. {
  5357. "name": "aten::resize.out(Tensor self, SymInt[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)"
  5358. },
  5359. {
  5360. "name": "aten::resize_(Tensor(a!) self, SymInt[] size, *, MemoryFormat? memory_format=None) -> Tensor(a!)"
  5361. },
  5362. {
  5363. "name": "aten::resolve_conj(Tensor(a) self) -> Tensor(a)"
  5364. },
  5365. {
  5366. "name": "aten::resolve_neg(Tensor(a) self) -> Tensor(a)"
  5367. },
  5368. {
  5369. "name": "aten::retain_grad(Tensor(a!) self) -> ()"
  5370. },
  5371. {
  5372. "name": "aten::reverse.t(t[](a!) self) -> ()"
  5373. },
  5374. {
  5375. "name": "aten::rms_norm(Tensor input, SymInt[] normalized_shape, Tensor? weight=None, float? eps=None) -> Tensor"
  5376. },
  5377. {
  5378. "name": "aten::rnn_relu.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor)",
  5379. "category": "Layer"
  5380. },
  5381. {
  5382. "name": "aten::rnn_relu.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor)"
  5383. },
  5384. {
  5385. "name": "aten::rnn_tanh.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor)",
  5386. "category": "Layer"
  5387. },
  5388. {
  5389. "name": "aten::rnn_tanh.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor)"
  5390. },
  5391. {
  5392. "name": "aten::rnn_tanh_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> Tensor"
  5393. },
  5394. {
  5395. "name": "aten::roll(Tensor self, SymInt[1] shifts, int[1] dims=[]) -> Tensor",
  5396. "category": "Layer"
  5397. },
  5398. {
  5399. "name": "aten::roll.out(Tensor self, SymInt[1] shifts, int[1] dims=[], *, Tensor(a!) out) -> Tensor(a!)"
  5400. },
  5401. {
  5402. "name": "aten::rot90(Tensor self, int k=1, int[] dims=[0, 1]) -> Tensor"
  5403. },
  5404. {
  5405. "name": "aten::rot90.out(Tensor self, int k=1, int[] dims=[0, 1], *, Tensor(a!) out) -> Tensor(a!)"
  5406. },
  5407. {
  5408. "name": "aten::round(Tensor self) -> Tensor"
  5409. },
  5410. {
  5411. "name": "aten::round.decimals(Tensor self, *, int decimals) -> Tensor"
  5412. },
  5413. {
  5414. "name": "aten::round.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  5415. },
  5416. {
  5417. "name": "aten::round.decimals_out(Tensor self, *, int decimals, Tensor(a!) out) -> Tensor(a!)"
  5418. },
  5419. {
  5420. "name": "aten::round.int(int a) -> float"
  5421. },
  5422. {
  5423. "name": "aten::round.float(float a) -> float"
  5424. },
  5425. {
  5426. "name": "aten::round.Scalar(Scalar a) -> Scalar"
  5427. },
  5428. {
  5429. "name": "aten::round_(Tensor(a!) self) -> Tensor(a!)"
  5430. },
  5431. {
  5432. "name": "aten::round_.decimals(Tensor(a!) self, *, int decimals) -> Tensor(a!)"
  5433. },
  5434. {
  5435. "name": "aten::rrelu(Tensor self, Scalar lower=0.125, Scalar upper=0.33333333333333331, bool training=False, Generator? generator=None) -> Tensor",
  5436. "category": "Activation"
  5437. },
  5438. {
  5439. "name": "aten::rrelu_(Tensor(a!) self, Scalar lower=0.125, Scalar upper=0.33333333333333331, bool training=False, Generator? generator=None) -> Tensor(a!)",
  5440. "category": "Activation"
  5441. },
  5442. {
  5443. "name": "aten::rsqrt(Tensor self) -> Tensor"
  5444. },
  5445. {
  5446. "name": "aten::rsqrt.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  5447. },
  5448. {
  5449. "name": "aten::rsqrt_(Tensor(a!) self) -> Tensor(a!)"
  5450. },
  5451. {
  5452. "name": "aten::rstrip(str self, str chars=\" \\n\\t\\f\\v\") -> str"
  5453. },
  5454. {
  5455. "name": "aten::rsub.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor"
  5456. },
  5457. {
  5458. "name": "aten::rsub.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor"
  5459. },
  5460. {
  5461. "name": "aten::rsub.Tensor_out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)"
  5462. },
  5463. {
  5464. "name": "aten::rsub.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!)"
  5465. },
  5466. {
  5467. "name": "aten::scalar_tensor(Scalar s, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  5468. },
  5469. {
  5470. "name": "aten::scalar_tensor.out(Scalar s, *, Tensor(a!) out) -> Tensor(a!)"
  5471. },
  5472. {
  5473. "name": "aten::scaled_dot_product_attention(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0., bool is_causal=False, *, float? scale=None, bool enable_gqa=False) -> Tensor",
  5474. "category": "Attention"
  5475. },
  5476. {
  5477. "name": "aten::scatter.value(Tensor self, int dim, Tensor index, Scalar value) -> Tensor"
  5478. },
  5479. {
  5480. "name": "aten::scatter.src(Tensor self, int dim, Tensor index, Tensor src) -> Tensor"
  5481. },
  5482. {
  5483. "name": "aten::scatter.reduce(Tensor self, int dim, Tensor index, Tensor src, *, str reduce) -> Tensor"
  5484. },
  5485. {
  5486. "name": "aten::scatter.value_reduce(Tensor self, int dim, Tensor index, Scalar value, *, str reduce) -> Tensor"
  5487. },
  5488. {
  5489. "name": "aten::scatter.src_out(Tensor self, int dim, Tensor index, Tensor src, *, Tensor(a!) out) -> Tensor(a!)"
  5490. },
  5491. {
  5492. "name": "aten::scatter.value_out(Tensor self, int dim, Tensor index, Scalar value, *, Tensor(a!) out) -> Tensor(a!)"
  5493. },
  5494. {
  5495. "name": "aten::scatter.reduce_out(Tensor self, int dim, Tensor index, Tensor src, *, str reduce, Tensor(a!) out) -> Tensor(a!)"
  5496. },
  5497. {
  5498. "name": "aten::scatter.value_reduce_out(Tensor self, int dim, Tensor index, Scalar value, *, str reduce, Tensor(a!) out) -> Tensor(a!)"
  5499. },
  5500. {
  5501. "name": "aten::scatter.dimname_src(Tensor self, str dim, Tensor index, Tensor src) -> Tensor"
  5502. },
  5503. {
  5504. "name": "aten::scatter.dimname_value(Tensor self, str dim, Tensor index, Scalar value) -> Tensor"
  5505. },
  5506. {
  5507. "name": "aten::scatter_.src(Tensor(a!) self, int dim, Tensor index, Tensor src) -> Tensor(a!)"
  5508. },
  5509. {
  5510. "name": "aten::scatter_.value(Tensor(a!) self, int dim, Tensor index, Scalar value) -> Tensor(a!)"
  5511. },
  5512. {
  5513. "name": "aten::scatter_.reduce(Tensor(a!) self, int dim, Tensor index, Tensor src, *, str reduce) -> Tensor(a!)"
  5514. },
  5515. {
  5516. "name": "aten::scatter_.value_reduce(Tensor(a!) self, int dim, Tensor index, Scalar value, *, str reduce) -> Tensor(a!)"
  5517. },
  5518. {
  5519. "name": "aten::scatter_add(Tensor self, int dim, Tensor index, Tensor src) -> Tensor"
  5520. },
  5521. {
  5522. "name": "aten::scatter_add.out(Tensor self, int dim, Tensor index, Tensor src, *, Tensor(a!) out) -> Tensor(a!)"
  5523. },
  5524. {
  5525. "name": "aten::scatter_add.dimname(Tensor self, str dim, Tensor index, Tensor src) -> Tensor"
  5526. },
  5527. {
  5528. "name": "aten::scatter_add_(Tensor(a!) self, int dim, Tensor index, Tensor src) -> Tensor(a!)"
  5529. },
  5530. {
  5531. "name": "aten::scatter_reduce.two(Tensor self, int dim, Tensor index, Tensor src, str reduce, *, bool include_self=True) -> Tensor"
  5532. },
  5533. {
  5534. "name": "aten::scatter_reduce.two_out(Tensor self, int dim, Tensor index, Tensor src, str reduce, *, bool include_self=True, Tensor(a!) out) -> Tensor(a!)"
  5535. },
  5536. {
  5537. "name": "aten::scatter_reduce_.two(Tensor(a!) self, int dim, Tensor index, Tensor src, str reduce, *, bool include_self=True) -> Tensor(a!)"
  5538. },
  5539. {
  5540. "name": "aten::searchsorted.Tensor(Tensor sorted_sequence, Tensor self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None) -> Tensor"
  5541. },
  5542. {
  5543. "name": "aten::searchsorted.Tensor_out(Tensor sorted_sequence, Tensor self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None, Tensor(a!) out) -> Tensor(a!)"
  5544. },
  5545. {
  5546. "name": "aten::searchsorted.Scalar(Tensor sorted_sequence, Scalar self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None) -> Tensor"
  5547. },
  5548. {
  5549. "name": "aten::searchsorted.Scalar_out(Tensor sorted_sequence, Scalar self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None, Tensor(a!) out) -> Tensor(a!)"
  5550. },
  5551. {
  5552. "name": "aten::segment_reduce(Tensor data, str reduce, *, Tensor? lengths=None, Tensor? indices=None, Tensor? offsets=None, int axis=0, bool unsafe=False, Scalar? initial=None) -> Tensor"
  5553. },
  5554. {
  5555. "name": "aten::segment_reduce.out(Tensor data, str reduce, *, Tensor? lengths=None, Tensor? indices=None, Tensor? offsets=None, int axis=0, bool unsafe=False, Scalar? initial=None, Tensor(a!) out) -> Tensor(a!)"
  5556. },
  5557. {
  5558. "name": "aten::select.Dimname(Tensor(a) self, str dim, int index) -> Tensor(a)"
  5559. },
  5560. {
  5561. "name": "aten::select.int(Tensor(a) self, int dim, SymInt index) -> Tensor(a)"
  5562. },
  5563. {
  5564. "name": "aten::select.t(t[](a) list, int idx) -> t(*)"
  5565. },
  5566. {
  5567. "name": "aten::select_backward(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt index) -> Tensor"
  5568. },
  5569. {
  5570. "name": "aten::select_backward.out(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt index, *, Tensor(a!) out) -> Tensor(a!)"
  5571. },
  5572. {
  5573. "name": "aten::select_copy.int(Tensor self, int dim, SymInt index) -> Tensor"
  5574. },
  5575. {
  5576. "name": "aten::select_copy.int_out(Tensor self, int dim, SymInt index, *, Tensor(a!) out) -> Tensor(a!)"
  5577. },
  5578. {
  5579. "name": "aten::select_scatter(Tensor self, Tensor src, int dim, SymInt index) -> Tensor"
  5580. },
  5581. {
  5582. "name": "aten::select_scatter.out(Tensor self, Tensor src, int dim, SymInt index, *, Tensor(a!) out) -> Tensor(a!)"
  5583. },
  5584. {
  5585. "name": "aten::selu(Tensor self) -> Tensor",
  5586. "category": "Activation"
  5587. },
  5588. {
  5589. "name": "aten::selu_(Tensor(a!) self) -> Tensor(a!)",
  5590. "category": "Activation"
  5591. },
  5592. {
  5593. "name": "aten::set_.source_Storage_storage_offset(Tensor(a!) self, Storage source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[]) -> Tensor(a!)"
  5594. },
  5595. {
  5596. "name": "aten::set_.source_Tensor(Tensor(a!) self, Tensor source) -> Tensor(a!)"
  5597. },
  5598. {
  5599. "name": "aten::set_(Tensor(a!) self) -> Tensor(a!)"
  5600. },
  5601. {
  5602. "name": "aten::set_.source_Storage(Tensor(a!) self, Storage source) -> Tensor(a!)"
  5603. },
  5604. {
  5605. "name": "aten::set_.source_Tensor_storage_offset(Tensor(a!) self, Tensor source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[]) -> Tensor(a!)"
  5606. },
  5607. {
  5608. "name": "aten::set_grad_enabled(bool val) -> ()"
  5609. },
  5610. {
  5611. "name": "aten::set_num_threads(int nthreads) -> ()"
  5612. },
  5613. {
  5614. "name": "aten::sigmoid(Tensor self) -> Tensor",
  5615. "category": "Activation"
  5616. },
  5617. {
  5618. "name": "aten::sigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  5619. },
  5620. {
  5621. "name": "aten::sigmoid_(Tensor(a!) self) -> Tensor(a!)",
  5622. "category": "Activation"
  5623. },
  5624. {
  5625. "name": "aten::sign(Tensor self) -> Tensor"
  5626. },
  5627. {
  5628. "name": "aten::sign.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  5629. },
  5630. {
  5631. "name": "aten::sign_(Tensor(a!) self) -> Tensor(a!)"
  5632. },
  5633. {
  5634. "name": "aten::signbit(Tensor self) -> Tensor"
  5635. },
  5636. {
  5637. "name": "aten::signbit.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  5638. },
  5639. {
  5640. "name": "aten::silu(Tensor self) -> Tensor",
  5641. "category": "Activation"
  5642. },
  5643. {
  5644. "name": "aten::silu.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  5645. },
  5646. {
  5647. "name": "aten::silu_(Tensor(a!) self) -> Tensor(a!)",
  5648. "category": "Activation"
  5649. },
  5650. {
  5651. "name": "aten::silu_backward(Tensor grad_output, Tensor self) -> Tensor"
  5652. },
  5653. {
  5654. "name": "aten::silu_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!)"
  5655. },
  5656. {
  5657. "name": "aten::sin(Tensor self) -> Tensor"
  5658. },
  5659. {
  5660. "name": "aten::sin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  5661. },
  5662. {
  5663. "name": "aten::sin.int(int a) -> float"
  5664. },
  5665. {
  5666. "name": "aten::sin.float(float a) -> float"
  5667. },
  5668. {
  5669. "name": "aten::sin.complex(complex a) -> complex"
  5670. },
  5671. {
  5672. "name": "aten::sin.Scalar(Scalar a) -> Scalar"
  5673. },
  5674. {
  5675. "name": "aten::sinc(Tensor self) -> Tensor"
  5676. },
  5677. {
  5678. "name": "aten::sinc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  5679. },
  5680. {
  5681. "name": "aten::sinh(Tensor self) -> Tensor"
  5682. },
  5683. {
  5684. "name": "aten::sinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  5685. },
  5686. {
  5687. "name": "aten::sinh.int(int a) -> float"
  5688. },
  5689. {
  5690. "name": "aten::sinh.float(float a) -> float"
  5691. },
  5692. {
  5693. "name": "aten::sinh.complex(complex a) -> complex"
  5694. },
  5695. {
  5696. "name": "aten::sinh.Scalar(Scalar a) -> Scalar"
  5697. },
  5698. {
  5699. "name": "aten::size.int(Tensor self, int dim) -> int"
  5700. },
  5701. {
  5702. "name": "aten::size.Dimname(Tensor self, str dim) -> int"
  5703. },
  5704. {
  5705. "name": "aten::size(Tensor self) -> int[]"
  5706. },
  5707. {
  5708. "name": "aten::slice.Tensor(Tensor(a) self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor(a)",
  5709. "category": "Tensor"
  5710. },
  5711. {
  5712. "name": "aten::slice.t(t[] l, int? start=None, int? end=None, int step=1) -> t[]",
  5713. "category": "Tensor"
  5714. },
  5715. {
  5716. "name": "aten::slice.str(str string, int? start=None, int? end=None, int step=1) -> str",
  5717. "category": "Tensor"
  5718. },
  5719. {
  5720. "name": "aten::slice_copy.Tensor(Tensor self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor"
  5721. },
  5722. {
  5723. "name": "aten::slice_copy.Tensor_out(Tensor self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1, *, Tensor(a!) out) -> Tensor(a!)"
  5724. },
  5725. {
  5726. "name": "aten::slice_scatter(Tensor self, Tensor src, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor"
  5727. },
  5728. {
  5729. "name": "aten::slice_scatter.out(Tensor self, Tensor src, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1, *, Tensor(a!) out) -> Tensor(a!)"
  5730. },
  5731. {
  5732. "name": "aten::slogdet(Tensor self) -> (Tensor sign, Tensor logabsdet)"
  5733. },
  5734. {
  5735. "name": "aten::slogdet.out(Tensor self, *, Tensor(a!) sign, Tensor(b!) logabsdet) -> (Tensor(a!) sign, Tensor(b!) logabsdet)"
  5736. },
  5737. {
  5738. "name": "aten::smooth_l1_loss(Tensor self, Tensor target, int reduction=1, float beta=1.) -> Tensor"
  5739. },
  5740. {
  5741. "name": "aten::smooth_l1_loss.out(Tensor self, Tensor target, int reduction=1, float beta=1., *, Tensor(a!) out) -> Tensor(a!)"
  5742. },
  5743. {
  5744. "name": "aten::smooth_l1_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, float beta, *, Tensor(a!) grad_input) -> Tensor(a!)"
  5745. },
  5746. {
  5747. "name": "aten::smooth_l1_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, float beta) -> Tensor"
  5748. },
  5749. {
  5750. "name": "aten::softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor",
  5751. "category": "Activation"
  5752. },
  5753. {
  5754. "name": "aten::softmax.Dimname(Tensor self, str dim, *, ScalarType? dtype=None) -> Tensor",
  5755. "category": "Activation"
  5756. },
  5757. {
  5758. "name": "aten::softmax.int_out(Tensor self, int dim, ScalarType? dtype=None, *, Tensor(a!) out) -> Tensor(a!)"
  5759. },
  5760. {
  5761. "name": "aten::softplus(Tensor self, Scalar beta=1, Scalar threshold=20) -> Tensor",
  5762. "category": "Activation"
  5763. },
  5764. {
  5765. "name": "aten::softplus.out(Tensor self, Scalar beta=1, Scalar threshold=20, *, Tensor(a!) out) -> Tensor(a!)"
  5766. },
  5767. {
  5768. "name": "aten::softshrink(Tensor self, Scalar lambd=0.5) -> Tensor"
  5769. },
  5770. {
  5771. "name": "aten::softshrink.out(Tensor self, Scalar lambd=0.5, *, Tensor(a!) out) -> Tensor(a!)"
  5772. },
  5773. {
  5774. "name": "aten::sort(Tensor self, int dim=-1, bool descending=False) -> (Tensor values, Tensor indices)"
  5775. },
  5776. {
  5777. "name": "aten::sort.stable(Tensor self, *, bool? stable, int dim=-1, bool descending=False) -> (Tensor values, Tensor indices)"
  5778. },
  5779. {
  5780. "name": "aten::sort.values_stable(Tensor self, *, bool? stable, int dim=-1, bool descending=False, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)"
  5781. },
  5782. {
  5783. "name": "aten::sort.values(Tensor self, int dim=-1, bool descending=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)"
  5784. },
  5785. {
  5786. "name": "aten::sort.dimname(Tensor self, str dim, bool descending=False) -> (Tensor values, Tensor indices)"
  5787. },
  5788. {
  5789. "name": "aten::sort.dimname_values(Tensor self, str dim, bool descending=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)"
  5790. },
  5791. {
  5792. "name": "aten::sort.dimname_stable(Tensor self, *, bool? stable, str dim, bool descending=False) -> (Tensor values, Tensor indices)"
  5793. },
  5794. {
  5795. "name": "aten::sort.dimname_values_stable(Tensor self, *, bool? stable, str dim, bool descending=False, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)"
  5796. },
  5797. {
  5798. "name": "aten::sort.int(int[](a!) self, bool reverse=False) -> ()"
  5799. },
  5800. {
  5801. "name": "aten::sort.float(float[](a!) self, bool reverse=False) -> ()"
  5802. },
  5803. {
  5804. "name": "aten::sort.Tensor(Tensor[](a!) self, bool reverse=False) -> ()"
  5805. },
  5806. {
  5807. "name": "aten::sort.bool(bool[](a!) self, bool reverse=False) -> ()"
  5808. },
  5809. {
  5810. "name": "aten::sort.str(str[](a!) self, bool reverse=False) -> ()"
  5811. },
  5812. {
  5813. "name": "aten::sort.any(t[](a!) self, bool reverse=False) -> ()"
  5814. },
  5815. {
  5816. "name": "aten::sorted.int(int[](a) input) -> int[]"
  5817. },
  5818. {
  5819. "name": "aten::sorted.float(float[](a) input) -> float[]"
  5820. },
  5821. {
  5822. "name": "aten::sorted.Tensor(Tensor[](a) input) -> Tensor[]"
  5823. },
  5824. {
  5825. "name": "aten::sorted.bool(bool[](a) input) -> bool[]"
  5826. },
  5827. {
  5828. "name": "aten::sorted.str(str[](a) input) -> str[]"
  5829. },
  5830. {
  5831. "name": "aten::sorted.any(t[](a) self) -> t[]"
  5832. },
  5833. {
  5834. "name": "aten::sparse_bsr_tensor.crow_col_value_size(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor"
  5835. },
  5836. {
  5837. "name": "aten::sparse_bsr_tensor.crow_col_value(Tensor crow_indices, Tensor col_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor"
  5838. },
  5839. {
  5840. "name": "aten::sparse_coo_tensor.size(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor"
  5841. },
  5842. {
  5843. "name": "aten::sparse_coo_tensor.indices(Tensor indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool? is_coalesced=None) -> Tensor"
  5844. },
  5845. {
  5846. "name": "aten::sparse_coo_tensor.indices_size(Tensor indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool? is_coalesced=None) -> Tensor"
  5847. },
  5848. {
  5849. "name": "aten::sparse_coo_tensor.size_out(int[] size, *, Tensor(a!) out) -> Tensor(a!)"
  5850. },
  5851. {
  5852. "name": "aten::sparse_csc_tensor.ccol_row_value_size(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor"
  5853. },
  5854. {
  5855. "name": "aten::sparse_csc_tensor.ccol_row_value(Tensor ccol_indices, Tensor row_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor"
  5856. },
  5857. {
  5858. "name": "aten::sparse_csr_tensor.crow_col_value_size(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor"
  5859. },
  5860. {
  5861. "name": "aten::sparse_csr_tensor.crow_col_value(Tensor crow_indices, Tensor col_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor"
  5862. },
  5863. {
  5864. "name": "aten::special_expit(Tensor self) -> Tensor"
  5865. },
  5866. {
  5867. "name": "aten::special_expit.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  5868. },
  5869. {
  5870. "name": "aten::special_expm1(Tensor self) -> Tensor"
  5871. },
  5872. {
  5873. "name": "aten::special_expm1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  5874. },
  5875. {
  5876. "name": "aten::special_logit(Tensor self, float? eps=None) -> Tensor"
  5877. },
  5878. {
  5879. "name": "aten::special_logit.out(Tensor self, float? eps=None, *, Tensor(a!) out) -> Tensor(a!)"
  5880. },
  5881. {
  5882. "name": "aten::split.Tensor(Tensor(a -> *) self, SymInt split_size, int dim=0) -> Tensor(a)[]",
  5883. "category": "Tensor"
  5884. },
  5885. {
  5886. "name": "aten::split.sizes(Tensor(a -> *) self, SymInt[] split_size, int dim=0) -> Tensor(a)[]",
  5887. "category": "Tensor"
  5888. },
  5889. {
  5890. "name": "aten::split.str(str self, str? separator=None, int max=-1) -> str[]"
  5891. },
  5892. {
  5893. "name": "aten::split(Tensor(a -> *) self, int[] split_sizes, int dim=0) -> Tensor(a)[]"
  5894. },
  5895. {
  5896. "name": "aten::split_copy.Tensor(Tensor self, SymInt split_size, int dim=0) -> Tensor[]"
  5897. },
  5898. {
  5899. "name": "aten::split_copy.Tensor_out(Tensor self, SymInt split_size, int dim=0, *, Tensor(a!)[] out) -> ()"
  5900. },
  5901. {
  5902. "name": "aten::split_with_sizes(Tensor(a -> *) self, SymInt[] split_sizes, int dim=0) -> Tensor(a)[]",
  5903. "category": "Tensor"
  5904. },
  5905. {
  5906. "name": "aten::split_with_sizes_copy(Tensor self, SymInt[] split_sizes, int dim=0) -> Tensor[]",
  5907. "category": "Tensor"
  5908. },
  5909. {
  5910. "name": "aten::split_with_sizes_copy.out(Tensor self, SymInt[] split_sizes, int dim=0, *, Tensor(a!)[] out) -> ()",
  5911. "category": "Tensor"
  5912. },
  5913. {
  5914. "name": "aten::splitlines(str self, bool keepends=False) -> str[]"
  5915. },
  5916. {
  5917. "name": "aten::sqrt(Tensor self) -> Tensor"
  5918. },
  5919. {
  5920. "name": "aten::sqrt.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  5921. },
  5922. {
  5923. "name": "aten::sqrt.int(int a) -> float"
  5924. },
  5925. {
  5926. "name": "aten::sqrt.float(float a) -> float"
  5927. },
  5928. {
  5929. "name": "aten::sqrt.complex(complex a) -> complex"
  5930. },
  5931. {
  5932. "name": "aten::sqrt.Scalar(Scalar a) -> Scalar"
  5933. },
  5934. {
  5935. "name": "aten::sqrt_(Tensor(a!) self) -> Tensor(a!)"
  5936. },
  5937. {
  5938. "name": "aten::square(Tensor self) -> Tensor"
  5939. },
  5940. {
  5941. "name": "aten::square.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  5942. },
  5943. {
  5944. "name": "aten::square_(Tensor(a!) self) -> Tensor(a!)"
  5945. },
  5946. {
  5947. "name": "aten::squeeze(Tensor(a) self) -> Tensor(a)",
  5948. "category": "Transform"
  5949. },
  5950. {
  5951. "name": "aten::squeeze.dim(Tensor(a) self, int dim) -> Tensor(a)",
  5952. "category": "Transform"
  5953. },
  5954. {
  5955. "name": "aten::squeeze.dims(Tensor(a) self, int[] dim) -> Tensor(a)",
  5956. "category": "Transform"
  5957. },
  5958. {
  5959. "name": "aten::squeeze.dimname(Tensor(a) self, str dim) -> Tensor(a)",
  5960. "category": "Transform"
  5961. },
  5962. {
  5963. "name": "aten::squeeze_(Tensor(a!) self) -> Tensor(a!)",
  5964. "category": "Transform"
  5965. },
  5966. {
  5967. "name": "aten::squeeze_.dim(Tensor(a!) self, int dim) -> Tensor(a!)",
  5968. "category": "Transform"
  5969. },
  5970. {
  5971. "name": "aten::squeeze_.dims(Tensor(a!) self, int[] dim) -> Tensor(a!)"
  5972. },
  5973. {
  5974. "name": "aten::squeeze_.dimname(Tensor(a!) self, str dim) -> Tensor(a!)",
  5975. "category": "Transform"
  5976. },
  5977. {
  5978. "name": "aten::squeeze_copy(Tensor self) -> Tensor"
  5979. },
  5980. {
  5981. "name": "aten::squeeze_copy.dim(Tensor self, int dim) -> Tensor"
  5982. },
  5983. {
  5984. "name": "aten::squeeze_copy.dims(Tensor self, int[] dim) -> Tensor"
  5985. },
  5986. {
  5987. "name": "aten::squeeze_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  5988. },
  5989. {
  5990. "name": "aten::squeeze_copy.dim_out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)"
  5991. },
  5992. {
  5993. "name": "aten::squeeze_copy.dims_out(Tensor self, int[] dim, *, Tensor(a!) out) -> Tensor(a!)"
  5994. },
  5995. {
  5996. "name": "aten::stack(Tensor[] tensors, int dim=0) -> Tensor",
  5997. "category": "Tensor"
  5998. },
  5999. {
  6000. "name": "aten::stack.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)"
  6001. },
  6002. {
  6003. "name": "aten::startswith(str self, str substr, int start=0, int end=-1) -> bool"
  6004. },
  6005. {
  6006. "name": "aten::std(Tensor self, bool unbiased=True) -> Tensor"
  6007. },
  6008. {
  6009. "name": "aten::std.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> Tensor"
  6010. },
  6011. {
  6012. "name": "aten::std.correction(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False) -> Tensor"
  6013. },
  6014. {
  6015. "name": "aten::std.names_dim(Tensor self, str[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor"
  6016. },
  6017. {
  6018. "name": "aten::std.names_out(Tensor self, str[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)"
  6019. },
  6020. {
  6021. "name": "aten::std.out(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)"
  6022. },
  6023. {
  6024. "name": "aten::std.correction_out(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)"
  6025. },
  6026. {
  6027. "name": "aten::std.correction_names(Tensor self, str[1] dim, *, Scalar? correction=None, bool keepdim=False) -> Tensor"
  6028. },
  6029. {
  6030. "name": "aten::std.correction_names_out(Tensor self, str[1] dim, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)"
  6031. },
  6032. {
  6033. "name": "aten::std_mean(Tensor self, bool unbiased=True) -> (Tensor, Tensor)"
  6034. },
  6035. {
  6036. "name": "aten::std_mean.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)"
  6037. },
  6038. {
  6039. "name": "aten::std_mean.correction(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False) -> (Tensor, Tensor)"
  6040. },
  6041. {
  6042. "name": "aten::std_mean.names_dim(Tensor self, str[1] dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)"
  6043. },
  6044. {
  6045. "name": "aten::std_mean.correction_names(Tensor self, str[1] dim, *, Scalar? correction=None, bool keepdim=False) -> (Tensor, Tensor)"
  6046. },
  6047. {
  6048. "name": "aten::std_mean.correction_out(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))"
  6049. },
  6050. {
  6051. "name": "aten::stft(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool normalized=False, bool? onesided=None, bool? return_complex=None, bool? align_to_window=None) -> Tensor"
  6052. },
  6053. {
  6054. "name": "aten::stft.center(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool center=True, str pad_mode=\"reflect\", bool normalized=False, bool? onesided=None, bool? return_complex=None, bool? align_to_window=None) -> Tensor"
  6055. },
  6056. {
  6057. "name": "aten::str(t elem) -> str"
  6058. },
  6059. {
  6060. "name": "aten::stride.int(Tensor self, int dim) -> int"
  6061. },
  6062. {
  6063. "name": "aten::stride.Dimname(Tensor self, str dim) -> int"
  6064. },
  6065. {
  6066. "name": "aten::stride(Tensor self) -> int[]"
  6067. },
  6068. {
  6069. "name": "aten::strip(str self, str chars=\" \\n\\t\\f\\v\") -> str"
  6070. },
  6071. {
  6072. "name": "aten::sub.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor"
  6073. },
  6074. {
  6075. "name": "aten::sub.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor"
  6076. },
  6077. {
  6078. "name": "aten::sub.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)"
  6079. },
  6080. {
  6081. "name": "aten::sub.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!)"
  6082. },
  6083. {
  6084. "name": "aten::sub.int(int a, int b) -> int"
  6085. },
  6086. {
  6087. "name": "aten::sub.complex(complex a, complex b) -> complex"
  6088. },
  6089. {
  6090. "name": "aten::sub.float(float a, float b) -> float"
  6091. },
  6092. {
  6093. "name": "aten::sub.int_complex(int a, complex b) -> complex"
  6094. },
  6095. {
  6096. "name": "aten::sub.complex_int(complex a, int b) -> complex"
  6097. },
  6098. {
  6099. "name": "aten::sub.float_complex(float a, complex b) -> complex"
  6100. },
  6101. {
  6102. "name": "aten::sub.complex_float(complex a, float b) -> complex"
  6103. },
  6104. {
  6105. "name": "aten::sub.int_float(int a, float b) -> float"
  6106. },
  6107. {
  6108. "name": "aten::sub.float_int(float a, int b) -> float"
  6109. },
  6110. {
  6111. "name": "aten::sub(Scalar a, Scalar b) -> Scalar"
  6112. },
  6113. {
  6114. "name": "aten::sub_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)"
  6115. },
  6116. {
  6117. "name": "aten::sub_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)"
  6118. },
  6119. {
  6120. "name": "aten::sum.dim_IntList(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor"
  6121. },
  6122. {
  6123. "name": "aten::sum(Tensor self, *, ScalarType? dtype=None) -> Tensor"
  6124. },
  6125. {
  6126. "name": "aten::sum.dim_DimnameList(Tensor self, str[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor"
  6127. },
  6128. {
  6129. "name": "aten::sum.DimnameList_out(Tensor self, str[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)"
  6130. },
  6131. {
  6132. "name": "aten::sum.IntList_out(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)"
  6133. },
  6134. {
  6135. "name": "aten::sum.out(Tensor self, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)"
  6136. },
  6137. {
  6138. "name": "aten::sum.int(int[] self) -> int"
  6139. },
  6140. {
  6141. "name": "aten::sum.float(float[] self) -> float"
  6142. },
  6143. {
  6144. "name": "aten::sum.complex(complex[] self) -> complex"
  6145. },
  6146. {
  6147. "name": "aten::sum.bool(bool[] self) -> int"
  6148. },
  6149. {
  6150. "name": "aten::swapaxes(Tensor(a) self, int axis0, int axis1) -> Tensor(a)"
  6151. },
  6152. {
  6153. "name": "aten::swapaxes_(Tensor(a!) self, int axis0, int axis1) -> Tensor(a!)"
  6154. },
  6155. {
  6156. "name": "aten::sym_constrain_range_for_size(Scalar size, *, int? min=None, int? max=None) -> ()"
  6157. },
  6158. {
  6159. "name": "aten::sym_size.int(Tensor self, int dim) -> SymInt"
  6160. },
  6161. {
  6162. "name": "aten::sym_size(Tensor self) -> SymInt[]"
  6163. },
  6164. {
  6165. "name": "aten::t(Tensor(a) self) -> Tensor(a)"
  6166. },
  6167. {
  6168. "name": "aten::t_(Tensor(a!) self) -> Tensor(a!)"
  6169. },
  6170. {
  6171. "name": "aten::take(Tensor self, Tensor index) -> Tensor",
  6172. "category": "Activation"
  6173. },
  6174. {
  6175. "name": "aten::take.out(Tensor self, Tensor index, *, Tensor(a!) out) -> Tensor(a!)"
  6176. },
  6177. {
  6178. "name": "aten::take_along_dim(Tensor self, Tensor indices, int? dim=None) -> Tensor"
  6179. },
  6180. {
  6181. "name": "aten::take_along_dim.out(Tensor self, Tensor indices, int? dim=None, *, Tensor(a!) out) -> Tensor(a!)"
  6182. },
  6183. {
  6184. "name": "aten::tan(Tensor self) -> Tensor"
  6185. },
  6186. {
  6187. "name": "aten::tan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  6188. },
  6189. {
  6190. "name": "aten::tan.int(int a) -> float"
  6191. },
  6192. {
  6193. "name": "aten::tan.float(float a) -> float"
  6194. },
  6195. {
  6196. "name": "aten::tan.complex(complex a) -> complex"
  6197. },
  6198. {
  6199. "name": "aten::tan.Scalar(Scalar a) -> Scalar"
  6200. },
  6201. {
  6202. "name": "aten::tan_(Tensor(a!) self) -> Tensor(a!)"
  6203. },
  6204. {
  6205. "name": "aten::tanh(Tensor self) -> Tensor",
  6206. "category": "Activation"
  6207. },
  6208. {
  6209. "name": "aten::tanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)",
  6210. "category": "Activation"
  6211. },
  6212. {
  6213. "name": "aten::tanh.int(int a) -> float",
  6214. "category": "Activation"
  6215. },
  6216. {
  6217. "name": "aten::tanh.float(float a) -> float",
  6218. "category": "Activation"
  6219. },
  6220. {
  6221. "name": "aten::tanh.complex(complex a) -> complex",
  6222. "category": "Activation"
  6223. },
  6224. {
  6225. "name": "aten::tanh.Scalar(Scalar a) -> Scalar",
  6226. "category": "Activation"
  6227. },
  6228. {
  6229. "name": "aten::tanh_(Tensor(a!) self) -> Tensor(a!)",
  6230. "category": "Activation"
  6231. },
  6232. {
  6233. "name": "aten::tensor.bool(bool t, *, ScalarType? dtype=None, Device? device=None, bool requires_grad=False) -> Tensor"
  6234. },
  6235. {
  6236. "name": "aten::tensor.float(float t, *, ScalarType? dtype=None, Device? device=None, bool requires_grad=False) -> Tensor"
  6237. },
  6238. {
  6239. "name": "aten::tensor.int(int t, *, ScalarType? dtype=None, Device? device=None, bool requires_grad=False) -> Tensor"
  6240. },
  6241. {
  6242. "name": "aten::tensor.complex(complex t, *, ScalarType? dtype=None, Device? device=None, bool requires_grad=False) -> Tensor"
  6243. },
  6244. {
  6245. "name": "aten::tensor(t[] data, *, ScalarType? dtype=None, Device? device=None, bool requires_grad=False) -> Tensor"
  6246. },
  6247. {
  6248. "name": "aten::tensor_split.sections(Tensor(a -> *) self, SymInt sections, int dim=0) -> Tensor(a)[]"
  6249. },
  6250. {
  6251. "name": "aten::tensor_split.indices(Tensor(a -> *) self, SymInt[] indices, int dim=0) -> Tensor(a)[]"
  6252. },
  6253. {
  6254. "name": "aten::tensor_split.tensor_indices_or_sections(Tensor(a -> *) self, Tensor tensor_indices_or_sections, int dim=0) -> Tensor(a)[]"
  6255. },
  6256. {
  6257. "name": "aten::tensordot(Tensor self, Tensor other, int[] dims_self, int[] dims_other) -> Tensor"
  6258. },
  6259. {
  6260. "name": "aten::tensordot.out(Tensor self, Tensor other, int[] dims_self, int[] dims_other, *, Tensor(a!) out) -> Tensor(a!)"
  6261. },
  6262. {
  6263. "name": "aten::threshold(Tensor self, Scalar threshold, Scalar value) -> Tensor",
  6264. "category": "Activation"
  6265. },
  6266. {
  6267. "name": "aten::threshold.out(Tensor self, Scalar threshold, Scalar value, *, Tensor(a!) out) -> Tensor(a!)"
  6268. },
  6269. {
  6270. "name": "aten::threshold_(Tensor(a!) self, Scalar threshold, Scalar value) -> Tensor(a!)",
  6271. "category": "Activation"
  6272. },
  6273. {
  6274. "name": "aten::tile(Tensor self, SymInt[] dims) -> Tensor"
  6275. },
  6276. {
  6277. "name": "aten::to.device(Tensor(a) self, Device device, ScalarType dtype, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a)"
  6278. },
  6279. {
  6280. "name": "aten::to.dtype(Tensor(a) self, ScalarType dtype, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a)"
  6281. },
  6282. {
  6283. "name": "aten::to.other(Tensor(a) self, Tensor other, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a)"
  6284. },
  6285. {
  6286. "name": "aten::to.dtype_layout(Tensor(a) self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a)"
  6287. },
  6288. {
  6289. "name": "aten::to.prim_Device(Tensor(a) self, Device? device, int? dtype=None, bool non_blocking=False, bool copy=False) -> Tensor(a|b)"
  6290. },
  6291. {
  6292. "name": "aten::to.prim_dtype(Tensor(a) self, int? dtype=None, bool non_blocking=False, bool copy=False) -> Tensor(a|b)"
  6293. },
  6294. {
  6295. "name": "aten::to.prim_other(Tensor(a) self, bool non_blocking=False, bool copy=False) -> Tensor(a|b)"
  6296. },
  6297. {
  6298. "name": "aten::to_dense(Tensor self, ScalarType? dtype=None, *, bool? masked_grad=None) -> Tensor"
  6299. },
  6300. {
  6301. "name": "aten::to_dense_backward(Tensor grad, Tensor input, bool? masked_grad=None) -> Tensor"
  6302. },
  6303. {
  6304. "name": "aten::to_mkldnn(Tensor self, ScalarType? dtype=None) -> Tensor"
  6305. },
  6306. {
  6307. "name": "aten::to_mkldnn.out(Tensor self, ScalarType? dtype=None, *, Tensor(a!) out) -> Tensor(a!)"
  6308. },
  6309. {
  6310. "name": "aten::to_mkldnn_backward(Tensor grad, Tensor input) -> Tensor"
  6311. },
  6312. {
  6313. "name": "aten::to_padded_tensor.out(Tensor self, float padding, SymInt[]? output_size=None, *, Tensor(a!) out) -> Tensor(a!)"
  6314. },
  6315. {
  6316. "name": "aten::to_padded_tensor(Tensor self, float padding, SymInt[]? output_size=None) -> Tensor"
  6317. },
  6318. {
  6319. "name": "aten::to_sparse.sparse_dim(Tensor self, int sparse_dim) -> Tensor"
  6320. },
  6321. {
  6322. "name": "aten::to_sparse(Tensor self, *, Layout? layout=None, int[2]? blocksize=None, int? dense_dim=None) -> Tensor"
  6323. },
  6324. {
  6325. "name": "aten::to_sparse_bsc(Tensor self, int[2] blocksize, int? dense_dim=None) -> Tensor"
  6326. },
  6327. {
  6328. "name": "aten::to_sparse_bsr(Tensor self, int[2] blocksize, int? dense_dim=None) -> Tensor"
  6329. },
  6330. {
  6331. "name": "aten::to_sparse_csc(Tensor self, int? dense_dim=None) -> Tensor"
  6332. },
  6333. {
  6334. "name": "aten::to_sparse_csr(Tensor self, int? dense_dim=None) -> Tensor"
  6335. },
  6336. {
  6337. "name": "aten::topk(Tensor self, SymInt k, int dim=-1, bool largest=True, bool sorted=True) -> (Tensor values, Tensor indices)"
  6338. },
  6339. {
  6340. "name": "aten::topk.values(Tensor self, SymInt k, int dim=-1, bool largest=True, bool sorted=True, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)"
  6341. },
  6342. {
  6343. "name": "aten::trace(Tensor self) -> Tensor"
  6344. },
  6345. {
  6346. "name": "aten::trace.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  6347. },
  6348. {
  6349. "name": "aten::transpose.int(Tensor(a) self, int dim0, int dim1) -> Tensor(a)",
  6350. "category": "Transform"
  6351. },
  6352. {
  6353. "name": "aten::transpose.Dimname(Tensor(a) self, str dim0, str dim1) -> Tensor(a)",
  6354. "category": "Transform"
  6355. },
  6356. {
  6357. "name": "aten::transpose_(Tensor(a!) self, int dim0, int dim1) -> Tensor(a!)",
  6358. "category": "Transform"
  6359. },
  6360. {
  6361. "name": "aten::transpose_copy.int(Tensor self, int dim0, int dim1) -> Tensor"
  6362. },
  6363. {
  6364. "name": "aten::transpose_copy.int_out(Tensor self, int dim0, int dim1, *, Tensor(a!) out) -> Tensor(a!)"
  6365. },
  6366. {
  6367. "name": "aten::tril(Tensor self, SymInt diagonal=0) -> Tensor",
  6368. "category": "Layer"
  6369. },
  6370. {
  6371. "name": "aten::tril.out(Tensor self, SymInt diagonal=0, *, Tensor(a!) out) -> Tensor(a!)"
  6372. },
  6373. {
  6374. "name": "aten::tril_(Tensor(a!) self, SymInt diagonal=0) -> Tensor(a!)"
  6375. },
  6376. {
  6377. "name": "aten::tril_indices(int row, int col, int offset=0, *, ScalarType? dtype=4, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor",
  6378. "category": "Layer"
  6379. },
  6380. {
  6381. "name": "aten::tril_indices.out(int row, int col, int offset=0, *, Tensor(a!) out) -> Tensor(a!)"
  6382. },
  6383. {
  6384. "name": "aten::triu(Tensor self, SymInt diagonal=0) -> Tensor"
  6385. },
  6386. {
  6387. "name": "aten::triu.out(Tensor self, SymInt diagonal=0, *, Tensor(a!) out) -> Tensor(a!)"
  6388. },
  6389. {
  6390. "name": "aten::triu_(Tensor(a!) self, SymInt diagonal=0) -> Tensor(a!)"
  6391. },
  6392. {
  6393. "name": "aten::triu_indices(int row, int col, int offset=0, *, ScalarType? dtype=4, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  6394. },
  6395. {
  6396. "name": "aten::triu_indices.out(int row, int col, int offset=0, *, Tensor(a!) out) -> Tensor(a!)"
  6397. },
  6398. {
  6399. "name": "aten::true_divide.Tensor(Tensor self, Tensor other) -> Tensor"
  6400. },
  6401. {
  6402. "name": "aten::true_divide.Scalar(Tensor self, Scalar other) -> Tensor"
  6403. },
  6404. {
  6405. "name": "aten::true_divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  6406. },
  6407. {
  6408. "name": "aten::true_divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)"
  6409. },
  6410. {
  6411. "name": "aten::true_divide_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)"
  6412. },
  6413. {
  6414. "name": "aten::trunc(Tensor self) -> Tensor"
  6415. },
  6416. {
  6417. "name": "aten::trunc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  6418. },
  6419. {
  6420. "name": "aten::type_as(Tensor self, Tensor other) -> Tensor"
  6421. },
  6422. {
  6423. "name": "aten::unbind.int(Tensor(a -> *) self, int dim=0) -> Tensor(a)[]"
  6424. },
  6425. {
  6426. "name": "aten::unbind.Dimname(Tensor(a -> *) self, str dim) -> Tensor(a)[]"
  6427. },
  6428. {
  6429. "name": "aten::unflatten.int(Tensor(a) self, int dim, SymInt[] sizes) -> Tensor(a)",
  6430. "category": "Shape"
  6431. },
  6432. {
  6433. "name": "aten::unflatten.Dimname(Tensor(a) self, str dim, SymInt[] sizes, str[] names) -> Tensor(a)",
  6434. "category": "Shape"
  6435. },
  6436. {
  6437. "name": "aten::unfold(Tensor(a) self, int dimension, int size, int step) -> Tensor(a)"
  6438. },
  6439. {
  6440. "name": "aten::unfold_copy(Tensor self, int dimension, int size, int step) -> Tensor"
  6441. },
  6442. {
  6443. "name": "aten::unfold_copy.out(Tensor self, int dimension, int size, int step, *, Tensor(a!) out) -> Tensor(a!)"
  6444. },
  6445. {
  6446. "name": "aten::uniform_(Tensor(a!) self, float from=0., float to=1., *, Generator? generator=None) -> Tensor(a!)"
  6447. },
  6448. {
  6449. "name": "aten::unique_consecutive(Tensor self, bool return_inverse=False, bool return_counts=False, int? dim=None) -> (Tensor, Tensor, Tensor)",
  6450. "category": "Layer"
  6451. },
  6452. {
  6453. "name": "aten::unique_consecutive.out(Tensor self, bool return_inverse=False, bool return_counts=False, int? dim=None, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))"
  6454. },
  6455. {
  6456. "name": "aten::unique_dim(Tensor self, int dim, bool sorted=True, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor)"
  6457. },
  6458. {
  6459. "name": "aten::unique_dim.out(Tensor self, int dim, bool sorted=True, bool return_inverse=False, bool return_counts=False, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))"
  6460. },
  6461. {
  6462. "name": "aten::unique_dim_consecutive(Tensor self, int dim, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor)",
  6463. "category": "Layer"
  6464. },
  6465. {
  6466. "name": "aten::unique_dim_consecutive.out(Tensor self, int dim, bool return_inverse=False, bool return_counts=False, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))"
  6467. },
  6468. {
  6469. "name": "aten::unsafe_chunk(Tensor self, int chunks, int dim=0) -> Tensor[]"
  6470. },
  6471. {
  6472. "name": "aten::unsafe_split.Tensor(Tensor self, SymInt split_size, int dim=0) -> Tensor[]",
  6473. "category": "Tensor"
  6474. },
  6475. {
  6476. "name": "aten::unsafe_split.Tensor_out(Tensor self, SymInt split_size, int dim=0, *, Tensor(a!)[] out) -> ()"
  6477. },
  6478. {
  6479. "name": "aten::unsqueeze(Tensor(a) self, int dim) -> Tensor(a)",
  6480. "category": "Transform"
  6481. },
  6482. {
  6483. "name": "aten::unsqueeze_(Tensor(a!) self, int dim) -> Tensor(a!)",
  6484. "category": "Transform"
  6485. },
  6486. {
  6487. "name": "aten::unsqueeze_copy(Tensor self, int dim) -> Tensor"
  6488. },
  6489. {
  6490. "name": "aten::unsqueeze_copy.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)"
  6491. },
  6492. {
  6493. "name": "aten::update.str(Dict(str, t)(a!) self, Dict(str, t)(a!) to_add) -> ()"
  6494. },
  6495. {
  6496. "name": "aten::update.int(Dict(int, t)(a!) self, Dict(int, t)(a!) to_add) -> ()"
  6497. },
  6498. {
  6499. "name": "aten::update.bool(Dict(bool, t)(a!) self, Dict(bool, t)(a!) to_add) -> ()"
  6500. },
  6501. {
  6502. "name": "aten::update.float(Dict(float, t)(a!) self, Dict(float, t)(a!) to_add) -> ()"
  6503. },
  6504. {
  6505. "name": "aten::update.complex(Dict(complex, t)(a!) self, Dict(complex, t)(a!) to_add) -> ()"
  6506. },
  6507. {
  6508. "name": "aten::update.Tensor(Dict(Tensor, t)(a!) self, Dict(Tensor, t)(a!) to_add) -> ()"
  6509. },
  6510. {
  6511. "name": "aten::upsample_bicubic2d(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor",
  6512. "category": "Layer"
  6513. },
  6514. {
  6515. "name": "aten::upsample_bicubic2d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor",
  6516. "category": "Layer"
  6517. },
  6518. {
  6519. "name": "aten::upsample_bicubic2d.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)",
  6520. "category": "Layer"
  6521. },
  6522. {
  6523. "name": "aten::upsample_bilinear2d(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor",
  6524. "category": "Layer"
  6525. },
  6526. {
  6527. "name": "aten::upsample_bilinear2d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor",
  6528. "category": "Layer"
  6529. },
  6530. {
  6531. "name": "aten::upsample_bilinear2d.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)",
  6532. "category": "Layer"
  6533. },
  6534. {
  6535. "name": "aten::upsample_bilinear2d.vec_out(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)",
  6536. "category": "Layer"
  6537. },
  6538. {
  6539. "name": "aten::upsample_bilinear2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor",
  6540. "category": "Layer"
  6541. },
  6542. {
  6543. "name": "aten::upsample_bilinear2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)",
  6544. "category": "Layer"
  6545. },
  6546. {
  6547. "name": "aten::upsample_linear1d(Tensor self, SymInt[1] output_size, bool align_corners, float? scales=None) -> Tensor",
  6548. "category": "Layer"
  6549. },
  6550. {
  6551. "name": "aten::upsample_linear1d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor",
  6552. "category": "Layer"
  6553. },
  6554. {
  6555. "name": "aten::upsample_linear1d.out(Tensor self, SymInt[1] output_size, bool align_corners, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)",
  6556. "category": "Layer"
  6557. },
  6558. {
  6559. "name": "aten::upsample_nearest1d(Tensor self, SymInt[1] output_size, float? scales=None) -> Tensor",
  6560. "category": "Layer"
  6561. },
  6562. {
  6563. "name": "aten::upsample_nearest1d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor",
  6564. "category": "Layer"
  6565. },
  6566. {
  6567. "name": "aten::upsample_nearest1d.out(Tensor self, SymInt[1] output_size, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)"
  6568. },
  6569. {
  6570. "name": "aten::upsample_nearest2d(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None) -> Tensor",
  6571. "category": "Layer"
  6572. },
  6573. {
  6574. "name": "aten::upsample_nearest2d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor",
  6575. "category": "Layer"
  6576. },
  6577. {
  6578. "name": "aten::upsample_nearest2d.out(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)"
  6579. },
  6580. {
  6581. "name": "aten::upsample_nearest2d.vec_out(Tensor input, SymInt[]? output_size, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)"
  6582. },
  6583. {
  6584. "name": "aten::upsample_nearest2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None) -> Tensor"
  6585. },
  6586. {
  6587. "name": "aten::upsample_nearest2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)"
  6588. },
  6589. {
  6590. "name": "aten::upsample_nearest3d(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor",
  6591. "category": "Layer"
  6592. },
  6593. {
  6594. "name": "aten::upsample_nearest3d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor",
  6595. "category": "Layer"
  6596. },
  6597. {
  6598. "name": "aten::upsample_nearest3d.out(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)"
  6599. },
  6600. {
  6601. "name": "aten::upsample_trilinear3d(Tensor self, SymInt[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor"
  6602. },
  6603. {
  6604. "name": "aten::upsample_trilinear3d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor"
  6605. },
  6606. {
  6607. "name": "aten::upsample_trilinear3d.out(Tensor self, SymInt[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)"
  6608. },
  6609. {
  6610. "name": "aten::upsample_trilinear3d_backward(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor"
  6611. },
  6612. {
  6613. "name": "aten::upsample_trilinear3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)"
  6614. },
  6615. {
  6616. "name": "aten::values(Tensor(a) self) -> Tensor(a)"
  6617. },
  6618. {
  6619. "name": "aten::values.str(Dict(str, t) self) -> t[](*)"
  6620. },
  6621. {
  6622. "name": "aten::values.int(Dict(int, t) self) -> t[](*)"
  6623. },
  6624. {
  6625. "name": "aten::values.bool(Dict(bool, t) self) -> t[](*)"
  6626. },
  6627. {
  6628. "name": "aten::values.float(Dict(float, t) self) -> t[](*)"
  6629. },
  6630. {
  6631. "name": "aten::values.complex(Dict(complex, t) self) -> t[](*)"
  6632. },
  6633. {
  6634. "name": "aten::values.Tensor(Dict(Tensor, t) self) -> t[](*)"
  6635. },
  6636. {
  6637. "name": "aten::var(Tensor self, bool unbiased=True) -> Tensor"
  6638. },
  6639. {
  6640. "name": "aten::var.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> Tensor"
  6641. },
  6642. {
  6643. "name": "aten::var.correction(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False) -> Tensor"
  6644. },
  6645. {
  6646. "name": "aten::var.names_dim(Tensor self, str[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor"
  6647. },
  6648. {
  6649. "name": "aten::var.names_out(Tensor self, str[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)"
  6650. },
  6651. {
  6652. "name": "aten::var.out(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)"
  6653. },
  6654. {
  6655. "name": "aten::var.correction_out(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)"
  6656. },
  6657. {
  6658. "name": "aten::var.correction_names(Tensor self, str[1] dim, *, Scalar? correction=None, bool keepdim=False) -> Tensor"
  6659. },
  6660. {
  6661. "name": "aten::var.correction_names_out(Tensor self, str[1] dim, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)"
  6662. },
  6663. {
  6664. "name": "aten::var_mean(Tensor self, bool unbiased=True) -> (Tensor, Tensor)"
  6665. },
  6666. {
  6667. "name": "aten::var_mean.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)"
  6668. },
  6669. {
  6670. "name": "aten::var_mean.correction(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False) -> (Tensor, Tensor)"
  6671. },
  6672. {
  6673. "name": "aten::var_mean.names_dim(Tensor self, str[1] dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)"
  6674. },
  6675. {
  6676. "name": "aten::var_mean.correction_names(Tensor self, str[1] dim, *, Scalar? correction=None, bool keepdim=False) -> (Tensor, Tensor)"
  6677. },
  6678. {
  6679. "name": "aten::var_mean.correction_out(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))"
  6680. },
  6681. {
  6682. "name": "aten::vdot(Tensor self, Tensor other) -> Tensor"
  6683. },
  6684. {
  6685. "name": "aten::vdot.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  6686. },
  6687. {
  6688. "name": "aten::view(Tensor(a) self, SymInt[] size) -> Tensor(a)"
  6689. },
  6690. {
  6691. "name": "aten::view.dtype(Tensor(a) self, ScalarType dtype) -> Tensor(a)"
  6692. },
  6693. {
  6694. "name": "aten::view_as(Tensor(a) self, Tensor other) -> Tensor(a)"
  6695. },
  6696. {
  6697. "name": "aten::view_as_complex(Tensor(a) self) -> Tensor(a)"
  6698. },
  6699. {
  6700. "name": "aten::view_as_complex_copy(Tensor self) -> Tensor"
  6701. },
  6702. {
  6703. "name": "aten::view_as_complex_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  6704. },
  6705. {
  6706. "name": "aten::view_as_real(Tensor(a) self) -> Tensor(a)"
  6707. },
  6708. {
  6709. "name": "aten::view_as_real_copy(Tensor self) -> Tensor"
  6710. },
  6711. {
  6712. "name": "aten::view_as_real_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
  6713. },
  6714. {
  6715. "name": "aten::view_copy(Tensor self, SymInt[] size) -> Tensor"
  6716. },
  6717. {
  6718. "name": "aten::view_copy.dtype(Tensor self, ScalarType dtype) -> Tensor"
  6719. },
  6720. {
  6721. "name": "aten::view_copy.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)"
  6722. },
  6723. {
  6724. "name": "aten::view_copy.dtype_out(Tensor self, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!)"
  6725. },
  6726. {
  6727. "name": "aten::vstack(Tensor[] tensors) -> Tensor"
  6728. },
  6729. {
  6730. "name": "aten::vstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)"
  6731. },
  6732. {
  6733. "name": "aten::wait(Future(t) self) -> t"
  6734. },
  6735. {
  6736. "name": "aten::warn(str message, int stacklevel=2) -> ()"
  6737. },
  6738. {
  6739. "name": "aten::where.self(Tensor condition, Tensor self, Tensor other) -> Tensor"
  6740. },
  6741. {
  6742. "name": "aten::where.ScalarOther(Tensor condition, Tensor self, Scalar other) -> Tensor"
  6743. },
  6744. {
  6745. "name": "aten::where.ScalarSelf(Tensor condition, Scalar self, Tensor other) -> Tensor"
  6746. },
  6747. {
  6748. "name": "aten::where.Scalar(Tensor condition, Scalar self, Scalar other) -> Tensor"
  6749. },
  6750. {
  6751. "name": "aten::where(Tensor condition) -> Tensor[]"
  6752. },
  6753. {
  6754. "name": "aten::where.self_out(Tensor condition, Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  6755. },
  6756. {
  6757. "name": "aten::xlogy.Tensor(Tensor self, Tensor other) -> Tensor"
  6758. },
  6759. {
  6760. "name": "aten::xlogy.Scalar_Other(Tensor self, Scalar other) -> Tensor"
  6761. },
  6762. {
  6763. "name": "aten::xlogy.Scalar_Self(Scalar self, Tensor other) -> Tensor"
  6764. },
  6765. {
  6766. "name": "aten::xlogy.OutTensor(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  6767. },
  6768. {
  6769. "name": "aten::xlogy.OutScalar_Self(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  6770. },
  6771. {
  6772. "name": "aten::xlogy.OutScalar_Other(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)"
  6773. },
  6774. {
  6775. "name": "aten::xlogy_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)"
  6776. },
  6777. {
  6778. "name": "aten::xlogy_.Scalar_Other(Tensor(a!) self, Scalar other) -> Tensor(a!)"
  6779. },
  6780. {
  6781. "name": "aten::zero_(Tensor(a!) self) -> Tensor(a!)"
  6782. },
  6783. {
  6784. "name": "aten::zeros.names(int[] size, *, str[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  6785. },
  6786. {
  6787. "name": "aten::zeros(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"
  6788. },
  6789. {
  6790. "name": "aten::zeros.names_out(int[] size, *, str[]? names, Tensor(a!) out) -> Tensor(a!)"
  6791. },
  6792. {
  6793. "name": "aten::zeros.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)"
  6794. },
  6795. {
  6796. "name": "aten::zeros_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor"
  6797. },
  6798. {
  6799. "name": "aten::zeros_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)"
  6800. },
  6801. {
  6802. "name": "cadence::quantize_per_tensor.out(Tensor input, float scale, int zero_point, int quant_min, int quant_max, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!)"
  6803. },
  6804. {
  6805. "name": "cortex_m::dequantize_per_tensor.out(Tensor input, float scale, int zero_point, int quant_min, int quant_max, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!)"
  6806. },
  6807. {
  6808. "name": "cortex_m::maximum.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  6809. },
  6810. {
  6811. "name": "cortex_m::minimum.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
  6812. },
  6813. {
  6814. "name": "cortex_m::quantize_per_tensor.out(Tensor input, float scale, int zero_point, int quant_min, int quant_max, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!)"
  6815. },
  6816. {
  6817. "name": "cortex_m::quantized_add.out(Tensor self, Scalar self_zero_point, Scalar self_multiplier, Scalar self_shift, Tensor other, Scalar other_zero_point, Scalar other_multiplier, Scalar other_shift, Scalar output_zero_point, Scalar output_multiplier, Scalar output_shift, *, Tensor(a!) out) -> Tensor(a!)"
  6818. },
  6819. {
  6820. "name": "cortex_m::quantized_conv2d.out(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, int input_offset, int output_offset, Tensor requantize_multipliers, Tensor requantize_shifts, int activation_min, int activation_max, *, Tensor(a!) out) -> Tensor(a!)"
  6821. },
  6822. {
  6823. "name": "cortex_m::quantized_linear.out(Tensor input, Tensor weights, Tensor? bias, Tensor? kernel_sum, Scalar input_offset, Scalar filter_offset, Scalar output_offset, int[] requantize_multipliers, int[] requantize_shifts, Scalar activation_max, Scalar activation_min, *, Tensor(a!) out) -> Tensor(a!)"
  6824. },
  6825. {
  6826. "name": "cortex_m::quantized_mul.out(Tensor self, Scalar self_zero_point, Tensor other, Scalar other_zero_point, Scalar output_zero_point, Scalar output_multiplier, Scalar output_shift, *, Tensor(a!) out) -> Tensor(a!)"
  6827. },
  6828. {
  6829. "name": "cortex_m::transpose.out(Tensor input, int[] perm, *, Tensor(a!) out) -> Tensor(a!)"
  6830. },
  6831. {
  6832. "name": "cuda::_current_device() -> int"
  6833. },
  6834. {
  6835. "name": "detectron2::nms_rotated(Tensor boxes, Tensor scores, float iou_threshold) -> Tensor"
  6836. },
  6837. {
  6838. "name": "detectron2::roi_align_rotated_forward(Tensor input, Tensor rois, float spatial_scale, int pooled_height, int pooled_width, int sampling_ratio) -> Tensor"
  6839. },
  6840. {
  6841. "name": "dim_order_ops::_clone_dim_order.out(Tensor self, *, bool non_blocking=False, int[]? dim_order=None, Tensor(a!) out) -> Tensor(a!)"
  6842. },
  6843. {
  6844. "name": "dim_order_ops::_empty_dim_order.out(int[] size, *, int[]? dim_order=None, Tensor(a!) out) -> Tensor(a!)"
  6845. },
  6846. {
  6847. "name": "dim_order_ops::_to_dim_order_copy.out(Tensor self, *, bool non_blocking=False, int[]? dim_order=None, Tensor(a!) out) -> Tensor(a!)"
  6848. },
  6849. {
  6850. "name": "executorch_prim::add.Scalar(Scalar a, Scalar b) -> Scalar"
  6851. },
  6852. {
  6853. "name": "executorch_prim::ceil.Scalar(Scalar a) -> Scalar"
  6854. },
  6855. {
  6856. "name": "executorch_prim::eq.Scalar(Scalar a, Scalar b) -> bool"
  6857. },
  6858. {
  6859. "name": "executorch_prim::et_view.default(Tensor self, int[] size) -> (Tensor out)"
  6860. },
  6861. {
  6862. "name": "executorch_prim::floordiv.Scalar(Scalar a, Scalar b) -> Scalar"
  6863. },
  6864. {
  6865. "name": "executorch_prim::ge.Scalar(Scalar a, Scalar b) -> bool"
  6866. },
  6867. {
  6868. "name": "executorch_prim::gt.Scalar(Scalar a, Scalar b) -> bool"
  6869. },
  6870. {
  6871. "name": "executorch_prim::le.Scalar(Scalar a, Scalar b) -> bool"
  6872. },
  6873. {
  6874. "name": "executorch_prim::lt.Scalar(Scalar a, Scalar b) -> bool"
  6875. },
  6876. {
  6877. "name": "executorch_prim::mod.Scalar(SymInt a, SymInt b) -> SymInt"
  6878. },
  6879. {
  6880. "name": "executorch_prim::mul.Scalar(Scalar a, Scalar b) -> Scalar"
  6881. },
  6882. {
  6883. "name": "executorch_prim::neg.Scalar(Scalar a) -> Scalar"
  6884. },
  6885. {
  6886. "name": "executorch_prim::round.Scalar(Scalar a) -> Scalar"
  6887. },
  6888. {
  6889. "name": "executorch_prim::sub.Scalar(Scalar a, Scalar b) -> Scalar"
  6890. },
  6891. {
  6892. "name": "executorch_prim::sym_float.Scalar(Scalar a) -> Scalar"
  6893. },
  6894. {
  6895. "name": "executorch_prim::sym_max.Scalar(Scalar a, Scalar b) -> Scalar"
  6896. },
  6897. {
  6898. "name": "executorch_prim::sym_min.Scalar(Scalar a, Scalar b) -> Scalar"
  6899. },
  6900. {
  6901. "name": "executorch_prim::truediv.Scalar(Scalar a, Scalar b) -> Scalar"
  6902. },
  6903. {
  6904. "name": "executorch_prim::trunc.Scalar(Scalar a) -> Scalar"
  6905. },
  6906. {
  6907. "name": "fbgemm::asynchronous_complete_cumsum(Tensor t_in) -> Tensor"
  6908. },
  6909. {
  6910. "name": "fbgemm::bf16i4bf16_rowwise(Tensor X, Tensor WQ, Tensor w_scale, Tensor w_zp) -> Tensor"
  6911. },
  6912. {
  6913. "name": "fbgemm::car_init(int rank, int world_size, Tensor local_barrier, Tensor[] all_barrier_handles, Tensor local_buffer, Tensor[] all_buffer_handles) -> ()"
  6914. },
  6915. {
  6916. "name": "fbgemm::car_ipc_handle(Tensor buffer) -> Tensor"
  6917. },
  6918. {
  6919. "name": "fbgemm::car_tensor() -> Tensor"
  6920. },
  6921. {
  6922. "name": "fbgemm::dequantize_fp8_cache(Tensor cache_K, Tensor cache_V, Tensor kv_seqlen, Tensor? qparam_k=None, Tensor? qparam_v=None) -> (Tensor, Tensor)"
  6923. },
  6924. {
  6925. "name": "fbgemm::dequantize_int4_cache(Tensor cache_K, Tensor cache_V, Tensor kv_seqlen, int? num_groups=1) -> (Tensor, Tensor)"
  6926. },
  6927. {
  6928. "name": "fbgemm::f8f8bf16(Tensor XQ, Tensor WQ, Tensor scale, bool use_fast_accum=True) -> Tensor"
  6929. },
  6930. {
  6931. "name": "fbgemm::f8f8bf16_blockwise(Tensor XQ, Tensor WQ, Tensor x_scale, Tensor w_scale, int block_m=128, int block_n=128, int block_k=128) -> Tensor"
  6932. },
  6933. {
  6934. "name": "fbgemm::f8f8bf16_cublas(Tensor A, Tensor B, Tensor? Ainvs=None, Tensor? Binvs=None, bool use_fast_accum=True, Tensor(a!)? output=None) -> Tensor"
  6935. },
  6936. {
  6937. "name": "fbgemm::f8f8bf16_rowwise(Tensor XQ, Tensor WQ, Tensor x_scale, Tensor w_scale, Tensor? bias=None, bool use_fast_accum=True, Tensor(a!)? output=None) -> Tensor"
  6938. },
  6939. {
  6940. "name": "fbgemm::f8f8bf16_tensorwise(Tensor XQ, Tensor WQ, float scale, bool use_fast_accum=True) -> Tensor"
  6941. },
  6942. {
  6943. "name": "fbgemm::f8i4bf16_rowwise(Tensor XQ, Tensor WQ, Tensor x_scale, Tensor w_scale, Tensor w_zp) -> Tensor"
  6944. },
  6945. {
  6946. "name": "fbgemm::get_fp8_per_tensor_scale(Tensor input, Tensor? bs=None, Tensor? scale_ub=None) -> Tensor"
  6947. },
  6948. {
  6949. "name": "fbgemm::gqa_attn_splitk(Tensor XQ, Tensor cache_K, Tensor cache_V, Tensor seq_positions, float qk_scale, int num_split_ks, int kv_cache_quant_num_groups=1, bool use_tensor_cores=True, int cache_logical_dtype_int=0) -> (Tensor, Tensor, Tensor)"
  6950. },
  6951. {
  6952. "name": "fbgemm::i8i8bf16(Tensor XQ, Tensor WQ, float scale, int split_k=1) -> Tensor"
  6953. },
  6954. {
  6955. "name": "fbgemm::i8i8bf16_dynamic(Tensor XQ, Tensor WQ, Tensor scale, int split_k=1) -> Tensor"
  6956. },
  6957. {
  6958. "name": "fbgemm::jagged_to_padded_dense(Tensor values, Tensor[] offsets, SymInt[] max_lengths, float padding_value=0.) -> Tensor"
  6959. },
  6960. {
  6961. "name": "fbgemm::mqa_attn(Tensor XQ, Tensor cache_K, Tensor cache_V, Tensor seq_positions, float qk_scale, int? num_groups=1, int cache_logical_dtype_int=0) -> Tensor"
  6962. },
  6963. {
  6964. "name": "fbgemm::nccl_allgather(Tensor dst, Tensor src, int comm_idx=0) -> ()"
  6965. },
  6966. {
  6967. "name": "fbgemm::nccl_allreduce(Tensor dst, Tensor src, Tensor? bias=None, int comm_idx=0) -> ()"
  6968. },
  6969. {
  6970. "name": "fbgemm::nccl_alltoall(Tensor dst, Tensor src, int world_size, int comm_idx=0) -> ()"
  6971. },
  6972. {
  6973. "name": "fbgemm::nccl_comm_init_rank(int world_size, int rank, Tensor id_, int comm_idx=0) -> ()"
  6974. },
  6975. {
  6976. "name": "fbgemm::nccl_get_unique_id() -> Tensor"
  6977. },
  6978. {
  6979. "name": "fbgemm::nccl_init(int rank, int world_size, str rendevouz, int comm_idx=0) -> ()"
  6980. },
  6981. {
  6982. "name": "fbgemm::nccl_reducescatter(Tensor dst, Tensor src, int comm_idx=0) -> ()"
  6983. },
  6984. {
  6985. "name": "fbgemm::one_shot_car_allreduce(Tensor dst, Tensor src, Tensor? bias=None, int comm_idx=0) -> ()"
  6986. },
  6987. {
  6988. "name": "fbgemm::per_tensor_dynamic_quantize_i8(Tensor X) -> (Tensor, Tensor)"
  6989. },
  6990. {
  6991. "name": "fbgemm::per_tensor_quantize_i8(Tensor X, float scale) -> Tensor"
  6992. },
  6993. {
  6994. "name": "fbgemm::quantize_fp8_per_col(Tensor input, Tensor? bs=None, Tensor? scale_ub=None) -> Tensor[]"
  6995. },
  6996. {
  6997. "name": "fbgemm::quantize_fp8_per_row(Tensor input, Tensor? bs=None, Tensor? scale_ub=None, ScalarType? output_dtype=None, bool stochastic_rounding=False) -> Tensor[]"
  6998. },
  6999. {
  7000. "name": "fbgemm::quantize_fp8_per_tensor(Tensor input, Tensor? bs=None, Tensor? scale_ub=None, bool stochastic_rounding=False) -> Tensor[]"
  7001. },
  7002. {
  7003. "name": "fbgemm::quantize_fp8_per_tensor_fixed_scale(Tensor input, Tensor scale, Tensor? bs=None, bool stochatic_rounding=False) -> Tensor"
  7004. },
  7005. {
  7006. "name": "fbgemm::rope_qkv_decoding(Tensor XQ, Tensor XK, Tensor XV, Tensor(a!) cache_K, Tensor(b!) cache_V, Tensor seqpos, float theta, int? num_groups=1, Tensor? block_tables=None, int page_size=64, Tensor? actual_batch_size=None, Tensor? batch=None, Tensor? cache_seqpos=None, int cache_logical_dtype_int=0, bool rope_scaling=False, int old_context_len=8192, float scaling_factor=16., float lo_freq_factor=1., float hi_freq_factor=32., Tensor? qparam_k=None, Tensor? qparam_v=None) -> Tensor"
  7007. },
  7008. {
  7009. "name": "fbgemm::rope_qkv_varseq_prefill(Tensor XQ, Tensor XK, Tensor XV, Tensor(a!) cache_K, Tensor(b!) cache_V, Tensor varseq_batch, Tensor varseq_seqpos, float theta, int? num_groups=1, Tensor? block_tables=None, int page_size=64, Tensor? varseq_cache_seqpos=None, int cache_logical_dtype_int=0, bool rope_scaling=False, int old_context_len=8192, float scaling_factor=16., float lo_freq_factor=1., float hi_freq_factor=32., Tensor? qparam_k=None, Tensor? qparam_v=None) -> Tensor"
  7010. },
  7011. {
  7012. "name": "fbgemm::segment_sum_csr(SymInt batch_size, Tensor csr_seg, Tensor values) -> Tensor"
  7013. },
  7014. {
  7015. "name": "fbgemm::silu_mul_quantize_i8(Tensor X1, Tensor X2, float scale) -> Tensor"
  7016. },
  7017. {
  7018. "name": "fbgemm::two_shot_car_allreduce(Tensor dst, Tensor src, Tensor? bias=None, int comm_idx=0) -> ()"
  7019. },
  7020. {
  7021. "name": "fbgemm::xpos_qkv_decoding(Tensor XQ, Tensor XK, Tensor XV, Tensor(a!) cache_K, Tensor(b!) cache_V, Tensor seqpos, float theta, float gamma, float scale_base, float exponent_offset, int? num_groups=1, Tensor? block_tables=None, int page_size=64, Tensor? actual_batch_size=None, Tensor? batch=None, Tensor? cache_seqpos=None, int cache_logical_dtype_int=0, bool rope_scaling=False, int old_context_len=8192, float scaling_factor=16., float lo_freq_factor=1., float hi_freq_factor=32., Tensor? qparam_k=None, Tensor? qparam_v=None) -> Tensor"
  7022. },
  7023. {
  7024. "name": "fbgemm::xpos_qkv_varseq_prefill(Tensor XQ, Tensor XK, Tensor XV, Tensor(a!) cache_K, Tensor(b!) cache_V, Tensor varseq_batch, Tensor varseq_seqpos, float theta, float gamma, float scale_base, float exponent_offset, int? num_groups=1, Tensor? block_tables=None, int page_size=64, Tensor? varseq_cache_seqpos=None, int cache_logical_dtype_int=0, bool rope_scaling=False, int old_context_len=8192, float scaling_factor=16., float lo_freq_factor=1., float hi_freq_factor=32., Tensor? qparam_k=None, Tensor? qparam_v=None) -> Tensor"
  7025. },
  7026. {
  7027. "name": "horizon::scale_quanti(Tensor x, Tensor scale, Tensor zero_point, int d, int min, int max, bool flag1, bool flat2, str str1, str str2) -> Tensor"
  7028. },
  7029. {
  7030. "name": "llama::custom_sdpa.out(Tensor query, Tensor key, Tensor value, SymInt start_pos, Tensor? attn_mask=None, float drpout_p=0.0, bool is_causal=False, float? scale=None, *, Tensor(a!) out) -> Tensor(a!)"
  7031. },
  7032. {
  7033. "name": "llama::custom_sdpa(Tensor query, Tensor key, Tensor value, SymInt start_pos, Tensor? attn_mask=None, float drpout_p=0.0, bool is_causal=False, float? scale=None) -> Tensor"
  7034. },
  7035. {
  7036. "name": "llama::fast_hadamard_transform.out(Tensor mat, *, Tensor(a!) out) -> Tensor(a!)"
  7037. },
  7038. {
  7039. "name": "llama::sdpa.out(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float drpout_p=0.0, bool is_causal=False, float? scale=None, *, Tensor(a!) out) -> Tensor(a!)"
  7040. },
  7041. {
  7042. "name": "llama::sdpa_with_kv_cache.out(Tensor query, Tensor key, Tensor value, Tensor(a!) key_cache, Tensor(b!) value_cache, SymInt start_pos, SymInt seq_len, Tensor? attn_mask=None, float drpout_p=0.0, bool is_causal=False, float? scale=None, *, Tensor(c!) out) -> Tensor(c!)"
  7043. },
  7044. {
  7045. "name": "llama::sdpa_with_kv_cache(Tensor query, Tensor key, Tensor value, Tensor(a!) key_cache, Tensor(b!) value_cache, SymInt start_pos, SymInt seq_len, Tensor? attn_mask=None, float drpout_p=0.0, bool is_causal=False, float? scale=None) -> Tensor"
  7046. },
  7047. {
  7048. "name": "llama::update_cache.out(Tensor value, Tensor(a!) cache, SymInt start_pos, *, Tensor(b!) out) -> Tensor(b!)"
  7049. },
  7050. {
  7051. "name": "llama::update_cache(Tensor value, Tensor(a!) cache, SymInt start_pos) -> Tensor"
  7052. },
  7053. {
  7054. "name": "neuron::_execute_neuron(__torch__.torch.classes.neuron.Model _0, Tensor[] _1) -> Tensor[] _0"
  7055. },
  7056. {
  7057. "name": "neuron::_from_neuron(Tensor _0) -> Tensor _0"
  7058. },
  7059. {
  7060. "name": "neuron::_init_neuron() -> ()"
  7061. },
  7062. {
  7063. "name": "neuron::_load_collectives_neuron(__torch__.torch.classes.neuron.Model _0, int _1, int _2, int _3, int _4) -> ()"
  7064. },
  7065. {
  7066. "name": "neuron::_load_neuron(__torch__.torch.classes.neuron.Model _0) -> ()"
  7067. },
  7068. {
  7069. "name": "neuron::_parallel_executor_run(__torch__.torch.classes.neuron.ParallelExecutor _0, Tensor[] _1, int _2) -> Tensor[] _0"
  7070. },
  7071. {
  7072. "name": "neuron::_parallel_from_neuron(Tensor _0) -> Tensor[] _0"
  7073. },
  7074. {
  7075. "name": "neuron::_parallel_load(Dict(str, Tensor)[] _0) -> Dict(str, Tensor)[] _0"
  7076. },
  7077. {
  7078. "name": "neuron::_parallel_profile_start_neuron(__torch__.torch.classes.neuron.ParallelModel _0, str _1, int _2) -> str[] _0"
  7079. },
  7080. {
  7081. "name": "neuron::_parallel_profile_stop_neuron(str[] _0) -> ()"
  7082. },
  7083. {
  7084. "name": "neuron::_parallel_run_neuron(__torch__.torch.classes.neuron.ParallelModel _0, __torch__.torch.classes.neuron.ParallelTensorSet _1, __torch__.torch.classes.neuron.ParallelTensorSet _2) -> ()"
  7085. },
  7086. {
  7087. "name": "neuron::_parallel_slice_neuron(Tensor _0, int _1, int _2, int _3, int _4) -> Tensor _0"
  7088. },
  7089. {
  7090. "name": "neuron::_parallel_to_neuron(Tensor[] _0) -> Tensor _0"
  7091. },
  7092. {
  7093. "name": "neuron::_parallel_write_neuron(Tensor _0, Tensor[] _1) -> ()"
  7094. },
  7095. {
  7096. "name": "neuron::_profile_start_neuron(__torch__.torch.classes.neuron.Model _0, str _1) -> ()"
  7097. },
  7098. {
  7099. "name": "neuron::_profile_stop_neuron(str _0) -> ()"
  7100. },
  7101. {
  7102. "name": "neuron::_slice_neuron(Tensor _0, int _1, int _2, int _3, int _4) -> Tensor _0"
  7103. },
  7104. {
  7105. "name": "neuron::_to_neuron(Tensor _0, int _1) -> Tensor _0"
  7106. },
  7107. {
  7108. "name": "neuron::create_module_from_graph(str _0, str _1) -> str _0"
  7109. },
  7110. {
  7111. "name": "neuron::forward_1(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> Tensor _0"
  7112. },
  7113. {
  7114. "name": "neuron::forward_10(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9)"
  7115. },
  7116. {
  7117. "name": "neuron::forward_11(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10)"
  7118. },
  7119. {
  7120. "name": "neuron::forward_12(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11)"
  7121. },
  7122. {
  7123. "name": "neuron::forward_13(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12)"
  7124. },
  7125. {
  7126. "name": "neuron::forward_14(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13)"
  7127. },
  7128. {
  7129. "name": "neuron::forward_15(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14)"
  7130. },
  7131. {
  7132. "name": "neuron::forward_16(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15)"
  7133. },
  7134. {
  7135. "name": "neuron::forward_17(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16)"
  7136. },
  7137. {
  7138. "name": "neuron::forward_18(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17)"
  7139. },
  7140. {
  7141. "name": "neuron::forward_19(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18)"
  7142. },
  7143. {
  7144. "name": "neuron::forward_2(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1)"
  7145. },
  7146. {
  7147. "name": "neuron::forward_20(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19)"
  7148. },
  7149. {
  7150. "name": "neuron::forward_21(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20)"
  7151. },
  7152. {
  7153. "name": "neuron::forward_22(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21)"
  7154. },
  7155. {
  7156. "name": "neuron::forward_23(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22)"
  7157. },
  7158. {
  7159. "name": "neuron::forward_24(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23)"
  7160. },
  7161. {
  7162. "name": "neuron::forward_25(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24)"
  7163. },
  7164. {
  7165. "name": "neuron::forward_26(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25)"
  7166. },
  7167. {
  7168. "name": "neuron::forward_27(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26)"
  7169. },
  7170. {
  7171. "name": "neuron::forward_28(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27)"
  7172. },
  7173. {
  7174. "name": "neuron::forward_29(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28)"
  7175. },
  7176. {
  7177. "name": "neuron::forward_3(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2)"
  7178. },
  7179. {
  7180. "name": "neuron::forward_30(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29)"
  7181. },
  7182. {
  7183. "name": "neuron::forward_31(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30)"
  7184. },
  7185. {
  7186. "name": "neuron::forward_32(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31)"
  7187. },
  7188. {
  7189. "name": "neuron::forward_33(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32)"
  7190. },
  7191. {
  7192. "name": "neuron::forward_34(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33)"
  7193. },
  7194. {
  7195. "name": "neuron::forward_35(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34)"
  7196. },
  7197. {
  7198. "name": "neuron::forward_36(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35)"
  7199. },
  7200. {
  7201. "name": "neuron::forward_37(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36)"
  7202. },
  7203. {
  7204. "name": "neuron::forward_38(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37)"
  7205. },
  7206. {
  7207. "name": "neuron::forward_39(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38)"
  7208. },
  7209. {
  7210. "name": "neuron::forward_4(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3)"
  7211. },
  7212. {
  7213. "name": "neuron::forward_40(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39)"
  7214. },
  7215. {
  7216. "name": "neuron::forward_41(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40)"
  7217. },
  7218. {
  7219. "name": "neuron::forward_42(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41)"
  7220. },
  7221. {
  7222. "name": "neuron::forward_43(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42)"
  7223. },
  7224. {
  7225. "name": "neuron::forward_44(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43)"
  7226. },
  7227. {
  7228. "name": "neuron::forward_45(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44)"
  7229. },
  7230. {
  7231. "name": "neuron::forward_46(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45)"
  7232. },
  7233. {
  7234. "name": "neuron::forward_47(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46)"
  7235. },
  7236. {
  7237. "name": "neuron::forward_48(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47)"
  7238. },
  7239. {
  7240. "name": "neuron::forward_49(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47, Tensor _48)"
  7241. },
  7242. {
  7243. "name": "neuron::forward_5(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4)"
  7244. },
  7245. {
  7246. "name": "neuron::forward_50(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47, Tensor _48, Tensor _49)"
  7247. },
  7248. {
  7249. "name": "neuron::forward_51(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47, Tensor _48, Tensor _49, Tensor _50)"
  7250. },
  7251. {
  7252. "name": "neuron::forward_52(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47, Tensor _48, Tensor _49, Tensor _50, Tensor _51)"
  7253. },
  7254. {
  7255. "name": "neuron::forward_53(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47, Tensor _48, Tensor _49, Tensor _50, Tensor _51, Tensor _52)"
  7256. },
  7257. {
  7258. "name": "neuron::forward_54(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47, Tensor _48, Tensor _49, Tensor _50, Tensor _51, Tensor _52, Tensor _53)"
  7259. },
  7260. {
  7261. "name": "neuron::forward_55(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47, Tensor _48, Tensor _49, Tensor _50, Tensor _51, Tensor _52, Tensor _53, Tensor _54)"
  7262. },
  7263. {
  7264. "name": "neuron::forward_56(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47, Tensor _48, Tensor _49, Tensor _50, Tensor _51, Tensor _52, Tensor _53, Tensor _54, Tensor _55)"
  7265. },
  7266. {
  7267. "name": "neuron::forward_57(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47, Tensor _48, Tensor _49, Tensor _50, Tensor _51, Tensor _52, Tensor _53, Tensor _54, Tensor _55, Tensor _56)"
  7268. },
  7269. {
  7270. "name": "neuron::forward_58(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47, Tensor _48, Tensor _49, Tensor _50, Tensor _51, Tensor _52, Tensor _53, Tensor _54, Tensor _55, Tensor _56, Tensor _57)"
  7271. },
  7272. {
  7273. "name": "neuron::forward_59(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47, Tensor _48, Tensor _49, Tensor _50, Tensor _51, Tensor _52, Tensor _53, Tensor _54, Tensor _55, Tensor _56, Tensor _57, Tensor _58)"
  7274. },
  7275. {
  7276. "name": "neuron::forward_6(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5)"
  7277. },
  7278. {
  7279. "name": "neuron::forward_60(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47, Tensor _48, Tensor _49, Tensor _50, Tensor _51, Tensor _52, Tensor _53, Tensor _54, Tensor _55, Tensor _56, Tensor _57, Tensor _58, Tensor _59)"
  7280. },
  7281. {
  7282. "name": "neuron::forward_61(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47, Tensor _48, Tensor _49, Tensor _50, Tensor _51, Tensor _52, Tensor _53, Tensor _54, Tensor _55, Tensor _56, Tensor _57, Tensor _58, Tensor _59, Tensor _60)"
  7283. },
  7284. {
  7285. "name": "neuron::forward_62(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47, Tensor _48, Tensor _49, Tensor _50, Tensor _51, Tensor _52, Tensor _53, Tensor _54, Tensor _55, Tensor _56, Tensor _57, Tensor _58, Tensor _59, Tensor _60, Tensor _61)"
  7286. },
  7287. {
  7288. "name": "neuron::forward_63(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47, Tensor _48, Tensor _49, Tensor _50, Tensor _51, Tensor _52, Tensor _53, Tensor _54, Tensor _55, Tensor _56, Tensor _57, Tensor _58, Tensor _59, Tensor _60, Tensor _61, Tensor _62)"
  7289. },
  7290. {
  7291. "name": "neuron::forward_64(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47, Tensor _48, Tensor _49, Tensor _50, Tensor _51, Tensor _52, Tensor _53, Tensor _54, Tensor _55, Tensor _56, Tensor _57, Tensor _58, Tensor _59, Tensor _60, Tensor _61, Tensor _62, Tensor _63)"
  7292. },
  7293. {
  7294. "name": "neuron::forward_7(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6)"
  7295. },
  7296. {
  7297. "name": "neuron::forward_8(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7)"
  7298. },
  7299. {
  7300. "name": "neuron::forward_9(Tensor[] _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8)"
  7301. },
  7302. {
  7303. "name": "neuron::forward_v2(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> Tensor[] _0"
  7304. },
  7305. {
  7306. "name": "neuron::forward_v2_1(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> Tensor _0"
  7307. },
  7308. {
  7309. "name": "neuron::forward_v2_10(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9)"
  7310. },
  7311. {
  7312. "name": "neuron::forward_v2_11(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10)"
  7313. },
  7314. {
  7315. "name": "neuron::forward_v2_12(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11)"
  7316. },
  7317. {
  7318. "name": "neuron::forward_v2_13(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12)"
  7319. },
  7320. {
  7321. "name": "neuron::forward_v2_14(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13)"
  7322. },
  7323. {
  7324. "name": "neuron::forward_v2_15(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14)"
  7325. },
  7326. {
  7327. "name": "neuron::forward_v2_16(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15)"
  7328. },
  7329. {
  7330. "name": "neuron::forward_v2_17(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16)"
  7331. },
  7332. {
  7333. "name": "neuron::forward_v2_18(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17)"
  7334. },
  7335. {
  7336. "name": "neuron::forward_v2_19(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18)"
  7337. },
  7338. {
  7339. "name": "neuron::forward_v2_2(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1)"
  7340. },
  7341. {
  7342. "name": "neuron::forward_v2_20(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19)"
  7343. },
  7344. {
  7345. "name": "neuron::forward_v2_21(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20)"
  7346. },
  7347. {
  7348. "name": "neuron::forward_v2_22(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21)"
  7349. },
  7350. {
  7351. "name": "neuron::forward_v2_23(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22)"
  7352. },
  7353. {
  7354. "name": "neuron::forward_v2_24(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23)"
  7355. },
  7356. {
  7357. "name": "neuron::forward_v2_25(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24)"
  7358. },
  7359. {
  7360. "name": "neuron::forward_v2_26(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25)"
  7361. },
  7362. {
  7363. "name": "neuron::forward_v2_27(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26)"
  7364. },
  7365. {
  7366. "name": "neuron::forward_v2_28(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27)"
  7367. },
  7368. {
  7369. "name": "neuron::forward_v2_29(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28)"
  7370. },
  7371. {
  7372. "name": "neuron::forward_v2_3(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2)"
  7373. },
  7374. {
  7375. "name": "neuron::forward_v2_30(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29)"
  7376. },
  7377. {
  7378. "name": "neuron::forward_v2_31(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30)"
  7379. },
  7380. {
  7381. "name": "neuron::forward_v2_32(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31)"
  7382. },
  7383. {
  7384. "name": "neuron::forward_v2_33(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32)"
  7385. },
  7386. {
  7387. "name": "neuron::forward_v2_35(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34)"
  7388. },
  7389. {
  7390. "name": "neuron::forward_v2_36(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35)"
  7391. },
  7392. {
  7393. "name": "neuron::forward_v2_37(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36)"
  7394. },
  7395. {
  7396. "name": "neuron::forward_v2_38(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37)"
  7397. },
  7398. {
  7399. "name": "neuron::forward_v2_39(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38)"
  7400. },
  7401. {
  7402. "name": "neuron::forward_v2_4(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3)"
  7403. },
  7404. {
  7405. "name": "neuron::forward_v2_40(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39)"
  7406. },
  7407. {
  7408. "name": "neuron::forward_v2_41(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40)"
  7409. },
  7410. {
  7411. "name": "neuron::forward_v2_42(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41)"
  7412. },
  7413. {
  7414. "name": "neuron::forward_v2_43(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42)"
  7415. },
  7416. {
  7417. "name": "neuron::forward_v2_44(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43)"
  7418. },
  7419. {
  7420. "name": "neuron::forward_v2_45(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44)"
  7421. },
  7422. {
  7423. "name": "neuron::forward_v2_46(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45)"
  7424. },
  7425. {
  7426. "name": "neuron::forward_v2_47(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46)"
  7427. },
  7428. {
  7429. "name": "neuron::forward_v2_48(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47)"
  7430. },
  7431. {
  7432. "name": "neuron::forward_v2_49(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47, Tensor _48)"
  7433. },
  7434. {
  7435. "name": "neuron::forward_v2_5(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4)"
  7436. },
  7437. {
  7438. "name": "neuron::forward_v2_50(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47, Tensor _48, Tensor _49)"
  7439. },
  7440. {
  7441. "name": "neuron::forward_v2_51(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47, Tensor _48, Tensor _49, Tensor _50)"
  7442. },
  7443. {
  7444. "name": "neuron::forward_v2_52(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47, Tensor _48, Tensor _49, Tensor _50, Tensor _51)"
  7445. },
  7446. {
  7447. "name": "neuron::forward_v2_53(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47, Tensor _48, Tensor _49, Tensor _50, Tensor _51, Tensor _52)"
  7448. },
  7449. {
  7450. "name": "neuron::forward_v2_54(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47, Tensor _48, Tensor _49, Tensor _50, Tensor _51, Tensor _52, Tensor _53)"
  7451. },
  7452. {
  7453. "name": "neuron::forward_v2_55(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47, Tensor _48, Tensor _49, Tensor _50, Tensor _51, Tensor _52, Tensor _53, Tensor _54)"
  7454. },
  7455. {
  7456. "name": "neuron::forward_v2_56(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47, Tensor _48, Tensor _49, Tensor _50, Tensor _51, Tensor _52, Tensor _53, Tensor _54, Tensor _55)"
  7457. },
  7458. {
  7459. "name": "neuron::forward_v2_57(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47, Tensor _48, Tensor _49, Tensor _50, Tensor _51, Tensor _52, Tensor _53, Tensor _54, Tensor _55, Tensor _56)"
  7460. },
  7461. {
  7462. "name": "neuron::forward_v2_58(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47, Tensor _48, Tensor _49, Tensor _50, Tensor _51, Tensor _52, Tensor _53, Tensor _54, Tensor _55, Tensor _56, Tensor _57)"
  7463. },
  7464. {
  7465. "name": "neuron::forward_v2_59(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47, Tensor _48, Tensor _49, Tensor _50, Tensor _51, Tensor _52, Tensor _53, Tensor _54, Tensor _55, Tensor _56, Tensor _57, Tensor _58)"
  7466. },
  7467. {
  7468. "name": "neuron::forward_v2_6(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5)"
  7469. },
  7470. {
  7471. "name": "neuron::forward_v2_60(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47, Tensor _48, Tensor _49, Tensor _50, Tensor _51, Tensor _52, Tensor _53, Tensor _54, Tensor _55, Tensor _56, Tensor _57, Tensor _58, Tensor _59)"
  7472. },
  7473. {
  7474. "name": "neuron::forward_v2_61(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47, Tensor _48, Tensor _49, Tensor _50, Tensor _51, Tensor _52, Tensor _53, Tensor _54, Tensor _55, Tensor _56, Tensor _57, Tensor _58, Tensor _59, Tensor _60)"
  7475. },
  7476. {
  7477. "name": "neuron::forward_v2_62(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47, Tensor _48, Tensor _49, Tensor _50, Tensor _51, Tensor _52, Tensor _53, Tensor _54, Tensor _55, Tensor _56, Tensor _57, Tensor _58, Tensor _59, Tensor _60, Tensor _61)"
  7478. },
  7479. {
  7480. "name": "neuron::forward_v2_63(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47, Tensor _48, Tensor _49, Tensor _50, Tensor _51, Tensor _52, Tensor _53, Tensor _54, Tensor _55, Tensor _56, Tensor _57, Tensor _58, Tensor _59, Tensor _60, Tensor _61, Tensor _62)"
  7481. },
  7482. {
  7483. "name": "neuron::forward_v2_64(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8, Tensor _9, Tensor _10, Tensor _11, Tensor _12, Tensor _13, Tensor _14, Tensor _15, Tensor _16, Tensor _17, Tensor _18, Tensor _19, Tensor _20, Tensor _21, Tensor _22, Tensor _23, Tensor _24, Tensor _25, Tensor _26, Tensor _27, Tensor _28, Tensor _29, Tensor _30, Tensor _31, Tensor _32, Tensor _33, Tensor _34, Tensor _35, Tensor _36, Tensor _37, Tensor _38, Tensor _39, Tensor _40, Tensor _41, Tensor _42, Tensor _43, Tensor _44, Tensor _45, Tensor _46, Tensor _47, Tensor _48, Tensor _49, Tensor _50, Tensor _51, Tensor _52, Tensor _53, Tensor _54, Tensor _55, Tensor _56, Tensor _57, Tensor _58, Tensor _59, Tensor _60, Tensor _61, Tensor _62, Tensor _63)"
  7484. },
  7485. {
  7486. "name": "neuron::forward_v2_7(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6)"
  7487. },
  7488. {
  7489. "name": "neuron::forward_v2_8(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7)"
  7490. },
  7491. {
  7492. "name": "neuron::forward_v2_9(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5, Tensor _6, Tensor _7, Tensor _8)"
  7493. },
  7494. {
  7495. "name": "neuron::rnn(Tensor _0, Tensor[] _1, __torch__.torch.classes.neuron.RnnBinding _2, int _3) -> (Tensor _0, Tensor[] _1)"
  7496. },
  7497. {
  7498. "name": "neuron::rnn_v2(Tensor _0, Tensor _1, Tensor _2, int _3, __torch__.torch.classes.neuron.RnnBinding_v2[] _4) -> (Tensor _0, Tensor _1, Tensor _2)"
  7499. },
  7500. {
  7501. "name": "prepacked::conv2d_clamp_prepack(Tensor W, Tensor? B, int[2] stride, int[2] padding, int[2] dilation, int groups, Scalar? output_min=None, Scalar? output_max=None) -> __torch__.torch.classes.xnnpack.Conv2dOpContext"
  7502. },
  7503. {
  7504. "name": "prepacked::conv2d_clamp_run(Tensor X, __torch__.torch.classes.xnnpack.Conv2dOpContext W_prepack) -> Tensor Y",
  7505. "category": "Layer"
  7506. },
  7507. {
  7508. "name": "prepacked::conv2d_transpose_clamp_prepack(Tensor W, Tensor? B, int[2] stride, int[2] padding, int[2] output_padding, int[2] dilation, int groups, Scalar? output_min=None, Scalar? output_max=None) -> __torch__.torch.classes.xnnpack.TransposeConv2dOpContext"
  7509. },
  7510. {
  7511. "name": "prepacked::conv2d_transpose_clamp_run(Tensor X, __torch__.torch.classes.xnnpack.TransposeConv2dOpContext W_prepack) -> Tensor Y",
  7512. "category": "Layer"
  7513. },
  7514. {
  7515. "name": "prepacked::linear_clamp_prepack(Tensor W, Tensor? B=None, Scalar? output_min=None, Scalar? output_max=None) -> __torch__.torch.classes.xnnpack.LinearOpContext"
  7516. },
  7517. {
  7518. "name": "prepacked::linear_clamp_run(Tensor X, __torch__.torch.classes.xnnpack.LinearOpContext W_prepack) -> Tensor Y",
  7519. "category": "Layer"
  7520. },
  7521. {
  7522. "name": "prim::AutogradAdd(Any a, Any b) -> Any"
  7523. },
  7524. {
  7525. "name": "prim::AutogradAllNonZero(...) -> bool"
  7526. },
  7527. {
  7528. "name": "prim::AutogradAllZero(...) -> bool"
  7529. },
  7530. {
  7531. "name": "prim::AutogradAnyNonZero(...) -> bool"
  7532. },
  7533. {
  7534. "name": "prim::AutogradZero() -> Tensor"
  7535. },
  7536. {
  7537. "name": "prim::BroadcastSizes(...) -> int[]"
  7538. },
  7539. {
  7540. "name": "prim::ConstantChunk(...) -> ..."
  7541. },
  7542. {
  7543. "name": "prim::ConstantMKLDNNTensor(...) -> ..."
  7544. },
  7545. {
  7546. "name": "prim::EnumName(AnyEnumType enum) -> str"
  7547. },
  7548. {
  7549. "name": "prim::EnumValue.int(AnyEnumType enum) -> int"
  7550. },
  7551. {
  7552. "name": "prim::EnumValue.float(AnyEnumType enum) -> float"
  7553. },
  7554. {
  7555. "name": "prim::EnumValue.str(AnyEnumType enum) -> str"
  7556. },
  7557. {
  7558. "name": "prim::IfThenElse(bool cond, Any(a) x, Any(b) y) -> Any(a|b)"
  7559. },
  7560. {
  7561. "name": "prim::ModuleContainerIndex.list(Any self, int ind) -> Any"
  7562. },
  7563. {
  7564. "name": "prim::ModuleContainerIndex.dict(Any self, str ind) -> Any"
  7565. },
  7566. {
  7567. "name": "prim::NumToTensor.Scalar(Scalar a) -> Tensor"
  7568. },
  7569. {
  7570. "name": "prim::NumToTensor.bool(bool a) -> Tensor"
  7571. },
  7572. {
  7573. "name": "prim::Print(...) -> ()"
  7574. },
  7575. {
  7576. "name": "prim::RaiseException(str msg, str? cls=None) -> ()"
  7577. },
  7578. {
  7579. "name": "prim::ReductionSizes(int[] size, int[] red_axes, bool keepdim=False) -> int[]"
  7580. },
  7581. {
  7582. "name": "prim::StringIndex(str string, int index) -> str"
  7583. },
  7584. {
  7585. "name": "prim::TupleIndex(Any tup, int i) -> Any"
  7586. },
  7587. {
  7588. "name": "prim::TupleUnpack(Any tup) -> ..."
  7589. },
  7590. {
  7591. "name": "prim::Uninitialized() -> Any"
  7592. },
  7593. {
  7594. "name": "prim::VarConcat(...) -> Tensor"
  7595. },
  7596. {
  7597. "name": "prim::VarStack(...) -> Tensor"
  7598. },
  7599. {
  7600. "name": "prim::abs.int(int a) -> int"
  7601. },
  7602. {
  7603. "name": "prim::abs.float(float a) -> float"
  7604. },
  7605. {
  7606. "name": "prim::abs.complex(complex a) -> float"
  7607. },
  7608. {
  7609. "name": "prim::abs.Scalar(Scalar a) -> Scalar"
  7610. },
  7611. {
  7612. "name": "prim::abs(Tensor x) -> Tensor"
  7613. },
  7614. {
  7615. "name": "prim::data(Tensor(a) a) -> Tensor(a)"
  7616. },
  7617. {
  7618. "name": "prim::device(Tensor a) -> Device"
  7619. },
  7620. {
  7621. "name": "prim::dtype(Tensor a) -> int"
  7622. },
  7623. {
  7624. "name": "prim::grad(Tensor a) -> Tensor(*)"
  7625. },
  7626. {
  7627. "name": "prim::id(AnyClassType? x) -> int"
  7628. },
  7629. {
  7630. "name": "prim::index(Device self) -> int?"
  7631. },
  7632. {
  7633. "name": "prim::is_cpu(Tensor a) -> bool"
  7634. },
  7635. {
  7636. "name": "prim::is_cuda(Tensor a) -> bool"
  7637. },
  7638. {
  7639. "name": "prim::is_ipu(Tensor a) -> bool"
  7640. },
  7641. {
  7642. "name": "prim::is_maia(Tensor a) -> bool"
  7643. },
  7644. {
  7645. "name": "prim::is_meta(Tensor a) -> bool"
  7646. },
  7647. {
  7648. "name": "prim::is_mkldnn(Tensor a) -> bool"
  7649. },
  7650. {
  7651. "name": "prim::is_mps(Tensor a) -> bool"
  7652. },
  7653. {
  7654. "name": "prim::is_mtia(Tensor a) -> bool"
  7655. },
  7656. {
  7657. "name": "prim::is_nested(Tensor a) -> bool"
  7658. },
  7659. {
  7660. "name": "prim::is_quantized(Tensor a) -> bool"
  7661. },
  7662. {
  7663. "name": "prim::is_sparse(Tensor a) -> bool"
  7664. },
  7665. {
  7666. "name": "prim::is_sparse_csr(Tensor a) -> bool"
  7667. },
  7668. {
  7669. "name": "prim::is_vulkan(Tensor a) -> bool"
  7670. },
  7671. {
  7672. "name": "prim::is_xla(Tensor a) -> bool"
  7673. },
  7674. {
  7675. "name": "prim::is_xpu(Tensor a) -> bool"
  7676. },
  7677. {
  7678. "name": "prim::isinstance(Any to_check) -> bool"
  7679. },
  7680. {
  7681. "name": "prim::itemsize(Tensor a) -> int"
  7682. },
  7683. {
  7684. "name": "prim::layout(Tensor a) -> Layout"
  7685. },
  7686. {
  7687. "name": "prim::max.int(int a, int b) -> int"
  7688. },
  7689. {
  7690. "name": "prim::max.float(float a, float b) -> float"
  7691. },
  7692. {
  7693. "name": "prim::max.int_float(int a, float b) -> float"
  7694. },
  7695. {
  7696. "name": "prim::max.float_int(float a, int b) -> float"
  7697. },
  7698. {
  7699. "name": "prim::max(Scalar a, Scalar b) -> Scalar"
  7700. },
  7701. {
  7702. "name": "prim::max.int_list(int[] l, int[] r) -> int[]"
  7703. },
  7704. {
  7705. "name": "prim::max.self_int(int[] self) -> int"
  7706. },
  7707. {
  7708. "name": "prim::max.float_list(float[] l, float[] r) -> float[]"
  7709. },
  7710. {
  7711. "name": "prim::max.self_float(float[] self) -> float"
  7712. },
  7713. {
  7714. "name": "prim::max.bool_list(bool[] l, bool[] r) -> bool[]"
  7715. },
  7716. {
  7717. "name": "prim::max.self_bool(bool[] self) -> bool"
  7718. },
  7719. {
  7720. "name": "prim::min.int(int a, int b) -> int"
  7721. },
  7722. {
  7723. "name": "prim::min.float(float a, float b) -> float"
  7724. },
  7725. {
  7726. "name": "prim::min.int_float(int a, float b) -> float"
  7727. },
  7728. {
  7729. "name": "prim::min.float_int(float a, int b) -> float"
  7730. },
  7731. {
  7732. "name": "prim::min(Scalar a, Scalar b) -> Scalar"
  7733. },
  7734. {
  7735. "name": "prim::min.int_list(int[] l, int[] r) -> int[]"
  7736. },
  7737. {
  7738. "name": "prim::min.self_int(int[] self) -> int"
  7739. },
  7740. {
  7741. "name": "prim::min.float_list(float[] l, float[] r) -> float[]"
  7742. },
  7743. {
  7744. "name": "prim::min.self_float(float[] self) -> float"
  7745. },
  7746. {
  7747. "name": "prim::min.bool_list(bool[] l, bool[] r) -> bool[]"
  7748. },
  7749. {
  7750. "name": "prim::min.self_bool(bool[] self) -> bool"
  7751. },
  7752. {
  7753. "name": "prim::mkldnn_convolution(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups) -> Tensor"
  7754. },
  7755. {
  7756. "name": "prim::name(Tensor a) -> str?"
  7757. },
  7758. {
  7759. "name": "prim::nbytes(Tensor a) -> int"
  7760. },
  7761. {
  7762. "name": "prim::rangelist(int n) -> int[]"
  7763. },
  7764. {
  7765. "name": "prim::requires_grad(Tensor a) -> bool"
  7766. },
  7767. {
  7768. "name": "prim::shape(Tensor self) -> int[]"
  7769. },
  7770. {
  7771. "name": "prim::tolist(...) -> ..."
  7772. },
  7773. {
  7774. "name": "prim::type(Device self) -> str"
  7775. },
  7776. {
  7777. "name": "prim::unchecked_cast(t x) -> t"
  7778. },
  7779. {
  7780. "name": "prim::unchecked_unwrap_optional(t(a)? optional) -> t(a)"
  7781. },
  7782. {
  7783. "name": "prims::collapse(Tensor a, int start, int end) -> Tensor"
  7784. },
  7785. {
  7786. "name": "profiler::_record_function_enter(str name, str? args=None) -> Tensor"
  7787. },
  7788. {
  7789. "name": "profiler::_record_function_enter_new(str name, str? args=None) -> __torch__.torch.classes.profiler._RecordFunction"
  7790. },
  7791. {
  7792. "name": "quantized::add(Tensor qa, Tensor qb, float scale, int zero_point) -> Tensor qc"
  7793. },
  7794. {
  7795. "name": "quantized::add.out(Tensor qa, Tensor qb, Tensor(a!) out) -> Tensor(a!) out"
  7796. },
  7797. {
  7798. "name": "quantized::add.Scalar(Tensor qa, Scalar b) -> Tensor qc"
  7799. },
  7800. {
  7801. "name": "quantized::add.Scalar2(Scalar b, Tensor qa) -> Tensor qc"
  7802. },
  7803. {
  7804. "name": "quantized::add.Scalar_out(Tensor qa, Scalar b, Tensor(a!) out) -> Tensor(a!) out"
  7805. },
  7806. {
  7807. "name": "quantized::add_out(Tensor qa, Tensor qb, Tensor(a!) out) -> Tensor(a!) out"
  7808. },
  7809. {
  7810. "name": "quantized::add_relu(Tensor qa, Tensor qb, float scale, int zero_point) -> Tensor qc"
  7811. },
  7812. {
  7813. "name": "quantized::add_relu.out(Tensor qa, Tensor qb, Tensor(a!) out) -> Tensor(a!) out"
  7814. },
  7815. {
  7816. "name": "quantized::add_relu.Scalar(Tensor qa, Scalar b) -> Tensor qc"
  7817. },
  7818. {
  7819. "name": "quantized::add_relu.Scalar2(Scalar b, Tensor qa) -> Tensor qc"
  7820. },
  7821. {
  7822. "name": "quantized::add_relu.Scalar_out(Tensor qa, Scalar b, Tensor(a!) out) -> Tensor(a!) out"
  7823. },
  7824. {
  7825. "name": "quantized::add_relu_out(Tensor qa, Tensor qb, Tensor(a!) out) -> Tensor(a!) out"
  7826. },
  7827. {
  7828. "name": "quantized::add_scalar(Tensor qa, Scalar b) -> Tensor qc"
  7829. },
  7830. {
  7831. "name": "quantized::add_scalar.Tensor(Tensor qa, Tensor b) -> Tensor qc"
  7832. },
  7833. {
  7834. "name": "quantized::add_scalar_out(Tensor qa, Scalar b, Tensor(a!) out) -> Tensor(a!) out"
  7835. },
  7836. {
  7837. "name": "quantized::add_scalar_out.Tensor(Tensor qa, Tensor b, Tensor(a!) out) -> Tensor(a!) out"
  7838. },
  7839. {
  7840. "name": "quantized::add_scalar_relu(Tensor qa, Scalar b) -> Tensor qc"
  7841. },
  7842. {
  7843. "name": "quantized::add_scalar_relu.Tensor(Tensor qa, Tensor b) -> Tensor qc"
  7844. },
  7845. {
  7846. "name": "quantized::add_scalar_relu_out(Tensor qa, Scalar b, Tensor(a!) out) -> Tensor(a!) out"
  7847. },
  7848. {
  7849. "name": "quantized::add_scalar_relu_out.Tensor(Tensor qa, Tensor b, Tensor(a!) out) -> Tensor(a!) out"
  7850. },
  7851. {
  7852. "name": "quantized::batch_norm(Tensor qx, Tensor? weight, Tensor? bias, Tensor mean, Tensor var, float eps, float output_scale, int output_zero_point) -> Tensor"
  7853. },
  7854. {
  7855. "name": "quantized::batch_norm1d(Tensor qx, Tensor? weight, Tensor? bias, Tensor mean, Tensor var, float eps, float output_scale, int output_zero_point) -> Tensor",
  7856. "category": "Normalization"
  7857. },
  7858. {
  7859. "name": "quantized::batch_norm1d_relu(Tensor qx, Tensor? weight, Tensor? bias, Tensor mean, Tensor var, float eps, float output_scale, int output_zero_point) -> Tensor",
  7860. "category": "Normalization"
  7861. },
  7862. {
  7863. "name": "quantized::batch_norm2d(Tensor qx, Tensor? weight, Tensor? bias, Tensor mean, Tensor var, float eps, float output_scale, int output_zero_point) -> Tensor",
  7864. "category": "Normalization"
  7865. },
  7866. {
  7867. "name": "quantized::batch_norm2d_relu(Tensor qx, Tensor? weight, Tensor? bias, Tensor mean, Tensor var, float eps, float output_scale, int output_zero_point) -> Tensor",
  7868. "category": "Normalization"
  7869. },
  7870. {
  7871. "name": "quantized::batch_norm3d(Tensor qx, Tensor? weight, Tensor? bias, Tensor mean, Tensor var, float eps, float output_scale, int output_zero_point) -> Tensor",
  7872. "category": "Normalization"
  7873. },
  7874. {
  7875. "name": "quantized::batch_norm3d_relu(Tensor qx, Tensor? weight, Tensor? bias, Tensor mean, Tensor var, float eps, float output_scale, int output_zero_point) -> Tensor",
  7876. "category": "Normalization"
  7877. },
  7878. {
  7879. "name": "quantized::batch_norm_relu(Tensor qx, Tensor? weight, Tensor? bias, Tensor mean, Tensor var, float eps, float output_scale, int output_zero_point) -> Tensor",
  7880. "category": "Normalization"
  7881. },
  7882. {
  7883. "name": "quantized::cat(Tensor[] qx, int dim, float? scale, int? zero_point) -> Tensor",
  7884. "category": "Tensor"
  7885. },
  7886. {
  7887. "name": "quantized::cat_relu(Tensor[] qx, int dim, float? scale, int? zero_point) -> Tensor",
  7888. "category": "Tensor"
  7889. },
  7890. {
  7891. "name": "quantized::celu(Tensor self, float output_scale, int output_zero_point, Scalar alpha=1) -> Tensor",
  7892. "category": "Activation"
  7893. },
  7894. {
  7895. "name": "quantized::conv1d(Tensor qx, __torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weight, float output_scale, int output_zero_point) -> Tensor",
  7896. "category": "Layer"
  7897. },
  7898. {
  7899. "name": "quantized::conv1d_prepack(Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups) -> __torch__.torch.classes.quantized.Conv2dPackedParamsBase"
  7900. },
  7901. {
  7902. "name": "quantized::conv1d_relu(Tensor qx, __torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weight, float output_scale, int output_zero_point) -> Tensor",
  7903. "category": "Layer"
  7904. },
  7905. {
  7906. "name": "quantized::conv1d_unpack(__torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weights) -> (Tensor unpacked_weights, Tensor? B_origin)"
  7907. },
  7908. {
  7909. "name": "quantized::conv2d.new(Tensor qx, __torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weight, float output_scale, int output_zero_point) -> Tensor",
  7910. "category": "Layer"
  7911. },
  7912. {
  7913. "name": "quantized::conv2d(Tensor qx, __torch__.torch.classes.quantized.Conv2dPackedParamsBase weight, int[] stride, int[] padding, int[] dilation, int groups, float output_scale, int output_zero_point) -> Tensor",
  7914. "category": "Layer"
  7915. },
  7916. {
  7917. "name": "quantized::conv2d_dilation(__torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weights) -> int[]"
  7918. },
  7919. {
  7920. "name": "quantized::conv2d_dynamic(Tensor qx, __torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weight, bool reduce_range=False) -> Tensor"
  7921. },
  7922. {
  7923. "name": "quantized::conv2d_groups(__torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weights) -> int"
  7924. },
  7925. {
  7926. "name": "quantized::conv2d_output_padding(__torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weights) -> int[]"
  7927. },
  7928. {
  7929. "name": "quantized::conv2d_padding(__torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weights) -> int[]"
  7930. },
  7931. {
  7932. "name": "quantized::conv2d_prepack(Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups) -> __torch__.torch.classes.quantized.Conv2dPackedParamsBase"
  7933. },
  7934. {
  7935. "name": "quantized::conv2d_relu.new(Tensor qx, __torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weight, float output_scale, int output_zero_point) -> Tensor",
  7936. "category": "Layer"
  7937. },
  7938. {
  7939. "name": "quantized::conv2d_relu(Tensor qx, __torch__.torch.classes.quantized.Conv2dPackedParamsBase weight, int[] stride, int[] padding, int[] dilation, int groups, float output_scale, int output_zero_point) -> Tensor",
  7940. "category": "Layer"
  7941. },
  7942. {
  7943. "name": "quantized::conv2d_stride(__torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weights) -> int[]"
  7944. },
  7945. {
  7946. "name": "quantized::conv2d_transpose(__torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weights) -> int"
  7947. },
  7948. {
  7949. "name": "quantized::conv2d_unpack(__torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weights) -> (Tensor unpacked_weights, Tensor? B_origin)"
  7950. },
  7951. {
  7952. "name": "quantized::conv2d_unpack_sizes(Any packed_weights) -> Any"
  7953. },
  7954. {
  7955. "name": "quantized::conv3d.new(Tensor qx, __torch__.torch.classes.quantized.Conv3dPackedParamsBase packed_weight, float output_scale, int output_zero_point) -> Tensor",
  7956. "category": "Layer"
  7957. },
  7958. {
  7959. "name": "quantized::conv3d(Tensor qx, __torch__.torch.classes.quantized.Conv3dPackedParamsBase weight, int[] stride, int[] padding, int[] dilation, int groups, float output_scale, int output_zero_point) -> Tensor",
  7960. "category": "Layer"
  7961. },
  7962. {
  7963. "name": "quantized::conv3d_prepack(Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups) -> __torch__.torch.classes.quantized.Conv3dPackedParamsBase"
  7964. },
  7965. {
  7966. "name": "quantized::conv3d_relu.new(Tensor qx, __torch__.torch.classes.quantized.Conv3dPackedParamsBase packed_weight, float output_scale, int output_zero_point) -> Tensor",
  7967. "category": "Layer"
  7968. },
  7969. {
  7970. "name": "quantized::conv3d_relu(Tensor qx, __torch__.torch.classes.quantized.Conv3dPackedParamsBase weight, int[] stride, int[] padding, int[] dilation, int groups, float output_scale, int output_zero_point) -> Tensor",
  7971. "category": "Layer"
  7972. },
  7973. {
  7974. "name": "quantized::conv_prepack(Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups) -> __torch__.torch.classes.quantized.Conv2dPackedParamsBase"
  7975. },
  7976. {
  7977. "name": "quantized::conv_transpose1d_prepack(Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] output_padding, int[] dilation, int groups) -> __torch__.torch.classes.quantized.Conv2dPackedParamsBase"
  7978. },
  7979. {
  7980. "name": "quantized::conv_transpose2d(Tensor qx, __torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weight, float output_scale, int output_zero_point) -> Tensor"
  7981. },
  7982. {
  7983. "name": "quantized::conv_transpose2d_dilation(__torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weights) -> int[]"
  7984. },
  7985. {
  7986. "name": "quantized::conv_transpose2d_dynamic(Tensor qx, __torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weight, bool reduce_range=False) -> Tensor"
  7987. },
  7988. {
  7989. "name": "quantized::conv_transpose2d_groups(__torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weights) -> int"
  7990. },
  7991. {
  7992. "name": "quantized::conv_transpose2d_output_padding(__torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weights) -> int[]"
  7993. },
  7994. {
  7995. "name": "quantized::conv_transpose2d_padding(__torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weights) -> int[]"
  7996. },
  7997. {
  7998. "name": "quantized::conv_transpose2d_prepack(Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] output_padding, int[] dilation, int groups) -> __torch__.torch.classes.quantized.Conv2dPackedParamsBase"
  7999. },
  8000. {
  8001. "name": "quantized::conv_transpose2d_stride(__torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weights) -> int[]"
  8002. },
  8003. {
  8004. "name": "quantized::conv_transpose2d_transpose(__torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weights) -> int"
  8005. },
  8006. {
  8007. "name": "quantized::conv_transpose2d_unpack(__torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weights) -> (Tensor unpacked_weights, Tensor? B_origin)"
  8008. },
  8009. {
  8010. "name": "quantized::conv_transpose3d_prepack(Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] output_padding, int[] dilation, int groups) -> __torch__.torch.classes.quantized.Conv3dPackedParamsBase"
  8011. },
  8012. {
  8013. "name": "quantized::dropout(Tensor self, float output_scale, int output_zero_point, Scalar p=0.5, bool training=False) -> Tensor"
  8014. },
  8015. {
  8016. "name": "quantized::embedding_bag_4bit_rowwise_offsets(Tensor weight, Tensor indices, Tensor? offsets=None, bool scale_grad_by_freq=False, int mode=0, bool pruned_weights=False, Tensor? per_sample_weights=None, Tensor? compressed_indices_mapping=None, bool include_last_offset=False) -> Tensor",
  8017. "category": "Transform"
  8018. },
  8019. {
  8020. "name": "quantized::embedding_bag_byte_rowwise_offsets(Tensor weight, Tensor indices, Tensor? offsets=None, bool scale_grad_by_freq=False, int mode=0, bool pruned_weights=False, Tensor? per_sample_weights=None, Tensor? compressed_indices_mapping=None, bool include_last_offset=False) -> Tensor",
  8021. "category": "Transform"
  8022. },
  8023. {
  8024. "name": "quantized::embedding_bag_prepack(Tensor weight) -> __torch__.torch.classes.quantized.EmbeddingPackedParamsBase W_prepack"
  8025. },
  8026. {
  8027. "name": "quantized::embedding_bag_unpack(__torch__.torch.classes.quantized.EmbeddingPackedParamsBase W_prepack) -> Tensor W_origin"
  8028. },
  8029. {
  8030. "name": "quantized::embedding_byte(__torch__.torch.classes.quantized.EmbeddingPackedParamsBase weight, Tensor indices, bool pruned_weights=False) -> Tensor",
  8031. "category": "Transform"
  8032. },
  8033. {
  8034. "name": "quantized::hardswish(Tensor input, float output_scale, int output_zero_point) -> Tensor",
  8035. "category": "Activation"
  8036. },
  8037. {
  8038. "name": "quantized::instance_norm(Tensor input, Tensor? weight, Tensor? bias, float eps, float output_scale, int output_zero_point) -> Tensor"
  8039. },
  8040. {
  8041. "name": "quantized::layer_norm(Tensor input, int[] normalized_shape, Tensor? weight, Tensor? bias, float eps, float output_scale, int output_zero_point) -> Tensor",
  8042. "category": "Normalization"
  8043. },
  8044. {
  8045. "name": "quantized::leaky_relu(Tensor qx, Scalar negative_slope, bool inplace, float output_scale, int output_zero_point) -> Tensor",
  8046. "category": "Activation"
  8047. },
  8048. {
  8049. "name": "quantized::linear(Tensor X, __torch__.torch.classes.quantized.LinearPackedParamsBase W_prepack, float Y_scale_i, int Y_zero_point_i) -> Tensor Y",
  8050. "category": "Layer"
  8051. },
  8052. {
  8053. "name": "quantized::linear_dynamic(Tensor X, __torch__.torch.classes.quantized.LinearPackedParamsBase W_prepack, bool reduce_range=False) -> Tensor Y",
  8054. "category": "Layer"
  8055. },
  8056. {
  8057. "name": "quantized::linear_dynamic_fp16(Tensor X, __torch__.torch.classes.quantized.LinearPackedParamsBase W_prepack) -> Tensor Y"
  8058. },
  8059. {
  8060. "name": "quantized::linear_prepack(Tensor W, Tensor? B=None) -> __torch__.torch.classes.quantized.LinearPackedParamsBase W_prepack"
  8061. },
  8062. {
  8063. "name": "quantized::linear_prepack_fp16(Tensor W, Tensor? B=None) -> __torch__.torch.classes.quantized.LinearPackedParamsBase W_prepack"
  8064. },
  8065. {
  8066. "name": "quantized::linear_prepack_fp16_legacy(Tensor W, Tensor? B=None) -> Tensor W_prepack"
  8067. },
  8068. {
  8069. "name": "quantized::linear_relu(Tensor X, __torch__.torch.classes.quantized.LinearPackedParamsBase W_prepack, float Y_scale_i, int Y_zero_point_i) -> Tensor Y",
  8070. "category": "Layer"
  8071. },
  8072. {
  8073. "name": "quantized::linear_relu_dynamic(Tensor X, __torch__.torch.classes.quantized.LinearPackedParamsBase W_prepack, bool reduce_range=False) -> Tensor Y",
  8074. "category": "Layer"
  8075. },
  8076. {
  8077. "name": "quantized::linear_unpack(__torch__.torch.classes.quantized.LinearPackedParamsBase W_prepack) -> (Tensor W_origin, Tensor? B_origin)"
  8078. },
  8079. {
  8080. "name": "quantized::linear_unpack.legacy(Tensor W_prepack) -> (Tensor W_origin, Tensor? B_origin)"
  8081. },
  8082. {
  8083. "name": "quantized::linear_unpack_fp16(__torch__.torch.classes.quantized.LinearPackedParamsBase W_prepack) -> (Tensor W_origin, Tensor? B_origin)"
  8084. },
  8085. {
  8086. "name": "quantized::linear_unpack_fp16.legacy(Tensor W_prepack) -> (Tensor W_origin, Tensor? B_origin)"
  8087. },
  8088. {
  8089. "name": "quantized::make_quantized_cell_params(Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh) -> __torch__.torch.classes.rnn.CellParamsBase"
  8090. },
  8091. {
  8092. "name": "quantized::make_quantized_cell_params_dynamic(__torch__.torch.classes.quantized.LinearPackedParamsBase w_ih, __torch__.torch.classes.quantized.LinearPackedParamsBase w_hh, Tensor bias_ih, Tensor bias_hh, bool reduce_range=False) -> __torch__.torch.classes.rnn.CellParamsBase"
  8093. },
  8094. {
  8095. "name": "quantized::make_quantized_cell_params_fp16(__torch__.torch.classes.quantized.LinearPackedParamsBase w_ih, __torch__.torch.classes.quantized.LinearPackedParamsBase w_hh) -> __torch__.torch.classes.rnn.CellParamsBase"
  8096. },
  8097. {
  8098. "name": "quantized::matmul(Tensor qa, Tensor qb, float scale, int zero_point) -> Tensor qc"
  8099. },
  8100. {
  8101. "name": "quantized::mul(Tensor qa, Tensor qb, float scale, int zero_point) -> Tensor qc"
  8102. },
  8103. {
  8104. "name": "quantized::mul.out(Tensor qa, Tensor qb, Tensor(a!) out) -> Tensor(a!) out"
  8105. },
  8106. {
  8107. "name": "quantized::mul.Scalar(Tensor qa, Scalar b) -> Tensor qc"
  8108. },
  8109. {
  8110. "name": "quantized::mul.Scalar2(Scalar b, Tensor qa) -> Tensor qc"
  8111. },
  8112. {
  8113. "name": "quantized::mul.Scalar_out(Tensor qa, Scalar b, Tensor(a!) out) -> Tensor(a!) out"
  8114. },
  8115. {
  8116. "name": "quantized::mul_out(Tensor qa, Tensor qb, Tensor(a!) out) -> Tensor(a!) out"
  8117. },
  8118. {
  8119. "name": "quantized::mul_relu(Tensor qa, Tensor qb, float scale, int zero_point) -> Tensor qc"
  8120. },
  8121. {
  8122. "name": "quantized::mul_relu.out(Tensor qa, Tensor qb, Tensor(a!) out) -> Tensor(a!) out"
  8123. },
  8124. {
  8125. "name": "quantized::mul_relu.Scalar(Tensor qa, Scalar b) -> Tensor qc"
  8126. },
  8127. {
  8128. "name": "quantized::mul_relu.Scalar2(Scalar b, Tensor qa) -> Tensor qc"
  8129. },
  8130. {
  8131. "name": "quantized::mul_relu.Scalar_out(Tensor qa, Scalar b, Tensor(a!) out) -> Tensor(a!) out"
  8132. },
  8133. {
  8134. "name": "quantized::mul_relu_out(Tensor qa, Tensor qb, Tensor(a!) out) -> Tensor(a!) out"
  8135. },
  8136. {
  8137. "name": "quantized::mul_scalar(Tensor qa, Scalar b) -> Tensor qc"
  8138. },
  8139. {
  8140. "name": "quantized::mul_scalar.Tensor(Tensor qa, Tensor b) -> Tensor qc"
  8141. },
  8142. {
  8143. "name": "quantized::mul_scalar_out(Tensor qa, Scalar b, Tensor(a!) out) -> Tensor(a!) out"
  8144. },
  8145. {
  8146. "name": "quantized::mul_scalar_out.Tensor(Tensor qa, Tensor b, Tensor(a!) out) -> Tensor(a!) out"
  8147. },
  8148. {
  8149. "name": "quantized::mul_scalar_relu(Tensor qa, Scalar b) -> Tensor qc"
  8150. },
  8151. {
  8152. "name": "quantized::mul_scalar_relu.Tensor(Tensor qa, Tensor b) -> Tensor qc"
  8153. },
  8154. {
  8155. "name": "quantized::mul_scalar_relu_out(Tensor qa, Scalar b, Tensor(a!) out) -> Tensor(a!) out"
  8156. },
  8157. {
  8158. "name": "quantized::mul_scalar_relu_out.Tensor(Tensor qa, Tensor b, Tensor(a!) out) -> Tensor(a!) out"
  8159. },
  8160. {
  8161. "name": "quantized::prelu(Tensor qx, Tensor weight, float output_scale, int output_zero_point) -> Tensor"
  8162. },
  8163. {
  8164. "name": "quantized::quantized_gru_cell_dynamic(Tensor input, Tensor hx, __torch__.torch.classes.quantized.LinearPackedParamsBase w_ih, __torch__.torch.classes.quantized.LinearPackedParamsBase w_hh, Tensor b_ih, Tensor b_hh) -> Tensor"
  8165. },
  8166. {
  8167. "name": "quantized::quantized_lstm_cell_dynamic(Tensor input, Tensor[] hx, __torch__.torch.classes.quantized.LinearPackedParamsBase w_ih, __torch__.torch.classes.quantized.LinearPackedParamsBase w_hh, Tensor bias_ih, Tensor bias_hh) -> (Tensor, Tensor)"
  8168. },
  8169. {
  8170. "name": "quantized::quantized_rnn_relu_cell_dynamic(Tensor input, Tensor hx, __torch__.torch.classes.quantized.LinearPackedParamsBase w_ih, __torch__.torch.classes.quantized.LinearPackedParamsBase w_hh, Tensor b_ih, Tensor b_hh) -> Tensor"
  8171. },
  8172. {
  8173. "name": "quantized::quantized_rnn_tanh_cell_dynamic(Tensor input, Tensor hx, __torch__.torch.classes.quantized.LinearPackedParamsBase w_ih, __torch__.torch.classes.quantized.LinearPackedParamsBase w_hh, Tensor b_ih, Tensor b_hh) -> Tensor"
  8174. },
  8175. {
  8176. "name": "quantized::relu6(Tensor qx, bool inplace=False) -> Tensor",
  8177. "category": "Activation"
  8178. },
  8179. {
  8180. "name": "quantized::sigmoid(Tensor qx, float output_scale, int output_zero_point) -> Tensor",
  8181. "category": "Activation"
  8182. },
  8183. {
  8184. "name": "quantized::softmax(Tensor qx, int dim, float output_scale, int output_zero_point) -> Tensor"
  8185. },
  8186. {
  8187. "name": "quantized_decomposed::_choose_qparams_per_token_asymmetric_impl(Tensor input, ScalarType dtype) -> (Tensor, Tensor)"
  8188. },
  8189. {
  8190. "name": "quantized_decomposed::add(Tensor a, float a_scale, int a_zero_point, int a_quant_min, int a_quant_max, Tensor b, float b_scale, int b_zero_point, int b_quant_min, int b_quant_max, float out_scale, int out_zero_point, int out_quant_min, int out_quant_max) -> Tensor qc"
  8191. },
  8192. {
  8193. "name": "quantized_decomposed::add.scalar(Tensor qa, float a_scale, int a_zero_point, int a_quant_min, int a_quant_max, ScalarType a_dtype, Scalar b, float out_scale, int out_zero_point, int out_quant_min, int out_quant_max, ScalarType out_dtype) -> Tensor"
  8194. },
  8195. {
  8196. "name": "quantized_decomposed::add_relu(Tensor a, float a_scale, int a_zero_point, int a_quant_min, int a_quant_max, Tensor b, float b_scale, int b_zero_point, int b_quant_min, int b_quant_max, float out_scale, int out_zero_point, int out_quant_min, int out_quant_max) -> Tensor qc"
  8197. },
  8198. {
  8199. "name": "quantized_decomposed::choose_qparams.tensor(Tensor input, int quant_min, int quant_max, float eps, ScalarType dtype) -> (Tensor, Tensor)"
  8200. },
  8201. {
  8202. "name": "quantized_decomposed::choose_qparams_per_token(Tensor input, ScalarType dtype) -> (Tensor, Tensor)"
  8203. },
  8204. {
  8205. "name": "quantized_decomposed::choose_qparams_per_token_asymmetric(Tensor input, ScalarType dtype) -> (Tensor, Tensor)"
  8206. },
  8207. {
  8208. "name": "quantized_decomposed::choose_qparams_per_token_asymmetric.out(Tensor input, ScalarType dtype, *, Tensor(a!) scale_out, Tensor(b!) zero_point_out) -> (Tensor(a!), Tensor(b!))"
  8209. },
  8210. {
  8211. "name": "quantized_decomposed::choose_qparams_symmetric.tensor(Tensor input, int quant_min, int quant_max, float eps, ScalarType dtype) -> (Tensor, Tensor)"
  8212. },
  8213. {
  8214. "name": "quantized_decomposed::dequantize_per_channel(Tensor input, Tensor scales, Tensor? zero_points, int axis, int quant_min, int quant_max, ScalarType dtype, *, ScalarType? out_dtype=None) -> Tensor"
  8215. },
  8216. {
  8217. "name": "quantized_decomposed::dequantize_per_channel.out(Tensor input, Tensor scales, Tensor? zero_points, int axis, int quant_min, int quant_max, ScalarType dtype, *, ScalarType? out_dtype=None, Tensor(a!) out) -> Tensor(a!)"
  8218. },
  8219. {
  8220. "name": "quantized_decomposed::dequantize_per_channel_group(Tensor input, Tensor scales, Tensor? zero_points, int quant_min, int quant_max, ScalarType dtype, int group_size, ScalarType output_dtype) -> Tensor"
  8221. },
  8222. {
  8223. "name": "quantized_decomposed::dequantize_per_tensor(Tensor input, float scale, int zero_point, int quant_min, int quant_max, ScalarType dtype, *, ScalarType? out_dtype=None) -> Tensor"
  8224. },
  8225. {
  8226. "name": "quantized_decomposed::dequantize_per_tensor.tensor(Tensor input, Tensor scale, Tensor zero_point, int quant_min, int quant_max, ScalarType dtype, *, ScalarType? out_dtype=None) -> Tensor"
  8227. },
  8228. {
  8229. "name": "quantized_decomposed::dequantize_per_tensor.tensor2(Tensor input, Tensor scale, Tensor zero_point, Tensor quant_min, Tensor quant_max, ScalarType dtype, *, ScalarType? out_dtype=None) -> Tensor"
  8230. },
  8231. {
  8232. "name": "quantized_decomposed::dequantize_per_tensor.Tensor_out(Tensor input, Tensor scale, Tensor zero_point, int quant_min, int quant_max, ScalarType dtype, *, ScalarType? out_dtype=None, Tensor(a!) out) -> Tensor(a!)"
  8233. },
  8234. {
  8235. "name": "quantized_decomposed::dequantize_per_tensor.out(Tensor input, float scale, int zero_point, int quant_min, int quant_max, ScalarType dtype, *, ScalarType? out_dtype=None, Tensor(a!) out) -> Tensor(a!)"
  8236. },
  8237. {
  8238. "name": "quantized_decomposed::dequantize_per_token(Tensor input, Tensor scales, Tensor zero_points, int quant_min, int quant_max, ScalarType dtype, ScalarType output_dtype) -> Tensor"
  8239. },
  8240. {
  8241. "name": "quantized_decomposed::embedding_4bit(Tensor weight, Tensor weight_scales, Tensor? weight_zero_points, int weight_quant_min, int weight_quant_max, Tensor indices) -> Tensor",
  8242. "category": "Transform"
  8243. },
  8244. {
  8245. "name": "quantized_decomposed::embedding_4bit.dtype(Tensor weight, Tensor weight_scales, Tensor? weight_zero_points, int weight_quant_min, int weight_quant_max, Tensor indices, *, ScalarType? dtype=None) -> Tensor",
  8246. "category": "Transform"
  8247. },
  8248. {
  8249. "name": "quantized_decomposed::embedding_4bit.out(Tensor weight, Tensor weight_scales, Tensor? weight_zero_points, int weight_quant_min, int weight_quant_max, Tensor indices, *, Tensor(a!) out) -> Tensor(a!)",
  8250. "category": "Transform"
  8251. },
  8252. {
  8253. "name": "quantized_decomposed::embedding_4bit.dtype_out(Tensor weight, Tensor weight_scales, Tensor? weight_zero_points, int weight_quant_min, int weight_quant_max, Tensor indices, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)",
  8254. "category": "Transform"
  8255. },
  8256. {
  8257. "name": "quantized_decomposed::embedding_byte.dtype_out(Tensor weight, Tensor weight_scales, Tensor? weight_zero_points, int weight_quant_min, int weight_quant_max, Tensor indices, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)",
  8258. "category": "Transform"
  8259. },
  8260. {
  8261. "name": "quantized_decomposed::embedding_byte(Tensor weight, Tensor weight_scales, Tensor? weight_zero_points, int weight_quant_min, int weight_quant_max, Tensor indices) -> Tensor",
  8262. "category": "Transform"
  8263. },
  8264. {
  8265. "name": "quantized_decomposed::embedding_byte.dtype(Tensor weight, Tensor weight_scales, Tensor? weight_zero_points, int weight_quant_min, int weight_quant_max, Tensor indices, *, ScalarType? dtype=None) -> Tensor",
  8266. "category": "Transform"
  8267. },
  8268. {
  8269. "name": "quantized_decomposed::embedding_byte.out(Tensor weight, Tensor weight_scales, Tensor? weight_zero_points, int weight_quant_min, int weight_quant_max, Tensor indices, *, Tensor(a!) out) -> Tensor(a!)",
  8270. "category": "Transform"
  8271. },
  8272. {
  8273. "name": "quantized_decomposed::fake_quant_per_channel(Tensor input, Tensor scales, Tensor zero_points, int axis, int quant_min, int quant_max) -> Tensor"
  8274. },
  8275. {
  8276. "name": "quantized_decomposed::mixed_linear(Tensor input, Tensor weight, Tensor weight_scales, Tensor? weight_zero_points, ScalarType? dtype=None) -> Tensor"
  8277. },
  8278. {
  8279. "name": "quantized_decomposed::mixed_mm(Tensor input, Tensor weight, Tensor weight_scales, Tensor? weight_zero_points) -> Tensor"
  8280. },
  8281. {
  8282. "name": "quantized_decomposed::quantize_per_channel(Tensor input, Tensor scales, Tensor zero_points, int axis, int quant_min, int quant_max, ScalarType dtype) -> Tensor"
  8283. },
  8284. {
  8285. "name": "quantized_decomposed::quantize_per_channel_group(Tensor input, Tensor scales, Tensor zero_points, int quant_min, int quant_max, ScalarType dtype, int group_size) -> Tensor"
  8286. },
  8287. {
  8288. "name": "quantized_decomposed::quantize_per_tensor(Tensor input, float scale, int zero_point, int quant_min, int quant_max, ScalarType dtype) -> Tensor"
  8289. },
  8290. {
  8291. "name": "quantized_decomposed::quantize_per_tensor.tensor(Tensor input, Tensor scale, Tensor zero_point, int quant_min, int quant_max, ScalarType dtype) -> Tensor"
  8292. },
  8293. {
  8294. "name": "quantized_decomposed::quantize_per_tensor.tensor2(Tensor input, Tensor scale, Tensor zero_point, Tensor quant_min, Tensor quant_max, ScalarType dtype) -> Tensor"
  8295. },
  8296. {
  8297. "name": "quantized_decomposed::quantize_per_tensor.out(Tensor input, float scale, int zero_point, int quant_min, int quant_max, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!)"
  8298. },
  8299. {
  8300. "name": "quantized_decomposed::quantize_per_token(Tensor input, Tensor scales, Tensor zero_points, int quant_min, int quant_max, ScalarType dtype) -> Tensor"
  8301. },
  8302. {
  8303. "name": "sgl_kernel::extend_attention_cpu(Tensor q_extend, Tensor k_extend, Tensor v_extend, Tensor(a!) o_extend, Tensor k_buffer, Tensor v_buffer, Tensor req_to_token, Tensor req_pool_indices, Tensor seq_lens, Tensor extend_seq_lens, Tensor extend_start_loc, int max_len_extend, float sm_scale, float logit_cap) -> ()"
  8304. },
  8305. {
  8306. "name": "tensorrt::execute_engine(Tensor[] inputs, __torch__.torch.classes.tensorrt.Engine engine) -> Tensor[]"
  8307. },
  8308. {
  8309. "name": "torch_scatter::cuda_version() -> int _0"
  8310. },
  8311. {
  8312. "name": "torch_scatter::gather_coo(Tensor _0, Tensor _1, Tensor? _2) -> Tensor _0"
  8313. },
  8314. {
  8315. "name": "torch_scatter::gather_csr(Tensor _0, Tensor _1, Tensor? _2) -> Tensor _0"
  8316. },
  8317. {
  8318. "name": "torch_scatter::scatter_max(Tensor _0, Tensor _1, int _2, Tensor? _3, int? _4) -> (Tensor _0, Tensor _1)"
  8319. },
  8320. {
  8321. "name": "torch_scatter::scatter_mean(Tensor _0, Tensor _1, int _2, Tensor? _3, int? _4) -> Tensor _0"
  8322. },
  8323. {
  8324. "name": "torch_scatter::scatter_min(Tensor _0, Tensor _1, int _2, Tensor? _3, int? _4) -> (Tensor _0, Tensor _1)"
  8325. },
  8326. {
  8327. "name": "torch_scatter::scatter_mul(Tensor _0, Tensor _1, int _2, Tensor? _3, int? _4) -> Tensor _0"
  8328. },
  8329. {
  8330. "name": "torch_scatter::scatter_sum(Tensor _0, Tensor _1, int _2, Tensor? _3, int? _4) -> Tensor _0"
  8331. },
  8332. {
  8333. "name": "torch_scatter::segment_max_coo(Tensor _0, Tensor _1, Tensor? _2, int? _3) -> (Tensor _0, Tensor _1)"
  8334. },
  8335. {
  8336. "name": "torch_scatter::segment_max_csr(Tensor _0, Tensor _1, Tensor? _2) -> (Tensor _0, Tensor _1)"
  8337. },
  8338. {
  8339. "name": "torch_scatter::segment_mean_coo(Tensor _0, Tensor _1, Tensor? _2, int? _3) -> Tensor _0"
  8340. },
  8341. {
  8342. "name": "torch_scatter::segment_mean_csr(Tensor _0, Tensor _1, Tensor? _2) -> Tensor _0"
  8343. },
  8344. {
  8345. "name": "torch_scatter::segment_min_coo(Tensor _0, Tensor _1, Tensor? _2, int? _3) -> (Tensor _0, Tensor _1)"
  8346. },
  8347. {
  8348. "name": "torch_scatter::segment_min_csr(Tensor _0, Tensor _1, Tensor? _2) -> (Tensor _0, Tensor _1)"
  8349. },
  8350. {
  8351. "name": "torch_scatter::segment_sum_coo(Tensor _0, Tensor _1, Tensor? _2, int? _3) -> Tensor _0"
  8352. },
  8353. {
  8354. "name": "torch_scatter::segment_sum_csr(Tensor _0, Tensor _1, Tensor? _2) -> Tensor _0"
  8355. },
  8356. {
  8357. "name": "torch_sparse::cuda_version() -> int _0"
  8358. },
  8359. {
  8360. "name": "torch_sparse::ego_k_hop_sample_adj(Tensor _0, Tensor _1, Tensor _2, int _3, int _4, bool _5) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3, Tensor _4, Tensor _5)"
  8361. },
  8362. {
  8363. "name": "torch_sparse::hetero_neighbor_sample(str[] _0, (str, str, str)[] _1, Dict(str, Tensor) _2, Dict(str, Tensor) _3, Dict(str, Tensor) _4, Dict(str, int[]) _5, int _6, bool _7, bool _8) -> (Dict(str, Tensor) _0, Dict(str, Tensor) _1, Dict(str, Tensor) _2, Dict(str, Tensor) _3)"
  8364. },
  8365. {
  8366. "name": "torch_sparse::hetero_temporal_neighbor_sample(str[] _0, (str, str, str)[] _1, Dict(str, Tensor) _2, Dict(str, Tensor) _3, Dict(str, Tensor) _4, Dict(str, int[]) _5, Dict(str, Tensor) _6, int _7, bool _8, bool _9) -> (Dict(str, Tensor) _0, Dict(str, Tensor) _1, Dict(str, Tensor) _2, Dict(str, Tensor) _3)"
  8367. },
  8368. {
  8369. "name": "torch_sparse::hgt_sample(Dict(str, Tensor) _0, Dict(str, Tensor) _1, Dict(str, Tensor) _2, Dict(str, int[]) _3, int _4) -> (Dict(str, Tensor) _0, Dict(str, Tensor) _1, Dict(str, Tensor) _2, Dict(str, Tensor) _3)"
  8370. },
  8371. {
  8372. "name": "torch_sparse::ind2ptr(Tensor _0, int _1) -> Tensor _0"
  8373. },
  8374. {
  8375. "name": "torch_sparse::mt_partition(Tensor _0, Tensor _1, Tensor? _2, Tensor? _3, int _4, bool _5, int _6) -> Tensor _0"
  8376. },
  8377. {
  8378. "name": "torch_sparse::neighbor_sample(Tensor _0, Tensor _1, Tensor _2, int[] _3, bool _4, bool _5) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3)"
  8379. },
  8380. {
  8381. "name": "torch_sparse::non_diag_mask(Tensor _0, Tensor _1, int _2, int _3, int _4) -> Tensor _0"
  8382. },
  8383. {
  8384. "name": "torch_sparse::partition(Tensor _0, Tensor _1, Tensor? _2, int _3, bool _4) -> Tensor _0"
  8385. },
  8386. {
  8387. "name": "torch_sparse::partition2(Tensor _0, Tensor _1, Tensor? _2, Tensor? _3, int _4, bool _5) -> Tensor _0"
  8388. },
  8389. {
  8390. "name": "torch_sparse::ptr2ind(Tensor _0, int _1) -> Tensor _0"
  8391. },
  8392. {
  8393. "name": "torch_sparse::random_walk(Tensor _0, Tensor _1, Tensor _2, int _3) -> Tensor _0"
  8394. },
  8395. {
  8396. "name": "torch_sparse::relabel(Tensor _0, Tensor _1) -> (Tensor _0, Tensor _1)"
  8397. },
  8398. {
  8399. "name": "torch_sparse::relabel_one_hop(Tensor _0, Tensor _1, Tensor? _2, Tensor _3, bool _4) -> (Tensor _0, Tensor _1, Tensor? _2, Tensor _3)"
  8400. },
  8401. {
  8402. "name": "torch_sparse::saint_subgraph(Tensor _0, Tensor _1, Tensor _2, Tensor _3) -> (Tensor _0, Tensor _1, Tensor _2)"
  8403. },
  8404. {
  8405. "name": "torch_sparse::sample_adj(Tensor _0, Tensor _1, Tensor _2, int _3, bool _4) -> (Tensor _0, Tensor _1, Tensor _2, Tensor _3)"
  8406. },
  8407. {
  8408. "name": "torch_sparse::spmm_max(Tensor _0, Tensor _1, Tensor? _2, Tensor _3) -> (Tensor _0, Tensor _1)"
  8409. },
  8410. {
  8411. "name": "torch_sparse::spmm_mean(Tensor? _0, Tensor _1, Tensor _2, Tensor? _3, Tensor? _4, Tensor? _5, Tensor? _6, Tensor _7) -> Tensor _0"
  8412. },
  8413. {
  8414. "name": "torch_sparse::spmm_min(Tensor _0, Tensor _1, Tensor? _2, Tensor _3) -> (Tensor _0, Tensor _1)"
  8415. },
  8416. {
  8417. "name": "torch_sparse::spmm_sum(Tensor? _0, Tensor _1, Tensor _2, Tensor? _3, Tensor? _4, Tensor? _5, Tensor _6) -> Tensor _0"
  8418. },
  8419. {
  8420. "name": "torchao::choose_qparams_affine(Tensor? input, str mapping_type, SymInt[] block_size, ScalarType target_dtype, Scalar? quant_min=None, Scalar? quant_max=None, float? eps=None, ScalarType? scale_dtype=None, ScalarType? zero_point_dtype=None, bool keepdim=False) -> (Tensor, Tensor)"
  8421. },
  8422. {
  8423. "name": "torchao::dequantize_affine(Tensor input, SymInt[] block_size, Tensor scale, Tensor? zero_point, ScalarType input_dtype, Scalar? quant_min=None, Scalar? quant_max=None, ScalarType output_dtype=6) -> Tensor"
  8424. },
  8425. {
  8426. "name": "torchao::quantize_affine(Tensor input, SymInt[] block_size, Tensor scale, Tensor? zero_point, ScalarType output_dtype, Scalar? quant_min=None, Scalar? quant_max=None) -> Tensor"
  8427. },
  8428. {
  8429. "name": "torchaudio::forced_align(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank) -> (Tensor, Tensor)"
  8430. },
  8431. {
  8432. "name": "torchaudio::sox_effects_apply_effects_tensor(Tensor tensor, int sample_rate, str[][] effects, bool channels_first=True) -> (Tensor, int)"
  8433. },
  8434. {
  8435. "name": "torchvision::_interpolate_bilinear2d_aa(Tensor input, int[] size, bool align_corners) -> Tensor"
  8436. },
  8437. {
  8438. "name": "torchvision::deform_conv2d(Tensor input, Tensor weight, Tensor offset, Tensor mask, Tensor bias, SymInt stride_h, SymInt stride_w, SymInt pad_h, SymInt pad_w, SymInt dilation_h, SymInt dilation_w, SymInt groups, SymInt offset_groups, bool use_mask) -> Tensor"
  8439. },
  8440. {
  8441. "name": "torchvision::deform_conv2d.out(Tensor input, Tensor weight, Tensor offset, Tensor mask, Tensor bias, SymInt stride_h, SymInt stride_w, SymInt pad_h, SymInt pad_w, SymInt dilation_h, SymInt dilation_w, SymInt groups, SymInt offset_groups, bool use_mask, *, Tensor(a!) out) -> Tensor(a!)"
  8442. },
  8443. {
  8444. "name": "torchvision::nms(Tensor dets, Tensor scores, float iou_threshold) -> Tensor"
  8445. },
  8446. {
  8447. "name": "torchvision::roi_align(Tensor input, Tensor rois, float spatial_scale, SymInt pooled_height, SymInt pooled_width, int sampling_ratio, bool aligned) -> Tensor"
  8448. },
  8449. {
  8450. "name": "torchvision::roi_pool(Tensor input, Tensor rois, float spatial_scale, SymInt pooled_height, SymInt pooled_width) -> (Tensor, Tensor)"
  8451. },
  8452. {
  8453. "name": "vai::fix_neuron(Tensor input, int valmin, int valmax, float valamp, int zero_point, int method, int device_id, int inplace) -> Tensor"
  8454. },
  8455. {
  8456. "name": "__torch__.torch.classes.rnn.CellParamsBase",
  8457. "inputs": [
  8458. { "name": "type", "type": "string" },
  8459. { "name": "tensors", "type": "Tensor[]" },
  8460. { "name": "doubles", "type": "float64[]" },
  8461. { "name": "longs", "type": "int64[]" },
  8462. { "name": "packed_params", "type": "__torch__.torch.classes.quantized.LinearPackedParamsBase[]" }
  8463. ]
  8464. },
  8465. {
  8466. "name": "__torch__.torch.classes.xnnpack.Conv2dOpContext",
  8467. "inputs": [
  8468. { "name": "weight", "type": "Tensor" },
  8469. { "name": "bias", "type": "Tensor", "optional": true },
  8470. { "name": "stride", "type": "int64[]" },
  8471. { "name": "padding", "type": "int64[]" },
  8472. { "name": "dilation", "type": "int64[]" },
  8473. { "name": "groups", "type": "int64" },
  8474. { "name": "output_min", "type": "int64[]", "optional": true },
  8475. { "name": "output_max", "type": "int64[]", "optional": true }
  8476. ]
  8477. },
  8478. {
  8479. "name": "__torch__.torch.classes.xnnpack.LinearOpContext",
  8480. "inputs": [
  8481. { "name": "weight", "type": "Tensor" },
  8482. { "name": "bias", "type": "Tensor", "optional": true },
  8483. { "name": "output_min", "type": "int64[]", "optional": true },
  8484. { "name": "output_max", "type": "int64[]", "optional": true }
  8485. ]
  8486. },
  8487. {
  8488. "name": "torch.nn.modules.activation.ELU",
  8489. "category": "Activation"
  8490. },
  8491. {
  8492. "name": "torch.nn.modules.activation.GELU",
  8493. "category": "Activation"
  8494. },
  8495. {
  8496. "name": "torch.nn.modules.activation.GLU",
  8497. "category": "Activation"
  8498. },
  8499. {
  8500. "name": "torch.nn.modules.activation.Hardsigmoid",
  8501. "category": "Activation"
  8502. },
  8503. {
  8504. "name": "torch.nn.modules.activation.Hardswish",
  8505. "category": "Activation"
  8506. },
  8507. {
  8508. "name": "torch.nn.modules.activation.Hardtanh",
  8509. "category": "Activation"
  8510. },
  8511. {
  8512. "name": "torch.nn.modules.activation.LeakyReLU",
  8513. "category": "Activation"
  8514. },
  8515. {
  8516. "name": "torch.nn.modules.activation.LogSoftmax",
  8517. "category": "Activation"
  8518. },
  8519. {
  8520. "name": "torch.nn.modules.activation.PReLU",
  8521. "category": "Activation"
  8522. },
  8523. {
  8524. "name": "torch.nn.modules.activation.ReLU",
  8525. "category": "Activation",
  8526. "inputs": [
  8527. { "name": "inplace", "default": false, "visible": false },
  8528. { "name": "threshold", "default": 0 },
  8529. { "name": "value", "default": 0 }
  8530. ]
  8531. },
  8532. {
  8533. "name": "torch.nn.modules.activation.ReLU6",
  8534. "category": "Activation"
  8535. },
  8536. {
  8537. "name": "torch.nn.modules.activation.SiLU",
  8538. "category": "Activation"
  8539. },
  8540. {
  8541. "name": "torch.nn.modules.activation.Sigmoid",
  8542. "category": "Activation"
  8543. },
  8544. {
  8545. "name": "torch.nn.modules.activation.Softmax",
  8546. "category": "Activation"
  8547. },
  8548. {
  8549. "name": "torch.nn.modules.activation.Softmax2d",
  8550. "category": "Activation"
  8551. },
  8552. {
  8553. "name": "torch.nn.modules.activation.Softplus",
  8554. "category": "Activation"
  8555. },
  8556. {
  8557. "name": "torch.nn.modules.activation.Tanh",
  8558. "category": "Activation"
  8559. },
  8560. {
  8561. "name": "torch.nn.modules.batchnorm.BatchNorm1d",
  8562. "category": "Normalization"
  8563. },
  8564. {
  8565. "name": "torch.nn.modules.batchnorm.BatchNorm2d",
  8566. "category": "Normalization",
  8567. "inputs": [
  8568. { "name": "input" },
  8569. { "name": "weight" },
  8570. { "name": "bias" },
  8571. { "name": "running_mean" },
  8572. { "name": "running_var" },
  8573. { "name": "num_batches_tracked", "visible": false },
  8574. { "name": "eps", "default": 1e-05 },
  8575. { "name": "momentum", "default": 0.1 },
  8576. { "name": "affine", "default": true },
  8577. { "name": "track_running_stats", "default": true }
  8578. ]
  8579. },
  8580. {
  8581. "name": "torch.nn.modules.conv.Conv1d",
  8582. "category": "Layer",
  8583. "inputs": [
  8584. { "name": "input" },
  8585. { "name": "weight" },
  8586. { "name": "bias" },
  8587. { "name": "output_padding", "visible": false },
  8588. { "name": "in_channels", "visible": false },
  8589. { "name": "out_channels", "visible": false },
  8590. { "name": "groups", "default": 1 },
  8591. { "name": "transposed", "default": false },
  8592. { "name": "padding", "default": [ 0 ] },
  8593. { "name": "dilation", "default": [ 1 ] },
  8594. { "name": "stride", "default": [ 1 ] }
  8595. ]
  8596. },
  8597. {
  8598. "name": "torch.nn.modules.conv.Conv2d",
  8599. "category": "Layer",
  8600. "inputs": [
  8601. { "name": "input" },
  8602. { "name": "weight" },
  8603. { "name": "bias" },
  8604. { "name": "output_padding", "visible": false },
  8605. { "name": "in_channels", "visible": false },
  8606. { "name": "out_channels", "visible": false },
  8607. { "name": "groups", "default": 1 },
  8608. { "name": "transposed", "default": false },
  8609. { "name": "padding", "default": [ 0, 0 ] },
  8610. { "name": "dilation", "default": [ 1, 1 ] },
  8611. { "name": "stride", "default": [ 1, 1 ] }
  8612. ]
  8613. },
  8614. {
  8615. "name": "torch.nn.modules.conv.Conv3d",
  8616. "category": "Layer"
  8617. },
  8618. {
  8619. "name": "torch.nn.modules.conv.ConvTranspose1d",
  8620. "category": "Layer",
  8621. "inputs": [
  8622. { "name": "input" },
  8623. { "name": "weight" },
  8624. { "name": "bias" },
  8625. { "name": "output_padding", "visible": false },
  8626. { "name": "in_channels", "visible": false },
  8627. { "name": "out_channels", "visible": false },
  8628. { "name": "groups", "default": 1 },
  8629. { "name": "transposed", "default": true },
  8630. { "name": "padding", "default": [ 0 ] },
  8631. { "name": "dilation", "default": [ 1 ] },
  8632. { "name": "stride", "default": [ 1 ] }
  8633. ]
  8634. },
  8635. {
  8636. "name": "torch.nn.modules.conv.ConvTranspose2d",
  8637. "category": "Layer",
  8638. "inputs": [
  8639. { "name": "input" },
  8640. { "name": "weight" },
  8641. { "name": "bias" },
  8642. { "name": "output_padding", "visible": false },
  8643. { "name": "in_channels", "visible": false },
  8644. { "name": "out_channels", "visible": false },
  8645. { "name": "groups", "default": 1 },
  8646. { "name": "transposed", "default": true },
  8647. { "name": "padding", "default": [ 0, 0 ] },
  8648. { "name": "dilation", "default": [ 1, 1 ] },
  8649. { "name": "stride", "default": [ 1, 1 ] }
  8650. ]
  8651. },
  8652. {
  8653. "name": "torch.nn.modules.conv.ConvTranspose3d",
  8654. "category": "Layer"
  8655. },
  8656. {
  8657. "name": "torch.nn.modules.dropout.Dropout",
  8658. "category": "Dropout",
  8659. "inputs": [
  8660. { "name": "inplace", "default": false, "visible": false },
  8661. { "name": "p", "default": 0.5 }
  8662. ]
  8663. },
  8664. {
  8665. "name": "torch.nn.modules.dropout.Dropout2d",
  8666. "category": "Dropout",
  8667. "inputs": [
  8668. { "name": "inplace", "default": false, "visible": false },
  8669. { "name": "p", "default": 0.5 }
  8670. ]
  8671. },
  8672. {
  8673. "name": "torch.nn.modules.instancenorm.InstanceNorm1d"
  8674. },
  8675. {
  8676. "name": "torch.nn.modules.instancenorm.InstanceNorm2d"
  8677. },
  8678. {
  8679. "name": "torch.nn.modules.instancenorm.InstanceNorm3d"
  8680. },
  8681. {
  8682. "name": "torch.nn.modules.linear.Linear",
  8683. "category": "Layer"
  8684. },
  8685. {
  8686. "name": "torch.nn.modules.normalization.CrossMapLRN2d",
  8687. "category": "Normalization",
  8688. "inputs": [
  8689. { "name": "alpha", "default": 0.0001 },
  8690. { "name": "beta", "default": 0.75 },
  8691. { "name": "k", "default": 1 }
  8692. ]
  8693. },
  8694. {
  8695. "name": "torch.nn.modules.normalization.GroupNorm",
  8696. "category": "Normalization"
  8697. },
  8698. {
  8699. "name": "torch.nn.modules.normalization.LayerNorm",
  8700. "category": "Normalization"
  8701. },
  8702. {
  8703. "name": "torch.nn.modules.padding.ConstantPad1d",
  8704. "category": "Tensor"
  8705. },
  8706. {
  8707. "name": "torch.nn.modules.padding.ConstantPad2d",
  8708. "category": "Tensor"
  8709. },
  8710. {
  8711. "name": "torch.nn.modules.padding.ConstantPad3d",
  8712. "category": "Tensor"
  8713. },
  8714. {
  8715. "name": "torch.nn.modules.padding.ReflectionPad1d",
  8716. "category": "Tensor"
  8717. },
  8718. {
  8719. "name": "torch.nn.modules.padding.ReflectionPad2d",
  8720. "category": "Tensor"
  8721. },
  8722. {
  8723. "name": "torch.nn.modules.padding.ReplicationPad1d",
  8724. "category": "Tensor"
  8725. },
  8726. {
  8727. "name": "torch.nn.modules.padding.ReplicationPad2d",
  8728. "category": "Tensor"
  8729. },
  8730. {
  8731. "name": "torch.nn.modules.padding.ReplicationPad3d",
  8732. "category": "Tensor"
  8733. },
  8734. {
  8735. "name": "torch.nn.modules.padding.ZeroPad2d",
  8736. "category": "Tensor"
  8737. },
  8738. {
  8739. "name": "torch.nn.modules.pixelshuffle.PixelShuffle"
  8740. },
  8741. {
  8742. "name": "torch.nn.modules.pooling.AdaptiveAvgPool1d",
  8743. "category": "Pool"
  8744. },
  8745. {
  8746. "name": "torch.nn.modules.pooling.AdaptiveAvgPool2d",
  8747. "category": "Pool"
  8748. },
  8749. {
  8750. "name": "torch.nn.modules.pooling.AdaptiveAvgPool3d",
  8751. "category": "Pool"
  8752. },
  8753. {
  8754. "name": "torch.nn.modules.pooling.AdaptiveMaxPool1d",
  8755. "category": "Pool"
  8756. },
  8757. {
  8758. "name": "torch.nn.modules.pooling.AdaptiveMaxPool2d",
  8759. "category": "Pool"
  8760. },
  8761. {
  8762. "name": "torch.nn.modules.pooling.AdaptiveMaxPool3d",
  8763. "category": "Pool"
  8764. },
  8765. {
  8766. "name": "torch.nn.modules.pooling.AvgPool2d",
  8767. "category": "Pool",
  8768. "inputs": [
  8769. { "name": "padding", "default": 0 },
  8770. { "name": "count_include_pad", "default": true },
  8771. { "name": "ceil_mode", "visible": false }
  8772. ]
  8773. },
  8774. {
  8775. "name": "torch.nn.modules.pooling.AvgPool3d",
  8776. "category": "Pool"
  8777. },
  8778. {
  8779. "name": "torch.nn.modules.pooling.MaxPool1d",
  8780. "category": "Pool"
  8781. },
  8782. {
  8783. "name": "torch.nn.modules.pooling.MaxPool2d",
  8784. "category": "Pool",
  8785. "inputs": [
  8786. { "name": "input" },
  8787. { "name": "padding", "default": 0 },
  8788. { "name": "dilation", "default": 1 },
  8789. { "name": "return_indices", "default": false },
  8790. { "name": "ceil_mode", "visible": false }
  8791. ]
  8792. },
  8793. {
  8794. "name": "torch.nn.modules.pooling.MaxPool3d",
  8795. "category": "Pool"
  8796. },
  8797. {
  8798. "name": "torch.nn.modules.pooling.MaxUnpool1d",
  8799. "category": "Pool"
  8800. },
  8801. {
  8802. "name": "torch.nn.modules.pooling.MaxUnpool2d",
  8803. "category": "Pool"
  8804. },
  8805. {
  8806. "name": "torch.nn.modules.pooling.MaxUnpool3d",
  8807. "category": "Pool"
  8808. },
  8809. {
  8810. "name": "torch.nn.modules.rnn.GRU",
  8811. "category": "Layer"
  8812. },
  8813. {
  8814. "name": "torch.nn.modules.rnn.GRUCell",
  8815. "category": "Layer"
  8816. },
  8817. {
  8818. "name": "torch.nn.modules.rnn.LSTM",
  8819. "category": "Layer",
  8820. "inputs": [
  8821. { "name": "input" },
  8822. { "name": "weight_ih_l0", "visible": false },
  8823. { "name": "weight_hh_l0", "visible": false },
  8824. { "name": "bias_ih_l0", "visible": false },
  8825. { "name": "bias_hh_l0", "visible": false },
  8826. { "name": "weight_ih_l1", "visible": false },
  8827. { "name": "weight_hh_l1", "visible": false },
  8828. { "name": "bias_ih_l1", "visible": false },
  8829. { "name": "bias_hh_l1", "visible": false },
  8830. { "name": "dropout", "default": 0 },
  8831. { "name": "dropout_state", "default": {} },
  8832. { "name": "num_layers", "default": 1 },
  8833. { "name": "batch_first", "visible": false },
  8834. { "name": "bidirectional", "visible": false },
  8835. { "name": "bias", "visible": false }
  8836. ]
  8837. },
  8838. {
  8839. "name": "torch.nn.modules.rnn.LSTMCell",
  8840. "category": "Layer"
  8841. },
  8842. {
  8843. "name": "torch.nn.modules.rnn.RNN",
  8844. "category": "Layer"
  8845. },
  8846. {
  8847. "name": "torch.nn.modules.sparse.Embedding",
  8848. "category": "Transform",
  8849. "inputs": [
  8850. { "name": "norm_type", "default": 2 },
  8851. { "name": "scale_grad_by_freq", "default": false },
  8852. { "name": "sparse", "default": false },
  8853. { "name": "max_norm", "default": null },
  8854. { "name": "padding_idx", "default": null }
  8855. ]
  8856. },
  8857. {
  8858. "name": "torch.nn.modules.upsampling.Upsample",
  8859. "category": "Data"
  8860. }
  8861. }
  8862. ]