| 123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702
70370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088108910901091109210931094109510961097109810991100110111021103110411051106110711081109111011111112111311141115111611171118111911201121112211231124112511261127112811291130113111321133113411351136113711381139114011411142114311441145114611471148114911501151115211531154115511561157115811591160116111621163116411651166116711681169117011711172117311741175117611771178117911801181118211831184118511861187118811891190119111921193119411951196119711981199120012011202120312041205120612071208120912101211121212131214121512161217121812191220122112221223122412251226122712281229123012311232123312341235123612371238123912401241124212431244124512461247124812491250125112521253125412551256125712581259126012611262126312641265126612671268126912701271127212731274127512761
27712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426142714281429143014311432143314341435143614371438143914401441144214431444144514461447144814491450145114521453145414551456145714581459146014611462146314641465146614671468146914701471147214731474147514761477147814791480148114821483148414851486148714881489149014911492149314941495149614971498149915001501150215031504150515061507150815091510151115121513151415151516151715181519152015211522152315241525152615271528152915301531153215331534153515361537153815391540154115421543154415451546154715481549155015511552155315541555155615571558155915601561156215631564156515661567156815691570157115721573157415751576157715781579158015811582158315841585158615871588158915901591159215931594159515961597159815991600160116021603160416051606160716081609161016111612161316141615161616171618161916201621162216231624162516261627162816291630163116321633163416351636163716381639164016411642164316441645164616471648164916501651165216531654165516561657165816591660166116621663166416651666166716681669167016711672167316741675167616771678167916801681168216831684168516861687168816891690169116921693169416951696169716981699170017011702170317041705170617071708170917101711171217131714171517161717171817191720172117221723172417251726172717281729173017311732173317341735173617371738173917401741174217431744174517461747174817491750175117521753175417551756175717581759176017611762176317641765176617671768176917701771177217731774177517761
77717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542055205620572058205920602061206220632064206520662067206820692070207120722073207420752076207720782079208020812082208320842085208620872088208920902091209220932094209520962097209820992100210121022103210421052106210721082109211021112112211321142115211621172118211921202121212221232124212521262127212821292130213121322133213421352136213721382139214021412142214321442145214621472148214921502151215221532154215521562157215821592160216121622163216421652166216721682169217021712172217321742175217621772178217921802181218221832184218521862187218821892190219121922193219421952196219721982199220022012202220322042205220622072208220922102211221222132214221522162217221822192220222122222223222422252226222722282229223022312232223322342235223622372238223922402241224222432244224522462247224822492250225122522253225422552256225722582259226022612262226322642265226622672268226922702271227222732274227522762
27722782279228022812282228322842285228622872288228922902291229222932294229522962297229822992300230123022303230423052306230723082309231023112312231323142315231623172318231923202321232223232324232523262327232823292330233123322333233423352336233723382339234023412342234323442345234623472348234923502351235223532354235523562357235823592360236123622363236423652366236723682369237023712372237323742375237623772378237923802381238223832384238523862387238823892390239123922393239423952396239723982399240024012402240324042405240624072408240924102411241224132414241524162417241824192420242124222423242424252426242724282429243024312432243324342435243624372438243924402441244224432444244524462447244824492450245124522453245424552456245724582459246024612462246324642465246624672468246924702471247224732474247524762477247824792480248124822483248424852486248724882489249024912492249324942495249624972498249925002501250225032504250525062507250825092510251125122513251425152516251725182519252025212522252325242525252625272528252925302531253225332534253525362537253825392540254125422543254425452546254725482549255025512552255325542555255625572558255925602561256225632564256525662567256825692570257125722573257425752576257725782579258025812582258325842585258625872588258925902591259225932594259525962597259825992600260126022603260426052606260726082609261026112612261326142615261626172618261926202621262226232624262526262627262826292630263126322633263426352636263726382639264026412642264326442645264626472648264926502651265226532654265526562657265826592660266126622663266426652666266726682669267026712672267326742675267626772678267926802681268226832684268526862687268826892690269126922693269426952696269726982699270027012702270327042705270627072708270927102711271227132714271527162717271827192720272127222723272427252726272727282729273027312732273327342735273627372738273927402741274227432744274527462747274827492750275127522753275427552756275727582759276027612762276327642765276627672768276927702771277227732774277527762
77727782779278027812782278327842785278627872788278927902791279227932794279527962797279827992800280128022803280428052806280728082809281028112812281328142815281628172818281928202821282228232824282528262827282828292830283128322833283428352836283728382839284028412842284328442845284628472848284928502851285228532854285528562857285828592860286128622863286428652866286728682869287028712872287328742875287628772878287928802881288228832884288528862887288828892890289128922893289428952896289728982899290029012902290329042905290629072908290929102911291229132914291529162917291829192920292129222923292429252926292729282929293029312932293329342935293629372938293929402941294229432944294529462947294829492950295129522953295429552956295729582959296029612962296329642965296629672968296929702971297229732974297529762977297829792980298129822983298429852986298729882989299029912992299329942995299629972998299930003001300230033004300530063007300830093010301130123013301430153016301730183019302030213022302330243025302630273028302930303031303230333034303530363037303830393040304130423043304430453046304730483049305030513052305330543055305630573058305930603061306230633064306530663067306830693070307130723073307430753076307730783079308030813082308330843085308630873088308930903091309230933094309530963097309830993100310131023103310431053106310731083109311031113112311331143115311631173118311931203121312231233124312531263127312831293130313131323133313431353136313731383139314031413142314331443145314631473148314931503151315231533154315531563157315831593160316131623163316431653166316731683169317031713172317331743175317631773178317931803181318231833184318531863187318831893190319131923193319431953196319731983199320032013202320332043205320632073208320932103211321232133214321532163217321832193220322132223223322432253226322732283229323032313232323332343235323632373238323932403241324232433244324532463247324832493250325132523253325432553256325732583259326032613262326332643265326632673268326932703271327232733274327532763
27732783279328032813282328332843285328632873288328932903291329232933294329532963297329832993300330133023303330433053306330733083309331033113312331333143315331633173318331933203321332233233324332533263327332833293330333133323333333433353336333733383339334033413342334333443345334633473348334933503351335233533354335533563357335833593360336133623363336433653366336733683369337033713372337333743375337633773378337933803381338233833384338533863387338833893390339133923393339433953396339733983399340034013402340334043405340634073408340934103411341234133414341534163417341834193420342134223423342434253426342734283429343034313432343334343435343634373438343934403441344234433444344534463447344834493450345134523453345434553456345734583459346034613462346334643465346634673468346934703471347234733474347534763477347834793480348134823483348434853486348734883489349034913492349334943495349634973498349935003501350235033504350535063507350835093510351135123513351435153516351735183519352035213522352335243525352635273528352935303531353235333534353535363537353835393540354135423543354435453546354735483549355035513552355335543555355635573558355935603561356235633564356535663567356835693570357135723573357435753576357735783579358035813582358335843585358635873588358935903591359235933594359535963597359835993600360136023603360436053606360736083609361036113612361336143615361636173618361936203621362236233624362536263627362836293630363136323633363436353636363736383639364036413642364336443645364636473648364936503651365236533654365536563657365836593660366136623663366436653666366736683669367036713672367336743675367636773678367936803681368236833684368536863687368836893690369136923693369436953696369736983699370037013702370337043705370637073708370937103711371237133714371537163717371837193720372137223723372437253726372737283729373037313732373337343735373637373738373937403741374237433744374537463747374837493750375137523753375437553756375737583759376037613762376337643765376637673768376937703771377237733774377537763
77737783779378037813782378337843785378637873788378937903791379237933794379537963797379837993800380138023803380438053806380738083809381038113812381338143815381638173818381938203821382238233824382538263827382838293830383138323833383438353836383738383839384038413842384338443845384638473848384938503851385238533854385538563857385838593860386138623863386438653866386738683869387038713872387338743875387638773878387938803881388238833884388538863887388838893890389138923893389438953896389738983899390039013902390339043905390639073908390939103911391239133914391539163917391839193920392139223923392439253926392739283929393039313932393339343935393639373938393939403941394239433944394539463947394839493950395139523953395439553956395739583959396039613962396339643965396639673968396939703971397239733974397539763977397839793980398139823983398439853986398739883989399039913992399339943995399639973998399940004001400240034004400540064007400840094010401140124013401440154016401740184019402040214022402340244025402640274028402940304031403240334034403540364037403840394040404140424043404440454046404740484049405040514052405340544055405640574058405940604061406240634064406540664067406840694070407140724073407440754076407740784079408040814082408340844085408640874088408940904091409240934094409540964097409840994100410141024103410441054106410741084109411041114112411341144115411641174118411941204121412241234124412541264127412841294130413141324133413441354136413741384139414041414142414341444145414641474148414941504151415241534154415541564157415841594160416141624163416441654166416741684169417041714172417341744175417641774178417941804181418241834184418541864187418841894190419141924193419441954196419741984199420042014202420342044205420642074208420942104211421242134214421542164217421842194220422142224223422442254226422742284229423042314232423342344235423642374238423942404241424242434244424542464247424842494250425142524253425442554256425742584259426042614262426342644265426642674268426942704271427242734274427542764
27742784279428042814282428342844285428642874288428942904291429242934294429542964297429842994300430143024303430443054306430743084309431043114312431343144315431643174318431943204321432243234324432543264327432843294330433143324333433443354336433743384339434043414342434343444345434643474348434943504351435243534354435543564357435843594360436143624363436443654366436743684369437043714372437343744375437643774378437943804381438243834384438543864387438843894390439143924393439443954396439743984399440044014402440344044405440644074408440944104411441244134414441544164417441844194420442144224423442444254426442744284429443044314432443344344435443644374438443944404441444244434444444544464447444844494450445144524453445444554456445744584459446044614462446344644465446644674468446944704471447244734474447544764477447844794480448144824483448444854486448744884489449044914492449344944495449644974498449945004501450245034504450545064507450845094510451145124513451445154516451745184519452045214522452345244525452645274528452945304531453245334534453545364537453845394540454145424543454445454546454745484549455045514552455345544555455645574558455945604561456245634564456545664567456845694570457145724573457445754576457745784579458045814582458345844585458645874588458945904591459245934594459545964597459845994600460146024603460446054606460746084609461046114612461346144615461646174618461946204621462246234624462546264627462846294630463146324633463446354636463746384639464046414642464346444645464646474648464946504651465246534654465546564657465846594660466146624663466446654666466746684669467046714672467346744675467646774678467946804681468246834684468546864687468846894690469146924693469446954696469746984699470047014702470347044705470647074708470947104711471247134714471547164717471847194720472147224723472447254726472747284729473047314732473347344735473647374738473947404741474247434744474547464747474847494750475147524753475447554756475747584759476047614762476347644765476647674768476947704771477247734774477547764
77747784779478047814782478347844785478647874788478947904791479247934794479547964797479847994800480148024803480448054806480748084809481048114812481348144815481648174818481948204821482248234824482548264827482848294830483148324833483448354836483748384839484048414842484348444845484648474848484948504851485248534854485548564857485848594860486148624863486448654866486748684869487048714872487348744875487648774878487948804881488248834884488548864887488848894890489148924893489448954896489748984899490049014902490349044905490649074908490949104911491249134914491549164917491849194920492149224923492449254926492749284929493049314932493349344935493649374938493949404941494249434944494549464947494849494950495149524953495449554956495749584959496049614962496349644965496649674968496949704971497249734974497549764977497849794980498149824983498449854986498749884989499049914992499349944995499649974998499950005001500250035004500550065007500850095010501150125013501450155016501750185019502050215022502350245025502650275028502950305031503250335034503550365037503850395040504150425043504450455046504750485049505050515052505350545055505650575058505950605061506250635064506550665067506850695070507150725073507450755076507750785079508050815082508350845085508650875088508950905091509250935094509550965097509850995100510151025103510451055106510751085109511051115112511351145115511651175118511951205121512251235124512551265127512851295130513151325133513451355136513751385139514051415142514351445145514651475148514951505151515251535154515551565157515851595160516151625163516451655166516751685169517051715172517351745175517651775178517951805181518251835184518551865187518851895190519151925193519451955196519751985199520052015202520352045205520652075208520952105211521252135214521552165217521852195220522152225223522452255226522752285229523052315232523352345235523652375238523952405241524252435244524552465247524852495250525152525253525452555256525752585259526052615262526352645265526652675268526952705271527252735274527552765
27752785279528052815282528352845285528652875288528952905291529252935294529552965297529852995300530153025303530453055306530753085309531053115312531353145315531653175318531953205321532253235324532553265327532853295330533153325333533453355336533753385339534053415342534353445345534653475348534953505351535253535354535553565357535853595360536153625363536453655366536753685369537053715372537353745375537653775378537953805381538253835384538553865387538853895390539153925393539453955396539753985399540054015402540354045405540654075408540954105411541254135414541554165417541854195420542154225423542454255426542754285429543054315432543354345435543654375438543954405441544254435444544554465447544854495450545154525453545454555456545754585459546054615462546354645465546654675468546954705471547254735474547554765477547854795480548154825483548454855486548754885489549054915492549354945495549654975498549955005501550255035504550555065507550855095510551155125513551455155516551755185519552055215522552355245525552655275528552955305531553255335534553555365537553855395540554155425543554455455546554755485549555055515552555355545555555655575558555955605561556255635564556555665567556855695570557155725573557455755576557755785579558055815582558355845585558655875588558955905591559255935594559555965597559855995600560156025603560456055606560756085609561056115612561356145615561656175618561956205621562256235624562556265627562856295630563156325633563456355636563756385639564056415642564356445645564656475648564956505651565256535654565556565657565856595660566156625663566456655666566756685669567056715672567356745675567656775678567956805681568256835684568556865687568856895690569156925693569456955696569756985699570057015702570357045705570657075708570957105711571257135714571557165717571857195720572157225723572457255726572757285729573057315732573357345735573657375738573957405741574257435744574557465747574857495750575157525753575457555756575757585759576057615762576357645765576657675768576957705771577257735774577557765
77757785779578057815782578357845785578657875788578957905791579257935794579557965797579857995800580158025803580458055806580758085809581058115812581358145815581658175818581958205821582258235824582558265827582858295830583158325833583458355836583758385839584058415842584358445845584658475848584958505851585258535854585558565857585858595860586158625863586458655866586758685869587058715872587358745875587658775878587958805881588258835884588558865887588858895890589158925893589458955896589758985899590059015902590359045905590659075908590959105911591259135914591559165917591859195920592159225923592459255926592759285929593059315932593359345935593659375938593959405941594259435944594559465947594859495950595159525953595459555956595759585959596059615962596359645965596659675968596959705971597259735974597559765977597859795980598159825983598459855986598759885989599059915992599359945995599659975998599960006001600260036004600560066007600860096010601160126013601460156016601760186019602060216022602360246025602660276028602960306031603260336034603560366037603860396040604160426043604460456046604760486049605060516052605360546055605660576058605960606061606260636064606560666067606860696070607160726073607460756076607760786079608060816082608360846085608660876088608960906091609260936094609560966097609860996100610161026103610461056106610761086109611061116112611361146115611661176118611961206121612261236124612561266127612861296130613161326133613461356136613761386139614061416142614361446145614661476148614961506151615261536154615561566157615861596160616161626163616461656166616761686169617061716172617361746175617661776178617961806181618261836184618561866187618861896190619161926193619461956196619761986199620062016202620362046205620662076208620962106211621262136214621562166217621862196220622162226223622462256226622762286229623062316232623362346235623662376238623962406241624262436244624562466247624862496250625162526253625462556256625762586259626062616262626362646265626662676268626962706271627262736274627562766
27762786279628062816282628362846285628662876288628962906291629262936294629562966297629862996300630163026303630463056306630763086309631063116312631363146315631663176318631963206321632263236324632563266327632863296330633163326333633463356336633763386339634063416342634363446345634663476348634963506351635263536354635563566357635863596360636163626363636463656366636763686369637063716372637363746375637663776378637963806381638263836384638563866387638863896390639163926393639463956396639763986399640064016402640364046405640664076408640964106411641264136414641564166417641864196420642164226423642464256426642764286429643064316432643364346435643664376438643964406441644264436444644564466447644864496450645164526453645464556456645764586459646064616462646364646465646664676468646964706471647264736474647564766477647864796480648164826483648464856486648764886489649064916492649364946495649664976498649965006501650265036504650565066507650865096510651165126513651465156516651765186519652065216522652365246525652665276528652965306531653265336534653565366537653865396540654165426543654465456546654765486549655065516552655365546555655665576558655965606561656265636564656565666567656865696570657165726573657465756576657765786579658065816582658365846585658665876588658965906591659265936594659565966597659865996600660166026603660466056606660766086609661066116612661366146615661666176618661966206621662266236624662566266627662866296630663166326633663466356636663766386639664066416642664366446645664666476648664966506651665266536654665566566657665866596660666166626663666466656666666766686669667066716672667366746675667666776678667966806681668266836684668566866687668866896690669166926693669466956696669766986699670067016702670367046705670667076708670967106711671267136714671567166717671867196720672167226723672467256726672767286729673067316732673367346735673667376738673967406741674267436744674567466747674867496750675167526753675467556756675767586759676067616762676367646765676667676768676967706771677267736774677567766
77767786779678067816782678367846785678667876788678967906791679267936794679567966797679867996800680168026803680468056806680768086809681068116812681368146815681668176818681968206821682268236824682568266827682868296830683168326833683468356836683768386839684068416842684368446845684668476848684968506851685268536854685568566857685868596860686168626863686468656866686768686869687068716872687368746875687668776878687968806881688268836884688568866887688868896890689168926893689468956896689768986899690069016902690369046905690669076908690969106911691269136914691569166917691869196920692169226923692469256926692769286929693069316932693369346935693669376938693969406941694269436944694569466947694869496950695169526953695469556956695769586959696069616962696369646965696669676968696969706971697269736974697569766977697869796980698169826983698469856986698769886989699069916992699369946995699669976998699970007001700270037004700570067007700870097010701170127013701470157016701770187019702070217022702370247025702670277028702970307031703270337034703570367037703870397040704170427043704470457046704770487049705070517052705370547055705670577058705970607061706270637064706570667067706870697070707170727073707470757076707770787079708070817082708370847085708670877088708970907091709270937094709570967097709870997100710171027103710471057106710771087109711071117112711371147115711671177118711971207121712271237124712571267127712871297130713171327133713471357136713771387139714071417142714371447145714671477148714971507151715271537154715571567157715871597160716171627163716471657166716771687169717071717172717371747175717671777178717971807181718271837184718571867187718871897190719171927193719471957196719771987199720072017202720372047205720672077208720972107211721272137214721572167217721872197220722172227223722472257226722772287229723072317232723372347235723672377238723972407241724272437244724572467247724872497250725172527253725472557256725772587259726072617262726372647265726672677268726972707271727272737274727572767
27772787279728072817282728372847285728672877288728972907291729272937294729572967297729872997300730173027303730473057306730773087309731073117312731373147315731673177318731973207321732273237324732573267327732873297330733173327333733473357336733773387339734073417342734373447345734673477348734973507351735273537354735573567357735873597360736173627363736473657366736773687369737073717372737373747375737673777378737973807381738273837384738573867387738873897390739173927393739473957396739773987399740074017402740374047405740674077408740974107411741274137414741574167417741874197420742174227423742474257426742774287429743074317432743374347435743674377438743974407441744274437444744574467447744874497450745174527453745474557456745774587459746074617462746374647465746674677468746974707471747274737474747574767477747874797480748174827483748474857486748774887489749074917492749374947495749674977498749975007501750275037504750575067507750875097510751175127513751475157516751775187519752075217522752375247525752675277528752975307531753275337534753575367537753875397540754175427543754475457546754775487549755075517552755375547555755675577558755975607561756275637564756575667567756875697570757175727573757475757576757775787579758075817582758375847585758675877588758975907591759275937594759575967597759875997600760176027603760476057606760776087609761076117612761376147615761676177618761976207621762276237624762576267627762876297630763176327633763476357636763776387639764076417642764376447645764676477648764976507651765276537654765576567657765876597660766176627663766476657666766776687669767076717672767376747675767676777678767976807681768276837684768576867687768876897690769176927693769476957696769776987699770077017702770377047705770677077708770977107711771277137714771577167717771877197720772177227723772477257726772777287729773077317732773377347735773677377738773977407741774277437744774577467747774877497750775177527753775477557756775777587759776077617762776377647765776677677768776977707771777277737774777577767
777777877797780778177827783778477857786778777887789779077917792779377947795779677977798779978007801780278037804780578067807780878097810781178127813781478157816781778187819782078217822782378247825782678277828782978307831783278337834783578367837783878397840784178427843784478457846784778487849785078517852785378547855785678577858785978607861786278637864786578667867786878697870787178727873787478757876787778787879788078817882788378847885788678877888788978907891789278937894789578967897789878997900790179027903790479057906790779087909791079117912791379147915791679177918791979207921792279237924792579267927792879297930793179327933793479357936793779387939794079417942794379447945794679477948794979507951795279537954795579567957795879597960796179627963796479657966796779687969797079717972797379747975797679777978797979807981798279837984798579867987798879897990799179927993799479957996799779987999800080018002800380048005800680078008800980108011801280138014 |
- [
- {
- "name": "Affine",
- "description": "Affine layer, also called the fully connected layer. It calculates:\n\n.. math::\n {\\mathbf y} = {\\mathbf A} {\\mathbf x} + {\\mathbf b}.\n\nwhere :math:`{\\mathbf x}` is the input and :math:`{\\mathbf y}` is the output.",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "Input N-D array with shape (:math:`M_0 \\times ... \\times M_{B-1} \\times D_B \\times ... \\times D_N`). Dimensions before and after base_axis are flattened as if it is a matrix."
- },
- {
- "name": "weight",
- "type": "nnabla.Variable",
- "description": "Weight matrix with shape (:math:`(D_B \\times ... \\times D_N) \\times L_{0} \\times \\ldots \\times L_{I}`)"
- },
- {
- "name": "bias",
- "type": "nnabla.Variable",
- "option": "optional",
- "description": "Bias vector (:math:`L_{0} \\times \\ldots \\times L_{I}`)"
- }
- ],
- "attributes": [
- {
- "name": "base_axis",
- "type": "int64",
- "default": 1,
- "description": "Base axis of Affine operation. Dimensions up to base_axis is treated as sample dimension."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": ":math:`(B + 1)`-D array. (:math:`M_0 \\times ... \\times M_{B-1} \\times L_{0} \\times \\ldots \\times L_{I}`)"
- }
- ],
- "category": "Layer"
- },
- {
- "name": "RNN",
- "description": "RNN function implements Elman RNN with nonlinearity to input sequence.\nRNN function is defined as following:\n\n.. math::\n {\\mathbf h_t} = {\\mathbf \\tanh}( {\\mathbf w_{ih}} *{\\mathbf x_t} + {\\mathbf b_{ih}} + {\\mathbf w_{hh}}* {\\mathbf h_{(t-1)}} + {\\mathbf b_{hh}}).\n\nWe use the following notations to describe the inputs and outputs below.\n:math:`T`: sequence length, :math:`B`: batch size, :math:`I`: input size, :math:`L`: number of layers, :math:`D`: number of directions, can be either 1 or 2, :math:`H`: hidden size.\n\nReferences:\n * `Jeffrey Elman, Finding Structure in Time. <https://crl.ucsd.edu/~elman/Papers/fsit.pdf>`_",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "Input N-D array with shape :math:`(T, B, I)`."
- },
- {
- "name": "h",
- "type": "nnabla.Variable",
- "description": "Input N-D array with shape :math:`(L, D, B, H)`."
- },
- {
- "name": "weight_l0",
- "type": "nnabla.Variable",
- "description": "Input N-D array with shape :math:`(D, H, I + H)`."
- },
- {
- "name": "weight",
- "type": "nnabla.Variable",
- "option": "optional",
- "description": "Input N-D array with shape :math:`(L-1, D, H, D * H + H)`."
- },
- {
- "name": "bias",
- "type": "nnabla.Variable",
- "option": "optional",
- "description": "Input N-D array with shape :math:`(L, D, H)`."
- }
- ],
- "attributes": [
- {
- "name": "num_layers",
- "type": "int64",
- "default": 1,
- "description": "Number of layers in the network. If set to 1, only the weights for the first layer will be invoked. Default is 1."
- },
- {
- "name": "nonlinearity",
- "type": "string",
- "default": "tanh",
- "description": "Type of nonlinearity applied to input sequence. Must be either tanh or relu. Default is tanh."
- },
- {
- "name": "dropout",
- "type": "float32",
- "default": 0.0,
- "description": "Dropout ratio applied to parameters. Default is 0.0."
- },
- {
- "name": "bidirectional",
- "type": "boolean",
- "default": false,
- "description": "If True, bidirectional computation will be performed in each layer. Default is False."
- },
- {
- "name": "training",
- "type": "boolean",
- "default": true,
- "description": "Backpropagation will be performed only when it is true. Default is True."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "Output :math:`y` with shape :math:`(T, B, D * H)`"
- },
- {
- "name": "h_n",
- "type": "nnabla.Variable",
- "description": "Output :math:`h_n` with shape :math:`(L, D, B, H)`"
- }
- ],
- "category": "Layer"
- },
- {
- "name": "LSTM",
- "description": "N-Step LSTM layer.\n\n.. math::\n {\\mathbf f_t} &=& {\\mathbf \\sigma}( {\\mathbf W_f} *{\\mathbf x_t} + {\\mathbf U_f}* {\\mathbf h_{(t-1)}} + {\\mathbf b_f})\\\\\n {\\mathbf i_t} &=& {\\mathbf \\sigma}( {\\mathbf W_i} *{\\mathbf x_t} + {\\mathbf U_i}* {\\mathbf h_{(t-1)}} + {\\mathbf b_i})\\\\\n {\\mathbf o_t} &=& {\\mathbf \\sigma}( {\\mathbf W_o} *{\\mathbf x_t} + {\\mathbf U_o}* {\\mathbf h_{(t-1)}} + {\\mathbf b_o})\\\\\n {\\mathbf c_t} &=& {\\mathbf f_t}\\odot {\\mathbf c_{(t-1)}} + {\\mathbf i_t}\\odot {\\mathbf \\tanh}({\\mathbf W_c}*{\\mathbf x_t} + {\\mathbf U_c} *{\\mathbf h_{(t-1)}} + {\\mathbf b_c})\\\\\n {\\mathbf h_t} &=& {\\mathbf o_t} \\odot {\\mathbf \\tanh}({\\mathbf c_t}).\n\nWe use the following notations to describe the inputs and outputs below.\n:math:`T`: sequence length, :math:`B`: batch size, :math:`I`: input size, :math:`L`: number of layers, :math:`D`: number of directions, can be either 1 or 2, :math:`H`: hidden size.\n\nReferences:\n * `S. Hochreiter and J. Schmidhuber, Long Short-Term Memory. <https://www.bioinf.jku.at/publications/older/2604.pdf>`_",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "Input N-D array with shape :math:`(T, B, I)`."
- },
- {
- "name": "h",
- "type": "nnabla.Variable",
- "description": "Input N-D array with shape :math:`(L, D, B, H)`."
- },
- {
- "name": "c",
- "type": "nnabla.Variable",
- "description": "Input N-D array with shape :math:`(L, D, B, H)`."
- },
- {
- "name": "weight_l0",
- "type": "nnabla.Variable",
- "description": "weight parameters for the first layer. Shape is :math:`(D, 4, H, I + H)`."
- },
- {
- "name": "weight",
- "type": "nnabla.Variable",
- "option": "optional",
- "description": "weight parameters for the second layer and above. Shape is :math:`(L-1, D, 4, H, D * H + H)`."
- },
- {
- "name": "bias",
- "type": "nnabla.Variable",
- "option": "optional",
- "description": "Bias vector (:math:`L`). Shape is :math:`(L, D, 4, H)`."
- }
- ],
- "attributes": [
- {
- "name": "num_layers",
- "type": "int64",
- "default": 1,
- "description": "Number of layers in the network. If set to 1, only the weights for the first layer will be invoked. Default is 1."
- },
- {
- "name": "dropout",
- "type": "float32",
- "default": 0.0,
- "description": "Dropout ratio applied to parameters. Default is 0.0."
- },
- {
- "name": "bidirectional",
- "type": "boolean",
- "default": false,
- "description": "If True, bidirectional computation will be performed in each layer. Default is False."
- },
- {
- "name": "training",
- "type": "boolean",
- "default": true,
- "description": "Backpropagation will be performed only when it is True. Default is True."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "Output :math:`y` with shape :math:`(T, B, D * H)`. Its memory layout can be reshaped as :math:`(T, B, D, H)`."
- },
- {
- "name": "h_n",
- "type": "nnabla.Variable",
- "description": "Output :math:`h_n` with shape :math:`(L, D, B, H)`"
- },
- {
- "name": "c_n",
- "type": "nnabla.Variable",
- "description": "Output :math:`c_n` with shape :math:`(L, D, B, H)`"
- }
- ],
- "category": "Layer"
- },
- {
- "name": "GRU",
- "description": "N-Step GRU layer.\n\n.. math::\n {\\mathbf r_t} &=& {\\mathbf \\sigma}( {\\mathbf W_r} *{\\mathbf x_t} + {\\mathbf U_r}* {\\mathbf h_{(t-1)}} + {\\mathbf b_r})\\\\\n {\\mathbf z_t} &=& {\\mathbf \\sigma}( {\\mathbf W_z} *{\\mathbf x_t} + {\\mathbf U_z}* {\\mathbf h_{(t-1)}} + {\\mathbf b_z})\\\\\n {\\mathbf n_t} &=& {\\mathbf \\tanh}( {\\mathbf W_n}{\\mathbf x_t}+ {\\mathbf b_{in}}+ {\\mathbf r_n}\\odot( {\\mathbf U_n}{\\mathbf h_{t-1}}+ {\\mathbf b_{hn}})) \\\\\n {\\mathbf h_t} &=& (1- {\\mathbf z_t})\\odot {\\mathbf n_t} + {\\mathbf z_t}\\odot {\\mathbf h_{t-1}}.\n\nWe use the following notations to describe the inputs and outputs below.\n:math:`T`: sequence length, :math:`B`: batch size, :math:`I`: input size, :math:`L`: number of layers, :math:`D`: number of directions, can be either 1 or 2, :math:`H`: hidden size.\n\nReferences:\n\n * `K. Cho et al., Learning Phrase Representations using RNN Encoder-Decoder for Statistical Machine Translation. <https://www.aclweb.org/anthology/D14-1179>`_",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "Input N-D array with shape :math:`(T, B, I)`."
- },
- {
- "name": "h",
- "type": "nnabla.Variable",
- "description": "Input N-D array with shape :math:`(L, D, B, H)`."
- },
- {
- "name": "weight_l0",
- "type": "nnabla.Variable",
- "description": "weight parameters for the first layer. Shape is :math:`(D, 3, H, I + H)`."
- },
- {
- "name": "weight",
- "type": "nnabla.Variable",
- "option": "optional",
- "description": "weight parameters for the second layer and above. Shape is :math:`(L-1, D, 3, H, D * H + H)`."
- },
- {
- "name": "bias",
- "type": "nnabla.Variable",
- "option": "optional",
- "description": "Bias vector (:math:`L`). Shape is :math:`(L, D, 4, H)`."
- }
- ],
- "attributes": [
- {
- "name": "num_layers",
- "type": "int64",
- "default": 1,
- "description": "Number of layers in the network. If set to 1, only the weights for the first layer will be invoked. Default is 1."
- },
- {
- "name": "dropout",
- "type": "float32",
- "default": 0.0,
- "description": "Dropout ratio applied to parameters. Default is 0.0."
- },
- {
- "name": "bidirectional",
- "type": "boolean",
- "default": false,
- "description": "If True, bidirectional computation will be performed in each layer. Default is False."
- },
- {
- "name": "training",
- "type": "boolean",
- "default": true,
- "description": "Backpropagation will be performed only when it is True. Default is True."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "Output :math:`y` with shape :math:`(T, B, D * H)`. Its memory layout can be reshaped as :math:`(T, B, D, H)`."
- },
- {
- "name": "h_n",
- "type": "nnabla.Variable",
- "description": "Output :math:`h_n` with shape :math:`(L, D, B, H)`"
- }
- ],
- "category": "Layer"
- },
- {
- "name": "Convolution",
- "description": "N-D Convolution with bias.\n\nSee references for dilated convolution (a.k.a. atrous convolution).\n\nReferences:\n\n * `Chen et al., DeepLab: Semantic Image Segmentation with Deep Convolutional\n Nets, Atrous Convolution, and Fully Connected CRFs.\n <https://arxiv.org/abs/1606.00915>`_\n\n * `Yu et al., Multi-Scale Context Aggregation by Dilated Convolutions.\n <https://arxiv.org/abs/1511.07122>`_\n\nNote:\n\n Convolution is a computationally intensive operation that\n should preferably be run with the `cudnn` backend. NNabla\n then uses CuDNN library functions to determine and cache the\n fastest algorithm for the given set of convolution parameters,\n which results in additional memory consumption which may pose\n a problem for GPUs with insufficient memory size. In that\n case, the `NNABLA_CUDNN_WORKSPACE_LIMIT` environment variable\n can be used to restrict the choice of algorithms to those that\n fit the given workspace memory limit, expressed in bytes. In\n some cases it may also be desired to restrict the automatic\n search to algorithms that produce deterministic (reproducible)\n results. This can be requested by setting the environment\n variable `NNABLA_CUDNN_DETERMINISTIC` to a non-zero value.",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": ":math:`(B + 1 + N)`-D array (:math:`M_1 \\times ... \\times M_B \\times C \\times L_1 \\times ... \\times L_N`)."
- },
- {
- "name": "weight",
- "type": "nnabla.Variable",
- "description": ":math:`(2 + N)`-D array (:math:`C' \\times C \\times K_1 \\times ... \\times K_N`)."
- },
- {
- "name": "bias",
- "type": "nnabla.Variable",
- "option": "optional",
- "description": "Bias vector (:math:`C'`)."
- }
- ],
- "attributes": [
- {
- "name": "base_axis",
- "type": "int64",
- "default": 1,
- "description": "base axis :math:`B`."
- },
- {
- "name": "pad",
- "type": "shape",
- "default": "(0,) * (len(x.shape) - (base_axis+1))",
- "description": "Padding sizes for dimensions."
- },
- {
- "name": "stride",
- "type": "shape",
- "default": "(1,) * (len(x.shape) - (base_axis+1))",
- "description": "Stride sizes for dimensions."
- },
- {
- "name": "dilation",
- "type": "shape",
- "default": "(1,) * (len(x.shape) - (base_axis+1))",
- "description": "Dilation sizes for dimensions."
- },
- {
- "name": "group",
- "type": "int64",
- "default": 1,
- "description": "Number of groups of channels. This makes the connection across channels sparser, by grouping connections along the mapping direction."
- },
- {
- "name": "channel_last",
- "type": "boolean",
- "default": false,
- "description": "If True, the last dimension is considered as channel dimension, a.k.a NHWC order."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": ":math:`(B + 1 + N)`-D array (:math:`M_1 \\times ... \\times M_B \\times C' \\times L'_1 \\times ... \\times L'_N`).\n\nA spatial size of the output is calculated as\n\n.. math::\n\n L'_i = \\frac{L_i + 2 p_i - d_i (k_i - 1) - 1}{s_i} + 1,\n\nwhere :math:`L_i` is the spatial size, :math:`p_i` is the padding, :math:`d_i` is the dilation, :math:`k_i` is the kernel size, and :math:`s_i` is the stride for :math:`i`-th spatial dimension. The same calculation can also be applied to the other spatial dimensions."
- }
- ],
- "category": "Layer"
- },
- {
- "name": "FusedConvolution",
- "description": "Fused operation of Pad, Convolution, Batch Normalization, Add2 and Activation.\n\nThis is an equivalent operation to the following,\nbut may be more computationally efficient depending on the backend implementation\n(currently we don't provide an efficient implementation on any backend).\n\n.. code-block:: python\n\n h = F.pad(x, *pad_opts)\n h = F.convolution(h, weight, bias, pad=(0, ...), *conv_opts)\n h = F.batch_normalization(h, beta, gamma, mean, variance, *bn_opts)\n y = F.relu(h + z)\n\nYou can optionally disable either of pad, batch normalization, residual addition and activation.",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "N-D array of input."
- },
- {
- "name": "weight",
- "type": "nnabla.Variable",
- "description": "`weight` in :meth:`~nnabla.functions.convolution`."
- },
- {
- "name": "bias",
- "type": "nnabla.Variable",
- "option": "optional",
- "description": "`bias` in :meth:`~nnabla.functions.convolution`."
- },
- {
- "name": "beta",
- "type": "nnabla.Variable",
- "option": "optional",
- "description": "`beta` in :meth:`~nnabla.functions.batch_normalization`."
- },
- {
- "name": "gamma",
- "type": "nnabla.Variable",
- "option": "optional",
- "description": "`gamma` in :meth:`~nnabla.functions.batch_normalization`."
- },
- {
- "name": "mean",
- "type": "nnabla.Variable",
- "option": "optional",
- "description": "`mean` in :meth:`~nnabla.functions.batch_normalization`."
- },
- {
- "name": "variance",
- "type": "nnabla.Variable",
- "option": "optional",
- "description": "`variance` in :meth:`~nnabla.functions.batch_normalization`."
- },
- {
- "name": "z",
- "type": "nnabla.Variable",
- "option": "optional",
- "description": "N-D array of a residual input. By specifying None, the activation function will follow immediately after BN operation."
- }
- ],
- "attributes": [
- {
- "name": "base_axis",
- "type": "int64",
- "default": 1,
- "description": "`base_axis` in :meth:`~nnabla.functions.convolution`. Note that the batch normalization `axes` is determined by this and `channel_last` option."
- },
- {
- "name": "pad",
- "type": "shape",
- "default": "(0,) * (len(x.shape) - (base_axis+1))",
- "description": "`pad_width` in :meth:`~nnabla.functions.pad`.\nIf `len(pad) == (len(x.shape) - (base_axis+1))`, considered as `pad` in :meth:`~nnabla.functions.convolution`."
- },
- {
- "name": "stride",
- "type": "shape",
- "default": "(1,) * (len(x.shape) - (base_axis+1))",
- "description": "`stride` in :meth:`~nnabla.functions.convolution`."
- },
- {
- "name": "dilation",
- "type": "shape",
- "default": "(1,) * (len(x.shape) - (base_axis+1))",
- "description": "`dilation` in :meth:`~nnabla.functions.convolution`."
- },
- {
- "name": "group",
- "type": "int64",
- "default": 1,
- "description": "`group` in :meth:`~nnabla.functions.convolution`."
- },
- {
- "name": "channel_last",
- "type": "boolean",
- "default": false,
- "description": "`channel_last` in :meth:`~nnabla.functions.convolution`."
- },
- {
- "name": "decay_rate",
- "type": "float32",
- "default": 0.9,
- "description": "`decay_rate` in :meth:`~nnabla.functions.batch_normalization`."
- },
- {
- "name": "eps",
- "type": "float32",
- "default": 1e-05,
- "description": "`eps` in :meth:`~nnabla.functions.batch_normalization`."
- },
- {
- "name": "batch_stat",
- "type": "boolean",
- "default": true,
- "description": "`batch_stat` in :meth:`~nnabla.functions.batch_normalization`."
- },
- {
- "name": "nonlinearity",
- "type": "string",
- "default": "relu",
- "description": "Activation type as string. The following is a list of available activation types\nand optional parameters specified as a vector of float by `nonlinearity_args`.\n\n=============== ===============================\nActivation type Arguments (`nonlinearity_args`)\n=============== ===============================\nidentity No argument\nrelu No argument\nsigmoid No argument\ntanh No argument\nleaky_relu [alpha] (see LeakyReLU doc)\nelu [alpha] (see ELU doc)\nrelu6 No argument\n=============== ==============================="
- },
- {
- "name": "nonlinearity_args",
- "type": "float32[]",
- "default": "list()",
- "description": "Optional arguments of nonlinearity as a vector of float.\nSee the description of the `nonlinearity` argument."
- },
- {
- "name": "pad_mode",
- "type": "string",
- "default": "constant",
- "description": "`mode` in :meth:`~nnabla.functions.pad`."
- },
- {
- "name": "constant_value",
- "type": "float32",
- "default": 0.0,
- "description": "`constant_value` in :meth:`~nnabla.functions.pad`."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array"
- }
- ],
- "category": "Layer"
- },
- {
- "name": "DepthwiseConvolution",
- "description": "N-D Depthwise Convolution with bias.\n\nReferences:\n\n * `F. Chollet. Xception: Deep Learning with Depthwise Separable Convolutions.\n <https://arxiv.org/abs/1610.02357>`_",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": ":math:`(B + 1 + N)`-D array (:math:`M_1 \\times ... \\times M_B \\times C \\times L_1 \\times ... \\times L_N`)."
- },
- {
- "name": "weight",
- "type": "nnabla.Variable",
- "description": ":math:`(1 + N)`-D array (:math:`C \\times K_1 \\times ... \\times K_N`)."
- },
- {
- "name": "bias",
- "type": "nnabla.Variable",
- "option": "optional",
- "description": "Bias vector (:math:`C'`)."
- }
- ],
- "attributes": [
- {
- "name": "base_axis",
- "type": "int64",
- "default": 1,
- "description": "base axis :math:`B`."
- },
- {
- "name": "pad",
- "type": "shape",
- "default": "(0,) * (len(x.shape) - (base_axis+1))",
- "description": "Padding sizes for dimensions."
- },
- {
- "name": "stride",
- "type": "shape",
- "default": "(1,) * (len(x.shape) - (base_axis+1))",
- "description": "Stride sizes for dimensions."
- },
- {
- "name": "dilation",
- "type": "shape",
- "default": "(1,) * (len(x.shape) - (base_axis+1))",
- "description": "Dilation sizes for dimensions."
- },
- {
- "name": "multiplier",
- "type": "int64",
- "default": 1,
- "description": "Number of output feature maps per input feature map."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": ":math:`(B + 1 + N)`-D array (:math:`M_1 \\times ... \\times M_B \\times C' \\times L'_1 \\times ... \\times L'_N`).\n\nThe output map size :math:`C'` is :math:`C` multiplied by :math:`m`\n\n.. math::\n\n C' = m \\times C,\n\nwhere :math:`m` is the multiplier.\n\nA spatial size of the output is calculated as\n\n.. math::\n\n L'_i = \\frac{L_i + 2 p_i - d_i (k_i - 1) - 1}{s_i} + 1,\n\nwhere :math:`L_i` is the spatial size, :math:`p_i` is the padding, :math:`d_i` is the dilation, :math:`k_i` is the kernel size, and :math:`s_i` is the stride for :math:`i`-th spatial dimension. The same calculation can also be applied to the other spatial dimensions."
- }
- ],
- "category": "Layer"
- },
- {
- "name": "Deconvolution",
- "description": "N-D deconvolution, also known as transposed convolution, with bias operates backward convolution (derivative of the output w.r.t. the input) plus channel-wise learned bias.\n\nThe weights are specified in the same manner as :meth:`~nnabla.functions.convolution` , as if it was an ordinary convolution function.\nThe forward operation of :meth:`~nnabla.functions.deconvolution` will then be operationally equivalent to the backward pass of :meth:`~nnabla.functions.convolution` .\nTherefore, the number of input channels (can be seen as output channels of forward convolution) is specified in the first dimension, and the number of the output channels divided by the number of groups is specified in the second dimension.\n\nFor `stride > 1`, a parameter-wise identical deconvolution on the output\nof a convolution may not produce the same output shape as the input to\nthe convolution if, due to striding, the convolution did not fully cover\nthe input spatial dimension. The `output_padding` parameter can then be\nused to appropriately increase the calculated output shape. Note that\nthis is used to find the output shape for the deconvolution operation,\nbut not to add zero-padding to the output.",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": ":math:`(B + 1 + N)`-D array (:math:`M_1 \\times ... \\times M_B \\times C \\times L_1 \\times ... \\times L_N`)."
- },
- {
- "name": "weight",
- "type": "nnabla.Variable",
- "description": ":math:`(2 + N)`-D array (:math:`C \\times C' \\times K_1 \\times ... \\times K_N`)."
- },
- {
- "name": "bias",
- "type": "nnabla.Variable",
- "option": "optional",
- "description": "Bias vector (:math:`C'`)."
- }
- ],
- "attributes": [
- {
- "name": "base_axis",
- "type": "int64",
- "default": 1,
- "description": "base axis :math:`B`."
- },
- {
- "name": "pad",
- "type": "shape",
- "default": "(0,) * (len(x.shape) - (base_axis+1))",
- "description": "Padding sizes for dimensions."
- },
- {
- "name": "stride",
- "type": "shape",
- "default": "(1,) * (len(x.shape) - (base_axis+1))",
- "description": "Stride sizes for dimensions."
- },
- {
- "name": "dilation",
- "type": "shape",
- "default": "(1,) * (len(x.shape) - (base_axis+1))",
- "description": "Dilation sizes for dimensions."
- },
- {
- "name": "group",
- "type": "int64",
- "default": 1,
- "description": "Number of groups of channels. This makes the connection across channels sparser, by grouping connections along the mapping direction."
- },
- {
- "name": "channel_last",
- "type": "boolean",
- "default": false,
- "description": "If True, the last dimension is considered as channel dimension, a.k.a NHWC order."
- },
- {
- "name": "output_padding",
- "type": "shape",
- "default": "(0,) * (len(x.shape) - (base_axis+1))",
- "description": "Additional size added to the output shape."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": ":math:`(B + 1 + N)`-D array (:math:`M_1 \\times ... \\times M_B \\times C' \\times L'_1 \\times ... \\times L'_N`).\n\nA spatial size of the output is calculated as\n\n.. math::\n\n L'_i =s_i (L_i - 1) - 2 p_i + d_i (k_i - 1) + 1,\n\nwhere :math:`s_i` is the stride, :math:`L_i` is the spatial size, :math:`p_i` is the padding, :math:`d_i` is the dilation, and :math:`k_i` is the kernel size for :math:`i`-th spatial dimension. The same calculation can also be applied to the other spatial dimensions."
- }
- ],
- "category": "Layer"
- },
- {
- "name": "DepthwiseDeconvolution",
- "description": "Depthwise deconvolution computes the transposed depthwise convolution with bias for one-dimensional and two-dimensional input data.",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": ":math:`(B + 1 + N)`-D array (:math:`M_1 \\times ... \\times M_B \\times C \\times L_1 \\times ... \\times L_N`)."
- },
- {
- "name": "weight",
- "type": "nnabla.Variable",
- "description": ":math:`(1 + N)`-D array (:math:`C \\times K_1 \\times ... \\times K_N`)."
- },
- {
- "name": "bias",
- "type": "nnabla.Variable",
- "option": "optional",
- "description": "Bias vector (:math:`C'`)."
- }
- ],
- "attributes": [
- {
- "name": "base_axis",
- "type": "int64",
- "default": 1,
- "description": "base axis :math:`B`."
- },
- {
- "name": "pad",
- "type": "shape",
- "default": "(0,) * (len(x.shape) - (base_axis+1))",
- "description": "Padding sizes for dimensions."
- },
- {
- "name": "stride",
- "type": "shape",
- "default": "(1,) * (len(x.shape) - (base_axis+1))",
- "description": "Stride sizes for dimensions."
- },
- {
- "name": "dilation",
- "type": "shape",
- "default": "(1,) * (len(x.shape) - (base_axis+1))",
- "description": "Dilation sizes for dimensions."
- },
- {
- "name": "divisor",
- "type": "int64",
- "default": 1,
- "description": "Number of input feature maps per output feature map."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": ":math:`(B + 1 + N)`-D array (:math:`M_1 \\times ... \\times M_B \\times C' \\times L'_1 \\times ... \\times L'_N`).\n\nThe output map size :math:`C'` is :math:`C` divided by :math:`d`\n\n.. math::\n\n C' = \\frac{C}{d},\n\nwhere :math:`d` is the divisor.\n\nA spatial size of the output is calculated as\n\n.. math::\n L'_i =s_i (L_i - 1) - 2 p_i + d_i (k_i - 1) + 1,\n\nwhere :math:`s_i` is the stride, :math:`L_i` is the spatial size, :math:`p_i` is the padding, :math:`d_i` is the dilation, and :math:`k_i` is the kernel size for :math:`i`-th spatial dimension. The same calculation can also be applied to the other spatial dimensions."
- }
- ],
- "category": "Layer"
- },
- {
- "name": "DeformableConvolution",
- "description": "2-D Deformable Convolution with bias.\nAnother convolution with fixed output channels must be passed externally to calculate the offsets and mask.\nMask should be normalized to :math:`[0,1]` interval.\n\n.. math::\n \\begin{eqnarray}\n y(p) = \\sum_{k=1}^{K} w_k \\cdot x(p + p_k + \\Delta p_k) \\cdot \\Delta m_k,\n \\end{eqnarray}\n\nwhere :math:`x` and :math:`y` are input and output, :math:`w_k` is the weight, :math:`p` is the pixel location of interest, :math:`p_k` is the fixed displacement e.g., :math:`p_k \\in \\{(-1, -1), (-1, 0), \\ldots (1, 1)\\}` for the 2D 3x3 receptive field, :math:`\\Delta p_k` is the learnable displacement, and :math:`\\Delta m_k` is the learnable scale normalized in :math:`[0, 1]` by a function like the sigmoid. Note that :math:`\\Delta p_k` and :math:`\\Delta m_k` are sample-dependent, location-dependent, and feature-independent.\n\nReferences:\n\n * `Dai et al., Deformable Convolutional Networks.\n <https://arxiv.org/abs/1703.06211>`_\n\n * `Zhu et al., Deformable ConvNets v2: More Deformable, Better Results.\n <https://arxiv.org/abs/1811.11168>`_",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": ":math:`(B + 1 + N)`-D array (:math:`M_1 \\times ... \\times M_B \\times C \\times L_1 \\times ... \\times L_N`)."
- },
- {
- "name": "weight",
- "type": "nnabla.Variable",
- "description": ":math:`(2 + N)`-D array (:math:`C' \\times C \\times K_1 \\times ... \\times K_N`)."
- },
- {
- "name": "offset",
- "type": "nnabla.Variable",
- "description": "Offsets for deformable convolutions. Shape is fixed to :math:`(N, deformable{\\_}group \\times 2 \\times Kh \\times Kw, H, W)`. Offsets must be calculated externally through a separate convolution layer."
- },
- {
- "name": "mask",
- "type": "nnabla.Variable",
- "option": "optional",
- "description": "Normalized mask for deformable convolutions v2. Shape is fixed to :math:`(N, deformable{\\_}group \\times Kh \\times Kw, H, W)`. Masks must be calculated externally together with the offsets through a separate convolution layer."
- },
- {
- "name": "bias",
- "type": "nnabla.Variable",
- "option": "optional",
- "description": "Bias vector (:math:`C'`)."
- }
- ],
- "attributes": [
- {
- "name": "base_axis",
- "type": "int64",
- "default": 1,
- "description": "base axis :math:`B`."
- },
- {
- "name": "pad",
- "type": "shape",
- "default": "(0,) * (len(x.shape) - (base_axis+1))",
- "description": "Padding sizes for dimensions."
- },
- {
- "name": "stride",
- "type": "shape",
- "default": "(1,) * (len(x.shape) - (base_axis+1))",
- "description": "Stride sizes for dimensions."
- },
- {
- "name": "dilation",
- "type": "shape",
- "default": "(1,) * (len(x.shape) - (base_axis+1))",
- "description": "Dilation sizes for dimensions."
- },
- {
- "name": "group",
- "type": "int64",
- "default": 1,
- "description": "Number of groups of channels. This makes the connection across channels sparser, by grouping connections along the mapping direction."
- },
- {
- "name": "deformable_group",
- "type": "int64",
- "default": 1,
- "description": "Number of deformable groups of channels."
- },
- {
- "name": "channel_last",
- "type": "boolean",
- "default": false,
- "description": "If True, the last dimension is considered as channel dimension, a.k.a NHWC order."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": ":math:`(B + 1 + N)`-D array (:math:`M_1 \\times ... \\times M_B \\times C' \\times L'_1 \\times ... \\times L'_N`).\n\nA spatial size of the output is calculated as\n\n.. math::\n\n L'_i = \\frac{L_i + 2 p_i - d_i (k_i - 1) - 1}{s_i} + 1,\n\nwhere :math:`L_i` is the spatial size, :math:`p_i` is the padding, :math:`d_i` is the dilation, :math:`k_i` is the kernel size, and :math:`s_i` is the stride for :math:`i`-th spatial dimension. The same calculation can also be applied to the other spatial dimensions."
- }
- ],
- "category": "Layer"
- },
- {
- "name": "AdaptiveSeparableConvolution",
- "description": "2-D Adaptive Separable Convolution for NCHW (the channel-first tensor).\nSample and pixel dependent vertical and horizontal kernels are dynamically generated ones,\nwhich are used for approximating a feature-independent 2-D kernel in this function.\nThus, the kernel used in this function is dependent on samples and pixels but independent on features.\n\nIf the padding is needed, use the pad function to the input :math:`x` before this function.\n\nAdaptive separable convolution is formulated as\n\n.. math::\n\n \\tilde{I}(c, h, w) = \\sum_{j, i} K_v(j, h, w) \\times K_h(i, h, w) \\times I(c, h + j, w + i),\n\nwhere :math:`I(c, h, w)` and :math:`\\tilde{I}(c, h, w)` are the input and output images\nat :math:`c`-th channel, :math:`h`-th height, :math:`w`-th width.\n:math:`K_V(:, h, w)` and :math:`K_h(:, h, w)` are vertical and horizontal 1-D kernels\nat :math:`h`-th height and :math:`w`-th width.\n\nReferences:\n\n * `Simon Niklaus, Long Mai, Feng Liu,\n Video Frame Interpolation via Adaptive Separable Convolution,\n <https://arxiv.org/abs/1708.01692>`_\n\n * `Mart Kartasev, Carlo Rapisarda, Dominik Fay,\n Implementing Adaptive Separable Convolution for Video Frame Interpolation,\n <https://arxiv.org/abs/1809.07759>`_",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": ":math:`4-D` array (:math:`B \\times C \\times H \\times W`)"
- },
- {
- "name": "vertical_kernel",
- "type": "nnabla.Variable",
- "description": ":math:`4-D` array (:math:`B \\times K_v \\times H \\times W`)"
- },
- {
- "name": "horizontal_kernel",
- "type": "nnabla.Variable",
- "description": ":math:`4-D` array (:math:`B \\times K_h \\times H \\times W`)"
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": ":math:`4-D` array (:math:`B \\times C \\times H - K_v + 1 \\times W - K_h + 1`)"
- }
- ],
- "category": "Layer"
- },
- {
- "name": "MaxPooling",
- "description": "Max pooling. It pools the maximum values inside the scanning kernel:\n\n.. math::\n y_{i_1, i_2} = \\max_{k_1, k_2 \\in K} (x_{i_1 + k_1, i_2 + k_2})\n\nwhere :math:`x_{i_1 + k_1, i_2 + k_2}` is the input and :math:`y_{i_1, i_2}` is the output.",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "Input variable."
- }
- ],
- "attributes": [
- {
- "name": "kernel",
- "required": true,
- "type": "shape",
- "description": "Kernel sizes for each spatial axis."
- },
- {
- "name": "stride",
- "type": "shape",
- "default": "kernel",
- "description": "Subsampling factors for each spatial axis."
- },
- {
- "name": "ignore_border",
- "type": "boolean",
- "default": true,
- "description": "If false, kernels covering borders are also considered for the output."
- },
- {
- "name": "pad",
- "type": "shape",
- "default": "(0,) * len(kernel)",
- "description": "Border padding values for each spatial axis. Padding will be added both sides of the dimension."
- },
- {
- "name": "channel_last",
- "type": "boolean",
- "default": false,
- "description": "If True, the last dimension is considered as channel dimension, a.k.a NHWC order."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "Maximum values variable"
- }
- ],
- "category": "Pool"
- },
- {
- "name": "AveragePooling",
- "description": "Average pooling. It pools the averaged values inside the scanning kernel:\n\n.. math::\n y_{i_1, i_2} = \\frac{1}{K_1 K_2} \\sum_{k1} \\sum_{k2} x_{i_1 + k_1, i_2 + k_2}\n\nwhere :math:`x_{i_1 + k_1, i_2 + k_2}` is the input and :math:`y_{i_1, i_2}` is the output.",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "Input variable."
- }
- ],
- "attributes": [
- {
- "name": "kernel",
- "required": true,
- "type": "shape",
- "description": "Kernel sizes for each spatial axis."
- },
- {
- "name": "stride",
- "type": "shape",
- "default": "kernel",
- "description": "Subsampling factors for each spatial axis."
- },
- {
- "name": "ignore_border",
- "type": "boolean",
- "default": true,
- "description": "If false, kernels covering borders are also considered for the output."
- },
- {
- "name": "pad",
- "type": "shape",
- "default": "(0,) * len(kernel)",
- "description": "Border padding values for each spatial axis. Padding will be added both sides of the dimension."
- },
- {
- "name": "channel_last",
- "type": "boolean",
- "default": false,
- "description": "If True, the last dimension is considered as channel dimension, a.k.a NHWC order."
- },
- {
- "name": "including_pad",
- "type": "boolean",
- "default": true,
- "description": "If true, border padding values are considered for the output."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "Average values variable"
- }
- ],
- "category": "Pool"
- },
- {
- "name": "GlobalAveragePooling",
- "description": ".. WARNING::\n This function is experimental support, so please do not actively use it.\n\nGlobal average pooling. It pools an averaged value from the whole image",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "Input variable."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "Average values variable"
- }
- ],
- "category": "Pool"
- },
- {
- "name": "SumPooling",
- "description": "Sum pooling. It pools the summed values inside the scanning kernel:\n\n.. math::\n y_{i_1, i_2} = \\sum_{k1} \\sum_{k2} x_{i_1 + k_1, i_2 + k_2}\n\nwhere :math:`x_{i_1 + k_1, i_2 + k_2}` is the input and :math:`y_{i_1, i_2}` is the output.",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "Input variable."
- }
- ],
- "attributes": [
- {
- "name": "kernel",
- "required": true,
- "type": "shape",
- "description": "Kernel sizes for each spatial axis."
- },
- {
- "name": "stride",
- "type": "shape",
- "default": "kernel",
- "description": "Subsampling factors for each spatial axis."
- },
- {
- "name": "ignore_border",
- "type": "boolean",
- "default": true,
- "description": "If false, kernels covering borders are also considered for the output."
- },
- {
- "name": "pad",
- "type": "shape",
- "default": "(0,) * len(kernel)",
- "description": "Border padding values for each spatial axis. Padding will be added both sides of the dimension."
- },
- {
- "name": "channel_last",
- "type": "boolean",
- "default": false,
- "description": "If True, the last dimension is considered as channel dimension, a.k.a NHWC order."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "Summed values variable"
- }
- ],
- "category": "Pool"
- },
- {
- "name": "Unpooling",
- "description": "Inverse operation of pooling. It spreads the input values:\n\n.. math::\n y_{k_1 i_1 + j_1, k_2 i_2 + j_2} = x_{i_1, i_2}\n\nwhere :math:`x_{i_1, i_2}` is the input and :math:`y_{k_1 i_1 + j_1, k_2 i_2 + j_2}` is the output.",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "Input variable."
- }
- ],
- "attributes": [
- {
- "name": "kernel",
- "required": true,
- "type": "shape",
- "description": "Kernel sizes for each spatial axis."
- },
- {
- "name": "channel_last",
- "type": "boolean",
- "default": false,
- "description": "If True, the last dimension is considered as channel dimension, a.k.a NHWC order."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "Spread values variable"
- }
- ],
- "category": "Layer"
- },
- {
- "name": "Embed",
- "description": "Embed slices of a matrix/tensor with indexing array/tensor.",
- "inputs": [
- {
- "name": "x0",
- "type": "nnabla.Variable",
- "description": "Indices with shape :math:`(I_0, ..., I_N)`"
- },
- {
- "name": "w",
- "type": "nnabla.Variable",
- "description": "Weights with shape :math:`(W_0, ..., W_M)`"
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "Output with shape :math:`(I_0, ..., I_N, W_1, ..., W_M)`"
- }
- ],
- "category": "Layer"
- },
- {
- "name": "RoiAlign",
- "description": "Map Regions of Interest (RoI) defined by bounding `boxes` to features\n of `output_size` height and width using bilinear interpolation with\n `sampling_ratio` points in the interpolation grid.\n\n >>> import numpy as np, nnabla as nn, nnabla.functions as F\n >>> nn.set_auto_forward(True)\n >>> input = F.pad(F.constant(1, (1, 1, 2, 2)) * 2, (1, 1, 1, 1), \"constant\", 1)\n >>> print(input.d)\n [[[[1. 1. 1. 1.]\n [1. 2. 2. 1.]\n [1. 2. 2. 1.]\n [1. 1. 1. 1.]]]]\n >>> boxes = nn.Variable.from_numpy_array([[0, 0, 0, 4, 4], [0, 1, 1, 3, 3]])\n >>> output = F.roi_align(input, boxes, (2, 2))\n >>> print(output.d[0])\n [[[1.25 1.25]\n [1.25 1.25]]]\n >>> print(output.d[1])\n [[[2. 2. ]\n [2. 2. ]]]\n\n The `spatial_scale` argument tuple may be used to appropriately scale\n the box coordinates, for example, to scale normalized box coordinate to\n the input height and width dimensions.\n\n >>> input = F.reshape(F.arange(1, 13), (1, 1, 3, 4))\n >>> print(input.d)\n >>> boxes = nn.Variable.from_numpy_array([[0, 1/4, 1/3, 3/4, 2/3]])\n >>> output = F.roi_align(input, boxes, (1, 2), spatial_scale=(4, 3))\n >>> print(output.d)\n [[[[6. 7.]]]]\n\n References:\n\n * `He et al., Mask R-CNN. <https://arxiv.org/abs/1703.06870v3>`_",
- "inputs": [
- {
- "name": "input",
- "type": "nnabla.Variable",
- "description": "N-D array with shape :math:`(N, H, W, C)` or :math:`(N, C, H, W)`."
- },
- {
- "name": "boxes",
- "type": "nnabla.Variable",
- "description": "N-D array with shape :math:`(K, 5)` containing box coordinates in (b, x1, y1, x2, y2) format where b is the batch index. Note that an invalid (out-of-range) batch index will generate an error only when running on CPU; when using a GPU context the batch index values are clipped to the range of input samples."
- }
- ],
- "attributes": [
- {
- "name": "output_size",
- "required": true,
- "type": "shape",
- "description": "the height and width of the output feature maps."
- },
- {
- "name": "spatial_scale",
- "type": "float32[]",
- "default": "(1.0, 1.0)",
- "description": "Scaling factor from box to input coordinates, as (x, y)."
- },
- {
- "name": "sampling_ratio",
- "type": "int64",
- "default": -1,
- "description": "The number of sampling points used for interpolation. Computed as `ceil((y2 - y1) / output_size[0])` for height and likewise for width if `sampling_ratio <= 0`."
- },
- {
- "name": "channel_last",
- "type": "boolean",
- "default": false,
- "description": "If True, the last dimension is considered as channel dimension, a.k.a NHWC order."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array with shape :math:`(K, C, output\\_size[0], output\\_size[1])`\nor :math:`(K, output\\_size[0], output\\_size[1], C)`."
- }
- ],
- "category": "Layer"
- },
- {
- "name": "Sigmoid",
- "description": "Element-wise sigmoid function.\n\n.. math::\n\n f(x) = \\frac{1}{1 + \\exp(-x)},",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "Input"
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "Output"
- }
- ],
- "category": "Activation"
- },
- {
- "name": "Swish",
- "description": "Element-wise swish function, by Ramachandran et al. (2017).\n\n.. math::\n\n y_i = \\frac{x_i}{1 + \\exp(-x_i)},\n\nReferences:\n * `Prajit Ramachandran, Barret Zoph, and Quoc V. Le, Swish: a Self-Gated Activation Function, arXiv:1710.05941 [cs.NE]\n <https://arxiv.org/abs/1710.05941>`_",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "Input"
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "Output"
- }
- ],
- "category": "Activation"
- },
- {
- "name": "Tanh",
- "description": "Element-wise hyperbolic tangent (tanh) function.\n\n.. math::\n y_i = \\tanh (x_i)",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "N-D array"
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array with the same shape as x"
- }
- ],
- "category": "Activation"
- },
- {
- "name": "ReLU",
- "description": "Element-wise Rectified Linear Unit (ReLU) function.\n\n.. math::\n y_i = \\max (0, x_i)",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "N-D array"
- }
- ],
- "attributes": [
- {
- "name": "inplace",
- "type": "boolean",
- "default": false,
- "description": "This option is obsolete and ignored. Output is never in-placed with input."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array with the same shape as x"
- }
- ],
- "category": "Activation"
- },
- {
- "name": "LeakyReLU",
- "description": "Element-wise Leaky Rectified Linear Unit (ReLU) function.\n\nIt is defined as:\n\n.. math::\n y_i = \\alpha * \\min(0, x_i) + \\max (0, x_i)",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "N-D array"
- }
- ],
- "attributes": [
- {
- "name": "alpha",
- "type": "float32",
- "default": 0.1,
- "description": "The slope value multiplied to negative numbers. :math:`\\alpha` in the definition."
- },
- {
- "name": "inplace",
- "type": "boolean",
- "default": false,
- "description": "This option is obsolete and ignored. Output is never in-placed with input."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array with the same shape as x"
- }
- ],
- "category": "Activation"
- },
- {
- "name": "Softmax",
- "description": "Softmax normalization. Calculates\n\n.. math::\n y_i = \\frac{\\exp(x_i)}{\\sum_j \\exp(x_j)}\n\nalong the dimension specified by `axis`, where :math:`x_i` is the input and :math:`y_i` is the output.",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "N-D array. Typically indicates a score."
- }
- ],
- "attributes": [
- {
- "name": "axis",
- "type": "int64",
- "default": "len(x.shape) - 1",
- "description": "Axis normalization is taken."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array with the same shape as x"
- }
- ],
- "category": "Activation"
- },
- {
- "name": "LogSoftmax",
- "description": "Fused operation of Softmax normalization followed by log, which is defined as\n\n.. math::\n y_i = \\log \\frac{\\exp(x_i)}{\\sum_j \\exp(x_j)},\n\nwhere :math:`x_i` is the input and :math:`y_i` is the output at i-th channel.\nAn advantage of this fusion is reducing the numerical instability due to the log application.\n\nThe original definition can be rewritten as\n\n.. math::\n y_i = x_i - \\max_j(x_j) - \\log\\left(\\sum_j \\exp(x_j - \\max_k(x_k))\\right).\n\nIt is more stable as a log is always applied to a value :math:`\\ge 1`, while a log can be evaluated for 0 in the non-fused operation.\n\nAlso, backward gradient computation is more stable than the original one as it doesn't perform division by x due to a gradient of log. The definition is as follows.\n\n.. math::\n dx_i = dy_i - \\exp(y_i) * \\sum_j dy_j\n\nwhere :math:`dx_i` and :math:`dy_i` denote gradients of loss\nwrt :math:`x_i` and :math:`y_i` respectively.",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "N-D array. Typically indicates a score."
- }
- ],
- "attributes": [
- {
- "name": "axis",
- "type": "int64",
- "default": "len(x.shape) - 1",
- "description": "Axis normalization is taken."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array with the same shape as x"
- }
- ],
- "category": "Activation"
- },
- {
- "name": "ELU",
- "description": "Element-wise Exponential Linear Unit (ELU) function.\n\n.. math::\n y_i= \\left\\{\n \\begin{array}{ll}\n x_i & (x > 0)\\\\\n \\alpha (\\exp(x_i) - 1) & (x \\leq 0)\n \\end{array} \\right..\n\nReferences:\n * `Clevert et al., Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs).\n <http://arxiv.org/abs/1511.07289>`_",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "N-D array"
- }
- ],
- "attributes": [
- {
- "name": "alpha",
- "type": "float64",
- "default": 1.0,
- "description": "Coefficient for negative outputs. :math:`\\alpha` in definition"
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array with the same shape as x"
- }
- ],
- "category": "Activation"
- },
- {
- "name": "SELU",
- "description": "Element-wise Scaled Exponential Linear Unit (SELU) function by Klambauer et al. (2017).\n\n.. math::\n y_i= \\lambda \\left\\{\n \\begin{array}{ll}\n x_i & (x > 0)\\\\\n \\alpha (\\exp(x_i) - 1) & (x \\leq 0)\n \\end{array} \\right..\n\nThe coefficients :math:`\\lambda` and :math:`\\alpha` default to the following values :math:`\\lambda_{01}` and :math:`\\alpha_{01}`, respectively, provided by Klambauer et al. (2017):\n\n.. math::\n \\begin{array}{lll}\n \\lambda_{01} &=& \\left( 1 - \\operatorname{erfc}\\left( \\frac{1}{\\sqrt{2}} \\right) \\sqrt{e} \\right)\n \\sqrt{2 \\pi} \\\\\n && \\left(\n 2 \\operatorname{erfc} \\left( \\sqrt{2} \\right) e^2\n + \\pi \\operatorname{erfc}\\left( \\frac{1}{\\sqrt{2}} \\right)^2 e\n \\right. \\\\\n && \\left.\n - 2(2 + \\pi) \\operatorname{erfc} \\left( \\frac{1}{\\sqrt{2}} \\right) \\sqrt{e}\n + \\pi + 2\n \\right)^{-1/2} \\\\\n &\\approx& 1.0507 \\\\\n \\alpha_{01} &=& - \\frac\n {\\sqrt {\\frac {2}{\\pi}}}\n {\\operatorname{erfc} \\left( \\frac{1}{\\sqrt{2}} \\right) \\exp \\left(\\frac {1} {2} \\right) - 1} \\\\\n &\\approx& 1.67326\n \\end{array}\n\n\nReferences:\n * `Klambauer, G., Unterthiner, T., Mayr, A., & Hochreiter, S. (2017).\n Self-Normalizing Neural Networks. In Advances in Neural Information\n Processing Systems (NIPS). <https://arxiv.org/abs/1706.02515>`_",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "N-D array"
- }
- ],
- "attributes": [
- {
- "name": "scale",
- "type": "float64",
- "default": 1.05070098735548,
- "description": "The coefficient :math:`\\lambda` in the definition."
- },
- {
- "name": "alpha",
- "type": "float64",
- "default": 1.673263242354377,
- "description": "The coefficient :math:`\\alpha` in the definition."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array with the same shape as x"
- }
- ],
- "category": "Activation"
- },
- {
- "name": "CReLU",
- "description": "Element-wise Concatenated Rectified Linear Unit (CReLU) function.\nThis function calculates the ReLU of :math:`x` and :math:`-x` , then concatenates the results together at a specified axis,\nand returns the resulting array.\n\n\nReferences:\n * `Wenling Shang, Kihyuk Sohn, Diogo Almeida, Honglak Lee.\n Understanding and Improving Convolutional Neural Networks\n via Concatenated Rectified Linear Units.\n <https://arxiv.org/abs/1603.05201>`_",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "N-D array."
- }
- ],
- "attributes": [
- {
- "name": "axis",
- "type": "int64",
- "default": 1,
- "description": "The ReLU activations of positive inputs and negative inputs are concatenated at axis."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array where axis dimension is doubled by concatenating."
- }
- ],
- "category": "Activation"
- },
- {
- "name": "CELU",
- "description": "Element-wise Concatenated Exponential Linear Unit (CELU) function.\nConcatenates ELU outputs of positive and negative inputs together at specified axis.",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "N-D array."
- }
- ],
- "attributes": [
- {
- "name": "alpha",
- "type": "float64",
- "default": 1.0,
- "description": "Coefficient for negative outputs. :math:`\\alpha` in definition."
- },
- {
- "name": "axis",
- "type": "int64",
- "default": 1,
- "description": "The ELU activations of positive inputs and negative inputs are concatenated at axis."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array where axis dimension is doubled by concatenating."
- }
- ],
- "category": "Activation"
- },
- {
- "name": "PReLU",
- "description": "Element-wise Parametrized Rectified Linear Unit function. Calculates:\n\n.. math::\n y_i = \\max(0, x_i) + w_i \\min(0, x_i)\n\nwhere negative slope :math:`w` is learned and can vary across channels (an\naxis specified with `base_axis`).",
- "inputs": [
- {
- "name": "x0",
- "type": "nnabla.Variable",
- "description": "(N-D array) Input"
- },
- {
- "name": "x1",
- "type": "nnabla.Variable",
- "description": "(N-D array) Weights"
- }
- ],
- "attributes": [
- {
- "name": "base_axis",
- "type": "int64",
- "default": 1,
- "description": "Dimensions up to base_axis is treated as sample dimension."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array."
- }
- ],
- "category": "Activation"
- },
- {
- "name": "GELU",
- "description": "Gaussian Error Linear Unit (GELU) function.\n\n.. math::\n GELU(x) = xP(X \\leq x) = x \\Phi (x)\n\nwhich is approximated by\n\n.. math::\n GELU(x) = 0.5x (1 + \\tanh ( \\sqrt{2/\\pi}(x + 0.044715x^3) ))\n\nReferences:\n * `Dan Hendrycks and Kevin Gimpel.\n Gaussian Error Linear Units (GELUs).\n <https://arxiv.org/abs/1606.08415>`_",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "N-D array"
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array with the same shape as x"
- }
- ],
- "category": "Activation"
- },
- {
- "name": "Mish",
- "description": "Mish activation function.\n\n.. math::\n Mish(x) = x \\tanh(\\log(1+\\exp(x)))\n\n\nReferences:\n * `Diganta Misra.\n Mish: A Self Regularized Non-Monotonic Neural Activation Function.\n <https://arxiv.org/abs/1908.08681>`_",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "N-D array"
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array with the same shape as x"
- }
- ],
- "category": "Activation"
- },
- {
- "name": "ReLU6",
- "description": "Element-wise ReLU6 function.\nCapping ReLU activation to 6 is often observed to learn sparse features earlier.\n\n.. math::\n ReLU6(x) = \\min(\\max(0,x),6)",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "N-D array"
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array with the same shape as x"
- }
- ],
- "category": "Activation"
- },
- {
- "name": "HardSigmoid",
- "description": "Segment-wise linear approximation of sigmoid.\nPreferable when speed of computation is more important than precision.\nReturns :math:`0` if :math:`x < -2.5`.\nReturns :math:`1` if :math:`x> 2.5`.\nReturns :math:`0.2x + 0.5` if :math:`-2.5 <= x <= 2.5`.",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "N-D array"
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array with the same shape as x"
- }
- ],
- "category": "Activation"
- },
- {
- "name": "HardTanh",
- "description": "Element-wise HardTanh function.\nComputationally cheaper than Tanh function.\nReturns :math:`1` if :math:`x > 1`.\nReturns :math:`-1` if :math:`x < -1`.\nReturns :math:`x` otherwise.",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "N-D array"
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array with the same shape as x"
- }
- ],
- "category": "Activation"
- },
- {
- "name": "LogSigmoid",
- "description": "Element-wise LogSigmoid function.\n\n.. math::\n LogSigmoid(x) = \\log(1/(1+\\exp(-x_i)))",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "N-D array"
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array with the same shape as x"
- }
- ],
- "category": "Activation"
- },
- {
- "name": "SoftPlus",
- "description": "Element-wise SoftPlus function.\nUnlike Sigmoid and Tanh that have upper and lower bound, SoftPlus is only lower-bounded by 0.\n\n.. math::\n SoftPlus(x) = \\frac{1}{\\beta} * \\log(1+\\exp(\\beta * x_i))",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "N-D array"
- }
- ],
- "attributes": [
- {
- "name": "beta",
- "type": "float64",
- "default": 1.0,
- "description": "the `beta` value for SoftPlus formulation"
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array with the same shape as x"
- }
- ],
- "category": "Activation"
- },
- {
- "name": "SoftSign",
- "description": "Element-wise SoftSign.\nCan be used in place of Tanh function.\nWhile Tanh converges exponentially, SoftSign converges polynomially.\n\n.. math::\n SoftSign(x) = x/(1+|x|)",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "N-D array"
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array with the same shape as x"
- }
- ],
- "category": "Activation"
- },
- {
- "name": "TanhShrink",
- "description": "Element-wise TanhShrink function.\n\n.. math::\n TanhShrink(x) = x - \\tanh(x)",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "N-D array"
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array with the same shape as x"
- }
- ],
- "category": "Activation"
- },
- {
- "name": "Sinc",
- "description": "Element-wise Sinc function.\nUnlike other popular activation functions, it has rises and falls.\nreturns :math:`1` if :math:`x = 0`.\nreturns :math:`\\sin(x)/x` otherwise.",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "N-D array"
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array with the same shape as x"
- }
- ],
- "category": "Activation"
- },
- {
- "name": "FusedBatchNormalization",
- "description": "Batch normalization fused with add2 (adding a residual input) and activation.\n\nThis is an equivalent operation to the following,\nbut is more computationally efficient:\n\n.. code-block:: python\n\n h = F.batch_normalization(x, beta, gamma, mean, variance, *opts)\n y = F.relu(h + z)",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "N-D array of input."
- },
- {
- "name": "beta",
- "type": "nnabla.Variable",
- "description": "N-D array of beta which is learned."
- },
- {
- "name": "gamma",
- "type": "nnabla.Variable",
- "description": "N-D array of gamma which is learned."
- },
- {
- "name": "mean",
- "type": "nnabla.Variable",
- "description": "N-D array of running mean (modified during forward execution)."
- },
- {
- "name": "variance",
- "type": "nnabla.Variable",
- "description": "N-D array of running variance (modified during forward execution)."
- },
- {
- "name": "z",
- "type": "nnabla.Variable",
- "option": "optional",
- "description": "N-D array of a residual input. By specifying None, the activation function will follow immediately after BN operation."
- }
- ],
- "attributes": [
- {
- "name": "axes",
- "type": "int64[]",
- "default": "(1,)",
- "description": "Axes mean and variance are taken."
- },
- {
- "name": "decay_rate",
- "type": "float32",
- "default": 0.9,
- "description": "Decay rate of running mean and variance."
- },
- {
- "name": "eps",
- "type": "float32",
- "default": 1e-05,
- "description": "Tiny value to avoid zero division by std."
- },
- {
- "name": "batch_stat",
- "type": "boolean",
- "default": true,
- "description": "Use mini-batch statistics rather than running ones."
- },
- {
- "name": "nonlinearity",
- "type": "string",
- "default": "relu",
- "description": "Activation chosen from ('relu')."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array"
- }
- ],
- "category": "Normalization"
- },
- {
- "name": "BatchNormalization",
- "description": "Batch normalization.\n\n.. math::\n \\begin{eqnarray}\n \\mu &=& \\frac{1}{M} \\sum x_i \\\\\n \\sigma^2 &=& \\frac{1}{M} \\left(\\sum x_i - \\mu\\right)^2 \\\\\n \\hat{x}_i &=& \\frac{x_i - \\mu}{\\sqrt{\\sigma^2 + \\epsilon}} \\\\\n y_i &=& \\hat{x}_i \\gamma + \\beta.\n \\end{eqnarray}\n\n\nAt testing time, the mean and variance values used are those that were computed during training by moving average.\n\nReferences:\n\n * `Ioffe and Szegedy, Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift.\n <https://arxiv.org/abs/1502.03167>`_",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "N-D array of input."
- },
- {
- "name": "beta",
- "type": "nnabla.Variable",
- "option": "optional",
- "description": "N-D array of beta which is learned."
- },
- {
- "name": "gamma",
- "type": "nnabla.Variable",
- "option": "optional",
- "description": "N-D array of gamma which is learned."
- },
- {
- "name": "mean",
- "type": "nnabla.Variable",
- "option": "optional",
- "description": "N-D array of running mean (modified during forward execution)."
- },
- {
- "name": "variance",
- "type": "nnabla.Variable",
- "option": "optional",
- "description": "N-D array of running variance (modified during forward execution)."
- }
- ],
- "attributes": [
- {
- "name": "axes",
- "type": "int64[]",
- "default": "(1,)",
- "description": "Axes mean and variance are taken."
- },
- {
- "name": "decay_rate",
- "type": "float32",
- "default": 0.9,
- "description": "Decay rate of running mean and variance."
- },
- {
- "name": "eps",
- "type": "float32",
- "default": 1e-05,
- "description": "Tiny value to avoid zero division by std."
- },
- {
- "name": "batch_stat",
- "type": "boolean",
- "default": true,
- "description": "Use mini-batch statistics rather than running ones."
- },
- {
- "name": "no_scale",
- "type": "boolean",
- "default": false,
- "description": "If `True`, the scale term is omitted."
- },
- {
- "name": "no_bias",
- "type": "boolean",
- "default": false,
- "description": "If `True`, the bias term is omitted."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array"
- }
- ],
- "category": "Normalization"
- },
- {
- "name": "GroupNormalization",
- "description": "Applies Group Normalization over an input tensor, which is defined as:\n\n.. math::\n \\begin{eqnarray}\n \\mu^g &=& \\frac{1}{H} \\sum_{i=1}^{H} x_i^g \\\\\n \\sigma^g &=& \\sqrt{\\frac{1}{H} \\sum_{i=1}^{H} \\left(x_i^g - \\mu^g\\right)^2 + \\epsilon} \\\\\n y &=& \\frac{x - \\mu^g}{\\sigma^g} \\gamma + \\beta\n \\end{eqnarray}\n\nwhere :math:`x` and :math:`y` are input and output variable,\n:math:`\\mu^g` and :math:`\\sigma^g` are the mean and std of each group which contains `num_channels / num_groups` channels,\nand :math:`\\gamma` and :math:`\\beta` are adaptive gains and biases.\n\nThe input channels, specified by :attr:`channel_axis`, are separated into :attr:`num_groups` groups,\nand the mean and std are calculated over the each group.\nFor example, if the input shape is [B, C, H, W] (= channel_axis=1, batch_axis=0),\nan input variable is once reshaped to [B, num_groups, C / num_groups, H, W]\nand standardize by its mean and std whose shapes are [B, num_groups, 1, 1, 1].\nFinally, an output variable is reshaped again to the original input shape (= [B, C, H, W] in the case above).\n\nReferences:\n\n * `Yuxin Wu, Kaiming He, Group Normalization.\n <https://arxiv.org/abs/1803.08494>`_",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "N-D array of input."
- },
- {
- "name": "beta",
- "type": "nnabla.Variable",
- "option": "optional",
- "description": "N-D array of beta which is learned."
- },
- {
- "name": "gamma",
- "type": "nnabla.Variable",
- "option": "optional",
- "description": "N-D array of gamma which is learned."
- }
- ],
- "attributes": [
- {
- "name": "num_groups",
- "type": "int64",
- "default": 1,
- "description": "A number of groups. The channel dim of 'x' must be integer multiple of `num_groups`."
- },
- {
- "name": "channel_axis",
- "type": "int64",
- "default": 1,
- "description": "Channel axis."
- },
- {
- "name": "batch_axis",
- "type": "int64[]",
- "default": "(0,)",
- "description": "Axes mean and variance are taken."
- },
- {
- "name": "eps",
- "type": "float32",
- "default": 1e-05,
- "description": "Tiny value to avoid zero division by std."
- },
- {
- "name": "no_scale",
- "type": "boolean",
- "default": false,
- "description": "If `True`, the scale term is omitted."
- },
- {
- "name": "no_bias",
- "type": "boolean",
- "default": false,
- "description": "If `True`, the bias term is omitted."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array"
- }
- ],
- "category": "Normalization"
- },
- {
- "name": "InstanceNormalization",
- "description": "Applies Instance Normalization over an input tensor, which is defined as\n\n.. math::\n \\begin{eqnarray}\n \\mu^i &=& \\frac{1}{H} \\sum_{i=1}^{H} x_i^i \\\\\n \\sigma^i &=& \\sqrt{\\frac{1}{H} \\sum_{i=1}^{H} \\left(x_i^i - \\mu^i\\right)^2 + \\epsilon} \\\\\n y &=& \\frac{x - \\mu^i}{\\sigma^i} \\gamma + \\beta\n \\end{eqnarray}\n\nwhere :math:`x` and :math:`y` are input and output variable,\n:math:`\\mu^i` and :math:`\\sigma^i` are the mean and std of each instance which is separately calculated for each batch and channel,\nand :math:`\\gamma` and :math:`\\beta` are adaptive gains and biases.\n\nIf the input shape is [B, C, H, W] (= channel_axis=1, batch_axis=0), the shape of calculated mean and std are [B, C, 1, 1]\n\nReferences:\n\n * `Dmitry Ulyanov, Andrea Vedaldi, Victor Lempitsky, Instance Normalization: The Missing Ingredient for Fast Stylization.\n <https://arxiv.org/abs/1607.08022>`_",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "N-D array of input."
- },
- {
- "name": "beta",
- "type": "nnabla.Variable",
- "option": "optional",
- "description": "N-D array of beta which is learned."
- },
- {
- "name": "gamma",
- "type": "nnabla.Variable",
- "option": "optional",
- "description": "N-D array of gamma which is learned."
- }
- ],
- "attributes": [
- {
- "name": "channel_axis",
- "type": "int64",
- "default": 1,
- "description": "Channel axis."
- },
- {
- "name": "batch_axis",
- "type": "int64[]",
- "default": "(0,)",
- "description": "Axes mean and variance are taken."
- },
- {
- "name": "eps",
- "type": "float32",
- "default": 1e-05,
- "description": "Tiny value to avoid zero division by std."
- },
- {
- "name": "no_scale",
- "type": "boolean",
- "default": false,
- "description": "If `True`, the scale term is omitted."
- },
- {
- "name": "no_bias",
- "type": "boolean",
- "default": false,
- "description": "If `True`, the bias term is omitted."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array"
- }
- ],
- "category": "Normalization"
- },
- {
- "name": "LayerNormalization",
- "description": "Applies Layer Normalization over an input tensor, which is defined as\n\n.. math::\n \\begin{eqnarray}\n \\mu^l &=& \\frac{1}{H} \\sum_{i=1}^{H} x_i^l \\\\\n \\sigma^l &=& \\sqrt{\\frac{1}{H} \\sum_{i=1}^{H} \\left(x_i^l - \\mu^l\\right)^2 + \\epsilon} \\\\\n y &=& \\frac{x - \\mu^l}{\\sigma^l} \\gamma + \\beta\n \\end{eqnarray}\n\nwhere :math:`x` and :math:`y` are input and output variable,\n:math:`\\mu^l` and :math:`\\sigma^l` are the mean and std of each layer which is separately calculated for each batch,\nand :math:`\\beta` and :math:`\\gamma` are adaptive biases and gains.\n\nIf the input shape is [B, C, H, W] (= batch_axis=0), the shape of calculated mean and std are [B, 1, 1, 1]\n\nReferences:\n\n * `Jimmy Lei Ba, Jamie Ryan Kiros, Geoffrey E. Hinton, Layer Normalization.\n <https://arxiv.org/abs/1607.06450>`_",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "N-D array of input."
- },
- {
- "name": "beta",
- "type": "nnabla.Variable",
- "option": "optional",
- "description": "N-D array of beta which is learned."
- },
- {
- "name": "gamma",
- "type": "nnabla.Variable",
- "option": "optional",
- "description": "N-D array of gamma which is learned."
- }
- ],
- "attributes": [
- {
- "name": "batch_axis",
- "type": "int64[]",
- "default": "(0,)",
- "description": "Axes mean and variance are taken."
- },
- {
- "name": "eps",
- "type": "float32",
- "default": 1e-05,
- "description": "Tiny value to avoid zero division by std."
- },
- {
- "name": "no_scale",
- "type": "boolean",
- "default": false,
- "description": "If `True`, the scale term is omitted."
- },
- {
- "name": "no_bias",
- "type": "boolean",
- "default": false,
- "description": "If `True`, the bias term is omitted."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array"
- }
- ],
- "category": "Normalization"
- },
- {
- "name": "NormNormalization",
- "description": "Norm normalization.\n \n.. math::\n y = \\frac{x_i}{\\|x\\|_p}",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "N-D array."
- }
- ],
- "attributes": [
- {
- "name": "p",
- "type": "float32",
- "default": 2.0,
- "description": "Order of the norm."
- },
- {
- "name": "axes",
- "type": "int64[]",
- "default": "range(x.ndim)",
- "description": "Axes to be reduced. If empty list is given, all dimensions are reduced."
- },
- {
- "name": "eps",
- "type": "float32",
- "default": 1e-12,
- "description": "Epsilon for the normalization. This `eps` is added before taking the p-th root in the norm computation."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array"
- }
- ],
- "category": "Normalization"
- },
- {
- "name": "SyncBatchNormalization",
- "description": "Synchronized Batch Normalization:\n\nFor some tasks (e.g., semantic segmentation), batch size will be too small and BatchNormalization layer might not work well.\nSyncBatchNorlization layer solves these problems by synchronizing batch stats (mean and var) between multiple processes.\n\n.. math::\n \\begin{eqnarray}\n \\mu &=& \\frac{1}{M} \\sum x_i \\\\\n \\sigma^2 &=& \\frac{1}{M} \\left(\\sum x_i - \\mu\\right)^2 \\\\\n \\hat{x}_i &=& \\frac{x_i - \\mu}{\\sqrt{\\sigma^2 + \\epsilon}} \\\\\n y_i &=& \\hat{x}_i \\gamma + \\beta.\n \\end{eqnarray}\n\nReferences:\n\n * Implementing Synchronized Multi-GPU Batch Normalization https://hangzhang.org/PyTorch-Encoding/notes/syncbn.html\n\nNote:\n Since v1.32.0, the gradients of beta and gamma are not synchronized after backward computation (they had been synchronized previously).\n Users are responsible for synchronizing the gradients of beta and gamma by performing all-reduce,\n which is naturally done by performing all-reduce for gradients of all the parameters as we do usually in data parallel distributed training.",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "N-D array of input."
- },
- {
- "name": "beta",
- "type": "nnabla.Variable",
- "description": "N-D array of beta which is learned."
- },
- {
- "name": "gamma",
- "type": "nnabla.Variable",
- "description": "N-D array of gamma which is learned."
- },
- {
- "name": "mean",
- "type": "nnabla.Variable",
- "description": "N-D array of running mean (modified during forward execution)."
- },
- {
- "name": "variance",
- "type": "nnabla.Variable",
- "description": "N-D array of running variance (modified during forward execution)."
- }
- ],
- "attributes": [
- {
- "name": "comm",
- "required": true,
- "description": "The communicator"
- },
- {
- "name": "group",
- "type": "string",
- "default": "world",
- "description": "The name of the communicator group"
- },
- {
- "name": "axes",
- "type": "int64[]",
- "default": "(1,)",
- "description": "Axes mean and variance are taken."
- },
- {
- "name": "decay_rate",
- "type": "float32",
- "default": 0.9,
- "description": "Decay rate of running mean and variance."
- },
- {
- "name": "eps",
- "type": "float32",
- "default": 1e-05,
- "description": "Tiny value to avoid zero division by std."
- },
- {
- "name": "batch_stat",
- "type": "boolean",
- "default": true,
- "description": "Use mini-batch statistics rather than running ones."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array"
- }
- ],
- "category": "Normalization"
- },
- {
- "name": "TensorNormalization",
- "description": "General tensor normalization.\nInput variable `x` is normalized by mean and std calculated by `x` itself.\nMean and variance are calculated along `axes`.\nFor example, if the input shape is (B, C, H, W) and axes is [0, 1],\nthe shape of calculated mean and std are (B, C, 1 ,1).",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "N-D array of input."
- },
- {
- "name": "beta",
- "type": "nnabla.Variable",
- "option": "optional",
- "description": "N-D array of beta which is learned."
- },
- {
- "name": "gamma",
- "type": "nnabla.Variable",
- "option": "optional",
- "description": "N-D array of gamma which is learned."
- }
- ],
- "attributes": [
- {
- "name": "axes",
- "type": "int64[]",
- "default": "(1,)",
- "description": "Axes mean and variance are taken."
- },
- {
- "name": "eps",
- "type": "float32",
- "default": 1e-05,
- "description": "Tiny value to avoid zero division by std."
- },
- {
- "name": "no_scale",
- "type": "boolean",
- "default": false,
- "description": "If `True`, the scale term is omitted."
- },
- {
- "name": "no_bias",
- "type": "boolean",
- "default": false,
- "description": "If `True`, the bias term is omitted."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array"
- }
- ],
- "category": "Normalization"
- },
- {
- "name": "WeightNormalization",
- "description": "Weight normalization.\n\n.. math::\n \\mathbf{w}_{WN} = g \\dfrac{\\mathbf{w}}{\\|\\mathbf{w}\\|}\n\nwhere :math:`\\mathbf{w}` is the input weights to be normalized.\nand :math:`g` is learnable multiplication factors each of which is applied to each data at `dim`.\n\nReferences:\n * `Tim Salimans, Diederik P. Kingma, Weight Normalization: A Simple Reparameterization to Accelerate Training of Deep Neural Networks. <https://arxiv.org/abs/1602.07868>`_",
- "inputs": [
- {
- "name": "w",
- "type": "nnabla.Variable",
- "description": "N-D array of learnable weights."
- },
- {
- "name": "g",
- "type": "nnabla.Variable",
- "description": "1-D array of learnable scales."
- }
- ],
- "attributes": [
- {
- "name": "dim",
- "type": "int64",
- "default": 0,
- "description": "Output dimension. For the other dimensions, the norms are computed."
- },
- {
- "name": "eps",
- "type": "float32",
- "default": 1e-12,
- "description": "Epsilon for the normalization. This `eps` is added before taking the sqrt in the norm computation."
- }
- ],
- "outputs": [
- {
- "name": "w_wn",
- "type": "nnabla.Variable",
- "description": "N-D array"
- }
- ],
- "category": "Normalization"
- },
- {
- "name": "WeightStandardization",
- "description": "Applies Weight Standardization over an input weight, which is defined as\n\n.. math::\n \\begin{eqnarray}\n \\mu_{W_i} &=& \\frac{1}{I} \\sum_{j=1}^{I} W_{ij} \\\\\n \\sigma_{W_i} &=& \\sqrt{\\frac{1}{I} \\sum_{i=1}^{I} \\left(W_{ij} - \\mu_{W_{i}}\\right)^2 + \\epsilon} \\\\\n \\hat{W_{ij}} &=& \\frac{W_{ij} - \\mu_{W_i}}{\\sigma_{W_i}} \\\\\n y &=& \\hat{W} \\ast x\n \\end{eqnarray}\n\nReferences:\n\n * `Siyuan Qiao, Huiyu Wang, Chenxi Liu, Wei Shen, Alan Yuille, Weight Standardization\n <https://arxiv.org/pdf/1903.10520v1.pdf>`_",
- "inputs": [
- {
- "name": "w",
- "type": "nnabla.Variable",
- "description": "N-D array of learnable weights."
- }
- ],
- "attributes": [
- {
- "name": "channel_axis",
- "type": "int64",
- "default": 0,
- "description": "An axis for output channel. Default value is 0 which assumes the weights of convolution."
- },
- {
- "name": "eps",
- "type": "float32",
- "default": 1e-05,
- "description": "Tiny value to avoid zero division by std."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array"
- }
- ],
- "category": "Normalization"
- },
- {
- "name": "SpectralNorm",
- "description": "Spectral Normalization.\n\n.. math::\n\n W_{sn} = \\frac{W}{\\sigma(W)}\n\nwhere :math:`W` is the input matrix, and the :math:`\\sigma(W)` is the spectral norm of :math:`W`. The spectral norm is approximately computed by the power iteration.\n\nReferences:\n\n Takeru Miyato, Toshiki Kataoka, Masanori Koyama, Yuichi Yoshida, \n \"Spectral Normalization for Generative Adversarial Networks\", \n International Conference on Learning Representations. 2018.",
- "inputs": [
- {
- "name": "w",
- "type": "nnabla.Variable",
- "description": "N-D array of learnable weights. This is normally network parameter."
- },
- {
- "name": "u",
- "type": "nnabla.Variable",
- "description": "1-D array of singular vector. When `test == False`, the data region of `u` will be updated during forward calculation."
- }
- ],
- "attributes": [
- {
- "name": "dim",
- "type": "int64",
- "default": 0,
- "description": "Output dimension. Default is 0. If the dimension is not 0, then the specified dimension becomes the most-left dimension by transposing."
- },
- {
- "name": "itr",
- "type": "int64",
- "default": 1,
- "description": "Number of power iterations. Default is 1."
- },
- {
- "name": "eps",
- "type": "float32",
- "default": 1e-12,
- "description": "Epsilon for the normalization. This `eps` is added before taking the sqrt in the norm computation."
- },
- {
- "name": "test",
- "type": "boolean",
- "default": false,
- "description": "When in `True`, `u` will not be updated. Default is `False`."
- },
- {
- "name": "output_u",
- "type": "boolean",
- "default": false,
- "description": "Output original `u` or not. `u` is updated when `test == False` but you can get original `u` as output with this option. Default is `False`."
- }
- ],
- "outputs": [
- {
- "name": "w_sn",
- "type": "nnabla.Variable",
- "description": "Spectrally normalized :math:`W_{sn}` with the same shape as :math:`W`."
- }
- ],
- "category": "Normalization"
- },
- {
- "name": "MeanSubtraction",
- "description": "It subtracts the mean of the elements of the input array,\nand normalizes it to :math:`0`. Preprocessing arrays with this function has the effect of improving accuracy\nin various tasks such as image classification.\n\nAt training time, this function is defined as\n\n.. math::\n \\begin{eqnarray}\n \\mu &=& \\frac{1}{M} \\sum x_i \\\\\n y_i &=& x_i - \\mu\n \\end{eqnarray}\n\nAt testing time, the mean values used are those that were computed during training by moving average.\n\nNote:\n The backward performs an approximated differentiation that takes into account only the latest mini-batch.",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "N-D array of input."
- },
- {
- "name": "rmean",
- "type": "nnabla.Variable",
- "description": "N-D array of running mean (modified during forward execution)."
- },
- {
- "name": "t",
- "type": "nnabla.Variable",
- "description": "Scalar of num of iteration of running mean (modified during forward execution)."
- }
- ],
- "attributes": [
- {
- "name": "base_axis",
- "type": "int64",
- "default": 1,
- "description": "Base axis of Mean Subtraction operation. Dimensions up to base_axis is treated as sample dimension."
- },
- {
- "name": "update_running_mean",
- "type": "boolean",
- "default": true,
- "description": "Update running mean during forward execution."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array."
- }
- ],
- "category": "Normalization"
- },
- {
- "name": "ClipGradByValue",
- "description": "In forward pass, the function behaves as the identity.\n\nIn backward pass,\n\n .. math::\n g_x = \\begin{cases}\n max & (g_y > max) \\\\\n g_y & (otherwise) \\\\\n min & (g_y < min)\n \\end{cases}.\n\nA typical case for use is to prevent the gradient explosion through a whole computational graph.\nFor example, if you want to clip gradient values for each feature map,\n\n.. code-block:: python\n\n x = nn.Variable([16, 3, 32, 32])\n min = F.broadcast(nn.Variable.from_numpy_array(np.asarray([-1.0]).reshape((1, 1, 1, 1))), (16, 3, 32, 32))\n max = F.broadcast(nn.Variable.from_numpy_array(np.asarray([1.0]).reshape((1, 1, 1, 1))), (16, 3, 32, 32))\n c = F.clip_grad_by_value(x, min=min, max=max)\n h = PF.convolution(c, 64, (3, 3), pad=(1, 1))",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "N-D array of input."
- },
- {
- "name": "min",
- "type": "nnabla.Variable",
- "description": "N-D array of minimum input value by which the gradients of the `y` are clipped. Note that the shape of `min` must be the same as `x`'s and the backward to `min` is not performed."
- },
- {
- "name": "max",
- "type": "nnabla.Variable",
- "description": "N-D array of maximum input value by which the gradients of the `y` are clipped. Note that the shape of `max` must be the same as `x`'s and the backward to `max` is not performed."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array."
- }
- ],
- "category": "Normalization"
- },
- {
- "name": "ClipGradByNorm",
- "description": "In the forward pass, the function behaves like the identity.\n\nIn the backward pass,\n\n.. math::\n\n g_x = N \\times \\frac{g_y}{\\|g_y\\|_2}.\n\nwhere :math:`g_x` is the gradient w.r.t the input, :math:`g_y` is the gradient w.r.t. the output,\nand :math:`N` is `clip_norm` where the norm of :math:`g_y` becomes. this is the case that `axes` is not set.\nWhen `axes` is set, the norm is computed over `axes`.\n\nA typical case for use is to prevent the gradient explosion through a whole computational graph.\nFor example, if you want to normalize gradient values over feature axis,\n\n.. code-block:: python\n\n x = nn.Variable([16, 3, 32, 32])\n c = F.clip_grad_by_norm(x, axes=(1, ))\n h = PF.convolution(c, 64, (3, 3), pad=(1, 1))",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "N-D array of input."
- }
- ],
- "attributes": [
- {
- "name": "clip_norm",
- "type": "float32",
- "default": 1.0,
- "description": "Clip to the norm of input to `clip_norm` in the backward pass."
- },
- {
- "name": "axes",
- "type": "int64[]",
- "default": "range(x.ndim)",
- "description": "Axes to be reduced. If empty list is given, all dimensions are reduced to scalar. This is used in the forward pass."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array."
- }
- ],
- "category": "Normalization"
- },
- {
- "name": "Sum",
- "description": "Reduces a matrix along a specified axis with the sum function.",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "N-D array."
- }
- ],
- "attributes": [
- {
- "name": "axes",
- "type": "int64[]",
- "default": "range(x.ndim)",
- "description": "Axes to be reduced. If empty list is given, all dimensions are reduced to scalar."
- },
- {
- "name": "keep_dims",
- "type": "boolean",
- "default": false,
- "description": "Flag whether the reduced axis is kept."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array"
- }
- ]
- },
- {
- "name": "CumSum",
- "description": "Cumulative sum along a given axis.",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "N-D array."
- }
- ],
- "attributes": [
- {
- "name": "axis",
- "type": "int64",
- "default": 0,
- "description": "Axis along which cumulative sum is to be calculated"
- },
- {
- "name": "exclusive",
- "type": "boolean",
- "default": false,
- "description": "If True, perform exclusive cumsum"
- },
- {
- "name": "reverse",
- "type": "boolean",
- "default": false,
- "description": "If True, perform cumsum in reverse direction"
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array"
- }
- ]
- },
- {
- "name": "Mean",
- "description": "Reduces a matrix along a specified axis with the mean function.",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "N-D array."
- }
- ],
- "attributes": [
- {
- "name": "axes",
- "type": "int64[]",
- "default": "range(x.ndim)",
- "description": "Axes to be reduced."
- },
- {
- "name": "keep_dims",
- "type": "boolean",
- "default": false,
- "description": "Flag whether the reduced axis is kept."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array"
- }
- ]
- },
- {
- "name": "Max",
- "description": "Reduction along axis or axes with max operation.",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "N-D array."
- }
- ],
- "attributes": [
- {
- "name": "axes",
- "type": "int64[]",
- "default": "range(x.ndim)",
- "description": "Axes to be reduced."
- },
- {
- "name": "keep_dims",
- "type": "boolean",
- "default": false,
- "description": "Flag whether the reduced axis is kept."
- },
- {
- "name": "with_index",
- "type": "boolean",
- "default": false,
- "description": "Return values and indices."
- },
- {
- "name": "only_index",
- "type": "boolean",
- "default": false,
- "description": "Return only indices."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array"
- }
- ]
- },
- {
- "name": "Min",
- "description": "Reduction along axis or axes with min operation.",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "N-D array."
- }
- ],
- "attributes": [
- {
- "name": "axes",
- "type": "int64[]",
- "default": "range(x.ndim)",
- "description": "Axes to be reduced."
- },
- {
- "name": "keep_dims",
- "type": "boolean",
- "default": false,
- "description": "Flag whether the reduced axis is kept."
- },
- {
- "name": "with_index",
- "type": "boolean",
- "default": false,
- "description": "Return values and indices."
- },
- {
- "name": "only_index",
- "type": "boolean",
- "default": false,
- "description": "Return only indices."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array"
- }
- ]
- },
- {
- "name": "Norm",
- "description": "Reduction along axis or axes with norm operation.\n\n.. math::\n y = \\|x\\|_p = \\left( \\sum_i |x_i|^p \\right)^{\\frac{1}{p}}",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "N-D array."
- }
- ],
- "attributes": [
- {
- "name": "p",
- "type": "float32",
- "default": 2.0,
- "description": "Order of the norm."
- },
- {
- "name": "axes",
- "type": "int64[]",
- "default": "range(x.ndim)",
- "description": "Axes to be reduced. If empty list is given, all dimensions are reduced to scalar."
- },
- {
- "name": "keep_dims",
- "type": "boolean",
- "default": false,
- "description": "Flag whether the reduced axis is kept."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array"
- }
- ]
- },
- {
- "name": "Prod",
- "description": "Reduction along axis or axes with product operation.\n\nNote:\n Backward computation is not accurate in a zero value input.",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "N-D array."
- }
- ],
- "attributes": [
- {
- "name": "axes",
- "type": "int64[]",
- "default": "range(x.ndim)",
- "description": "Axes to be reduced."
- },
- {
- "name": "keep_dims",
- "type": "boolean",
- "default": false,
- "description": "Flag whether the reduced axis is kept."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array"
- }
- ]
- },
- {
- "name": "CumProd",
- "description": "Cumulative product along a given axis.\n\nNote:\n Backward computation is not accurate in a zero value input.",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "N-D array."
- }
- ],
- "attributes": [
- {
- "name": "axis",
- "type": "int64",
- "default": 0,
- "description": "Axis along which cumulative product is to be calculated"
- },
- {
- "name": "exclusive",
- "type": "boolean",
- "default": false,
- "description": "If True, perform exclusive cumprod"
- },
- {
- "name": "reverse",
- "type": "boolean",
- "default": false,
- "description": "If True, perform cumprod in reverse direction"
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array"
- }
- ]
- },
- {
- "name": "ReduceSum",
- "description": "Reduction along an axis with sum operation.\n\nNote:\n This is deprecated. Use ``sum`` instead.",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "N-D array."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array"
- }
- ]
- },
- {
- "name": "ReduceMean",
- "description": "Reduction by mean along an axis.\n\nNote:\n This is deprecated. Use ``mean`` instead.",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "N-D array"
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array"
- }
- ]
- },
- {
- "name": "Add2",
- "description": "Element-wise addition.\n\n.. math::\n y_i = x^{(0)}_i + x^{(1)}_i",
- "inputs": [
- {
- "name": "x0",
- "type": "nnabla.Variable",
- "description": "N-D array"
- },
- {
- "name": "x1",
- "type": "nnabla.Variable",
- "description": "N-D array"
- }
- ],
- "attributes": [
- {
- "name": "inplace",
- "type": "boolean",
- "default": false,
- "description": "This option is obsolete and ignored. Output is never in-placed with input."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array"
- }
- ]
- },
- {
- "name": "AddN",
- "description": "Element-wise addition.\n\n.. math::\n y_i = x^{(0)}_i + . . . + x^{(n-1)}_i",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "list": true,
- "description": "N-D arrays"
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array"
- }
- ]
- },
- {
- "name": "BcAdd2",
- "description": "Note: This shouldn't be called by users.",
- "inputs": [
- {
- "name": "x0",
- "type": "nnabla.Variable",
- "description": "N-D array"
- },
- {
- "name": "x1",
- "type": "nnabla.Variable",
- "description": "N-D array"
- }
- ],
- "attributes": [
- {
- "name": "inplace",
- "type": "boolean",
- "default": false,
- "description": "This option is obsolete and ignored. Output is never in-placed with input."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array"
- }
- ]
- },
- {
- "name": "Sub2",
- "description": "Element-wise subtraction.\n\n.. math::\n y_i = x^{(0)}_i - x^{(1)}_i",
- "inputs": [
- {
- "name": "x0",
- "type": "nnabla.Variable",
- "description": "N-D array"
- },
- {
- "name": "x1",
- "type": "nnabla.Variable",
- "description": "N-D array"
- }
- ],
- "attributes": [
- {
- "name": "inplace",
- "type": "boolean",
- "default": false,
- "description": "This option is obsolete and ignored. Output is never in-placed with input."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array"
- }
- ]
- },
- {
- "name": "Mul2",
- "description": "Element-wise multiplication.\n\n.. math::\n y_i = x^{(0)}_i x^{(1)}_i",
- "inputs": [
- {
- "name": "x0",
- "type": "nnabla.Variable",
- "description": "N-D array"
- },
- {
- "name": "x1",
- "type": "nnabla.Variable",
- "description": "N-D array"
- }
- ],
- "attributes": [
- {
- "name": "inplace",
- "type": "boolean",
- "default": false,
- "description": "This option is obsolete and ignored. Output is never in-placed with input."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array"
- }
- ]
- },
- {
- "name": "MulN",
- "description": "Element-wise multiplication.\n\n.. math::\n y_i = x^{(0)}_i . . . x^{(n-1)}_i",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "list": true,
- "description": "N-D arrays"
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array"
- }
- ]
- },
- {
- "name": "Div2",
- "description": "Element-wise division.\n\n.. math::\n y_i = \\frac{x^{(0)}_i} {x^{(1)}_i}",
- "inputs": [
- {
- "name": "x0",
- "type": "nnabla.Variable",
- "description": "N-D array"
- },
- {
- "name": "x1",
- "type": "nnabla.Variable",
- "description": "N-D array"
- }
- ],
- "attributes": [
- {
- "name": "inplace",
- "type": "boolean",
- "default": false,
- "description": "This option is obsolete and ignored. Output is never in-placed with input."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array"
- }
- ]
- },
- {
- "name": "Pow2",
- "description": "Element-wise power function.\n\n.. math::\n y_i = {(x^{(0)}_i)} ^ {x^{(1)}_i}",
- "inputs": [
- {
- "name": "x0",
- "type": "nnabla.Variable",
- "description": "N-D array"
- },
- {
- "name": "x1",
- "type": "nnabla.Variable",
- "description": "N-D array"
- }
- ],
- "attributes": [
- {
- "name": "inplace",
- "type": "boolean",
- "default": false,
- "description": "This option is obsolete and ignored. Output is never in-placed with input."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array"
- }
- ]
- },
- {
- "name": "AddScalar",
- "description": "Element-wise scalar addition.\n\n.. math::\n y_i = x_i + v",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "Input variable"
- }
- ],
- "attributes": [
- {
- "name": "val",
- "type": "float64",
- "default": 1.0,
- "description": "Value of the scalar"
- },
- {
- "name": "inplace",
- "type": "boolean",
- "default": false,
- "description": "This option is obsolete and ignored. Output is never in-placed with input."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array with the same shape as x"
- }
- ]
- },
- {
- "name": "MulScalar",
- "description": "Element-wise scalar multiplication.\n\n.. math::\n y_i = v x_i",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "Input variable"
- }
- ],
- "attributes": [
- {
- "name": "val",
- "type": "float64",
- "default": 1.0,
- "description": "Value of the scalar"
- },
- {
- "name": "inplace",
- "type": "boolean",
- "default": false,
- "description": "This option is obsolete and ignored. Output is never in-placed with input."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array with the same shape as x"
- }
- ]
- },
- {
- "name": "PowScalar",
- "description": "Element-wise scalar power function.\n\n.. math::\n y_i = (x_i) ^ v",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "Input variable"
- }
- ],
- "attributes": [
- {
- "name": "val",
- "type": "float64",
- "default": 1.0,
- "description": "Value of the scalar"
- },
- {
- "name": "inplace",
- "type": "boolean",
- "default": false,
- "description": "This option is obsolete and ignored. Output is never in-placed with input."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array with the same shape as x"
- }
- ]
- },
- {
- "name": "RSubScalar",
- "description": "Element-wise scalar subtraction.\n\n.. math::\n y_i = v - x_i",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "Input variable"
- }
- ],
- "attributes": [
- {
- "name": "val",
- "type": "float64",
- "default": 1.0,
- "description": "Value of the scalar"
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array with the same shape as x"
- }
- ]
- },
- {
- "name": "RDivScalar",
- "description": "Element-wise scalar division.\n\n.. math::\n y_i = \\frac{v}{x_i}",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "Input variable"
- }
- ],
- "attributes": [
- {
- "name": "val",
- "type": "float64",
- "default": 1.0,
- "description": "Value of the scalar"
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array with the same shape as x"
- }
- ]
- },
- {
- "name": "RPowScalar",
- "description": "Element-wise scalar power function.\n\n.. math::\n y_i = v ^ {x_i}",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "Input variable"
- }
- ],
- "attributes": [
- {
- "name": "val",
- "type": "float64",
- "default": 1.0,
- "description": "Value of the scalar"
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array with the same shape as x"
- }
- ]
- },
- {
- "name": "Sign",
- "description": "Element-wise sign function.\n\nIn the forward pass, it is defined as\n\n.. math::\n\n f(x) = \\begin{cases}\n 1 & (x > 0) \\\\\n -1 & (x < 0) \\\\\n \\alpha & (x = 0)\n \\end{cases}.\n\nIn the backward pass, it is defined as\n\n.. math::\n \\frac{\\partial f(x)}{\\partial x} = 1,\n\nor in other words, it behaves as the identity function for the gradient in the backward pass.",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "Input"
- }
- ],
- "attributes": [
- {
- "name": "alpha",
- "type": "float32",
- "default": 1.0,
- "description": "Value in case of :math:`x = 0`."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array with the same shape as x"
- }
- ],
- "category": "Logic"
- },
- {
- "name": "Minimum2",
- "description": "Element-wise minimum.\n\n.. math::\n y_i = \\min(x^{(0)}_i, x^{(1)}_i)",
- "inputs": [
- {
- "name": "x0",
- "type": "nnabla.Variable",
- "description": "N-D array"
- },
- {
- "name": "x1",
- "type": "nnabla.Variable",
- "description": "N-D array"
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array of min value"
- }
- ],
- "category": "Logic"
- },
- {
- "name": "Maximum2",
- "description": "Element-wise maximum.\n\n.. math::\n y_i = \\max(x^{(0)}_i, x^{(1)}_i)",
- "inputs": [
- {
- "name": "x0",
- "type": "nnabla.Variable",
- "description": "N-D array"
- },
- {
- "name": "x1",
- "type": "nnabla.Variable",
- "description": "N-D array"
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array of max value"
- }
- ],
- "category": "Logic"
- },
- {
- "name": "MinimumScalar",
- "description": "Element-wise scalar minimum.\n\n.. math::\n y_i = \\min(x_i, v)",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "Input variable"
- }
- ],
- "attributes": [
- {
- "name": "val",
- "type": "float64",
- "default": 1.0,
- "description": "Value of the scalar"
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array with the same shape as x"
- }
- ],
- "category": "Logic"
- },
- {
- "name": "MaximumScalar",
- "description": "Element-wise scalar maximum.\n\n.. math::\n y_i = \\max (x_i, v)",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "Input variable"
- }
- ],
- "attributes": [
- {
- "name": "val",
- "type": "float64",
- "default": 1.0,
- "description": "Value of the scalar"
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array with the same shape as x"
- }
- ],
- "category": "Logic"
- },
- {
- "name": "LogicalAnd",
- "description": "Elementwise logical AND.\n\n.. math::\n f(x^{(0)}_i,x^{(1)}_i) = \\begin{cases}\n 1 & (x^{(0)}_i \\neq 0 \\;\\&\\; x^{(1)}_i \\neq 0) \\\\\n 0 & otherwise\n \\end{cases}.",
- "inputs": [
- {
- "name": "x0",
- "type": "nnabla.Variable",
- "description": "N-D array"
- },
- {
- "name": "x1",
- "type": "nnabla.Variable",
- "description": "N-D array"
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "No Description"
- }
- ],
- "category": "Logic"
- },
- {
- "name": "LogicalOr",
- "description": "Elementwise logical OR.\n\n.. math::\n f(x^{(0)}_i,x^{(1)}_i) = \\begin{cases}\n 0 & (x^{(0)}_i = 0 \\;\\&\\; x^{(1)}_i = 0) \\\\\n 1 & otherwise\n \\end{cases}.",
- "inputs": [
- {
- "name": "x0",
- "type": "nnabla.Variable",
- "description": "N-D array"
- },
- {
- "name": "x1",
- "type": "nnabla.Variable",
- "description": "N-D array"
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "No Description"
- }
- ],
- "category": "Logic"
- },
- {
- "name": "LogicalXor",
- "description": "Elementwise logical XOR.\n\n.. math::\n f(x^{(0)}_i,x^{(1)}_i) = \\begin{cases}\n 1 & (x^{(0)}_i = 0 \\;\\&\\; x^{(1)}_i = 0) \\\\\n 1 & (x^{(0)}_i \\neq 0 \\;\\&\\; x^{(1)}_i \\neq 0) \\\\\n 0 & otherwise\n \\end{cases}.",
- "inputs": [
- {
- "name": "x0",
- "type": "nnabla.Variable",
- "description": "N-D array"
- },
- {
- "name": "x1",
- "type": "nnabla.Variable",
- "description": "N-D array"
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "No Description"
- }
- ],
- "category": "Logic"
- },
- {
- "name": "Equal",
- "description": "Element wise 'equal'\n\n.. math::\n f(x^{(0)}_i,x^{(1)}_i) = \\begin{cases}\n 1 & (x^{(0)}_i = x^{(1)}_i) \\\\\n 0 & otherwise\n \\end{cases}.",
- "inputs": [
- {
- "name": "x0",
- "type": "nnabla.Variable",
- "description": "N-D array"
- },
- {
- "name": "x1",
- "type": "nnabla.Variable",
- "description": "N-D array"
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "No Description"
- }
- ],
- "category": "Logic"
- },
- {
- "name": "NotEqual",
- "description": "Element wise 'not equal'\n\n.. math::\n f(x^{(0)}_i,x^{(1)}_i) = \\begin{cases}\n 0 & (x^{(0)}_i = x^{(1)}_i) \\\\\n 1 & otherwise\n \\end{cases}.",
- "inputs": [
- {
- "name": "x0",
- "type": "nnabla.Variable",
- "description": "N-D array"
- },
- {
- "name": "x1",
- "type": "nnabla.Variable",
- "description": "N-D array"
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "No Description"
- }
- ],
- "category": "Logic"
- },
- {
- "name": "GreaterEqual",
- "description": "Element wise comparison. The :math:`i^{th}` element of the output is:\n\n.. math::\n\n f(x^{(0)}_i,x^{(1)}_i) = \\begin{cases}\n 1 & (x^{(0)}_i \\geq x^{(1)}_i) \\\\\n 0 & (x^{(0)}_i < x^{(1)}_i)\n \\end{cases}.",
- "inputs": [
- {
- "name": "x0",
- "type": "nnabla.Variable",
- "description": "N-D array"
- },
- {
- "name": "x1",
- "type": "nnabla.Variable",
- "description": "N-D array"
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "No Description"
- }
- ],
- "category": "Logic"
- },
- {
- "name": "Greater",
- "description": "Element wise comparison. The :math:`i^{th}` element of the output is:\n\n.. math::\n\n f(x^{(0)}_i,x^{(1)}_i) = \\begin{cases}\n 1 & (x^{(0)}_i > x^{(1)}_i) \\\\\n 0 & (x^{(0)}_i \\leq x^{(1)}_i)\n \\end{cases}.",
- "inputs": [
- {
- "name": "x0",
- "type": "nnabla.Variable",
- "description": "N-D array"
- },
- {
- "name": "x1",
- "type": "nnabla.Variable",
- "description": "N-D array"
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "No Description"
- }
- ],
- "category": "Logic"
- },
- {
- "name": "LessEqual",
- "description": "Element wise comparison. The :math:`i^{th}` element of the output is:\n\n.. math::\n\n f(x^{(0)}_i,x^{(1)}_i) = \\begin{cases}\n 1 & (x^{(0)}_i \\leq x^{(1)}_i) \\\\\n 0 & (x^{(0)}_i > x^{(1)}_i)\n \\end{cases}.",
- "inputs": [
- {
- "name": "x0",
- "type": "nnabla.Variable",
- "description": "N-D array"
- },
- {
- "name": "x1",
- "type": "nnabla.Variable",
- "description": "N-D array"
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "No Description"
- }
- ],
- "category": "Logic"
- },
- {
- "name": "Less",
- "description": "Element wise comparison. The :math:`i^{th}` element of the output is:\n\n.. math::\n\n f(x^{(0)}_i,x^{(1)}_i) = \\begin{cases}\n 1 & (x^{(0)}_i < x^{(1)}_i) \\\\\n 0 & (x^{(0)}_i \\geq x^{(1)}_i)\n \\end{cases}.",
- "inputs": [
- {
- "name": "x0",
- "type": "nnabla.Variable",
- "description": "N-D array"
- },
- {
- "name": "x1",
- "type": "nnabla.Variable",
- "description": "N-D array"
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "No Description"
- }
- ],
- "category": "Logic"
- },
- {
- "name": "SearchSorted",
- "description": "Finds indices in the innermost dimension of a sorted sequence where values must be inserted in order to maintain the sorted order",
- "inputs": [
- {
- "name": "sorted_sequence",
- "type": "nnabla.Variable",
- "description": "N-D array of sorted sequence where search is to be performed. Note that this must be a sorted array"
- },
- {
- "name": "values",
- "type": "nnabla.Variable",
- "description": "N-D array of Search values"
- }
- ],
- "attributes": [
- {
- "name": "right",
- "type": "boolean",
- "default": false,
- "description": "If True, given a value v, the function returns index i such that sorted_sequence[i-1] <= v < sorted_sequence[i] (index of closest upper bound of v). By default, this is false so the function returns index i such that a[i-1] < v <= a[i] (index of closest lower bound of v)"
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array containing the required indices"
- }
- ],
- "category": "Logic"
- },
- {
- "name": "LogicalAndScalar",
- "description": "Elementwise logical AND with scalar.\n\n.. math::\n f(x_i,v) = \\begin{cases}\n 1 & (x_i \\neq 0 \\;\\&\\; v \\neq 0) \\\\\n 0 & otherwise\n \\end{cases}.",
- "inputs": [
- {
- "name": "x0",
- "type": "nnabla.Variable",
- "description": "Input variable"
- }
- ],
- "attributes": [
- {
- "name": "val",
- "required": true,
- "type": "boolean",
- "description": "No Description"
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array with the same shape as x"
- }
- ],
- "category": "Logic"
- },
- {
- "name": "LogicalOrScalar",
- "description": "Elementwise logical OR with scalar.\n\n.. math::\n f(x_i,v) = \\begin{cases}\n 0 & (x_i = 0 \\;\\&\\; v = 0) \\\\\n 1 & otherwise\n \\end{cases}.",
- "inputs": [
- {
- "name": "x0",
- "type": "nnabla.Variable",
- "description": "Input variable"
- }
- ],
- "attributes": [
- {
- "name": "val",
- "required": true,
- "type": "boolean",
- "description": "No Description"
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array with the same shape as x"
- }
- ],
- "category": "Logic"
- },
- {
- "name": "LogicalXorScalar",
- "description": "Elementwise logical XOR with scalar.\n\n.. math::\n f(x_i,v) = \\begin{cases}\n 1 & (x_i = 0 \\;\\&\\; v = 0) \\\\\n 1 & (x_i \\neq 0 \\;\\&\\; v \\neq 0) \\\\\n 0 & otherwise\n \\end{cases}.",
- "inputs": [
- {
- "name": "x0",
- "type": "nnabla.Variable",
- "description": "Input variable"
- }
- ],
- "attributes": [
- {
- "name": "val",
- "required": true,
- "type": "boolean",
- "description": "No Description"
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array with the same shape as x"
- }
- ],
- "category": "Logic"
- },
- {
- "name": "EqualScalar",
- "description": "Element wise 'equal' with a scalar\n\n.. math::\n f(x_i,v) = \\begin{cases}\n 1 & (x_i = v) \\\\\n 0 & otherwise\n \\end{cases}.",
- "inputs": [
- {
- "name": "x0",
- "type": "nnabla.Variable",
- "description": "Input variable"
- }
- ],
- "attributes": [
- {
- "name": "val",
- "type": "float64",
- "default": 1.0,
- "description": "Value of the scalar"
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array with the same shape as x"
- }
- ],
- "category": "Logic"
- },
- {
- "name": "NotEqualScalar",
- "description": "Element wise 'not equal' with a scalar\n\n.. math::\n f(x_i,v) = \\begin{cases}\n 0 & (x_i = v) \\\\\n 1 & otherwise\n \\end{cases}.",
- "inputs": [
- {
- "name": "x0",
- "type": "nnabla.Variable",
- "description": "Input variable"
- }
- ],
- "attributes": [
- {
- "name": "val",
- "type": "float64",
- "default": 1.0,
- "description": "Value of the scalar"
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array with the same shape as x"
- }
- ],
- "category": "Logic"
- },
- {
- "name": "GreaterEqualScalar",
- "description": "Element wise comparison with a scalar. The :math:`i^{th}` element of the output is:\n\n.. math::\n\n f(x^{(0)}_i,v) = \\begin{cases}\n 1 & (x^{(0)}_i \\geq v) \\\\\n 0 & (x^{(0)}_i < v)\n \\end{cases}.",
- "inputs": [
- {
- "name": "x0",
- "type": "nnabla.Variable",
- "description": "Input variable"
- }
- ],
- "attributes": [
- {
- "name": "val",
- "type": "float64",
- "default": 1.0,
- "description": "Value of the scalar"
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array with the same shape as x"
- }
- ],
- "category": "Logic"
- },
- {
- "name": "GreaterScalar",
- "description": "Element wise comparison with a scalar. The :math:`i^{th}` element of the output is:\n\n.. math::\n\n f(x^{(0)}_i,v) = \\begin{cases}\n 1 & (x^{(0)}_i > v) \\\\\n 0 & (x^{(0)}_i \\leq v)\n \\end{cases}.",
- "inputs": [
- {
- "name": "x0",
- "type": "nnabla.Variable",
- "description": "Input variable"
- }
- ],
- "attributes": [
- {
- "name": "val",
- "type": "float64",
- "default": 1.0,
- "description": "Value of the scalar"
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array with the same shape as x"
- }
- ],
- "category": "Logic"
- },
- {
- "name": "LessEqualScalar",
- "description": "Element wise comparison with a scalar. The :math:`i^{th}` element of the output is:\n\n.. math::\n\n f(x^{(0)}_i,v) = \\begin{cases}\n 1 & (x^{(0)}_i \\leq v) \\\\\n 0 & (x^{(0)}_i > v)\n \\end{cases}.",
- "inputs": [
- {
- "name": "x0",
- "type": "nnabla.Variable",
- "description": "Input variable"
- }
- ],
- "attributes": [
- {
- "name": "val",
- "type": "float64",
- "default": 1.0,
- "description": "Value of the scalar"
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array with the same shape as x"
- }
- ],
- "category": "Logic"
- },
- {
- "name": "LessScalar",
- "description": "Element wise comparison with a scalar. The :math:`i^{th}` element of the output is:\n\n.. math::\n\n f(x^{(0)}_i,v) = \\begin{cases}\n 1 & (x^{(0)}_i < v) \\\\\n 0 & (x^{(0)}_i \\geq v)\n \\end{cases}.",
- "inputs": [
- {
- "name": "x0",
- "type": "nnabla.Variable",
- "description": "Input variable"
- }
- ],
- "attributes": [
- {
- "name": "val",
- "type": "float64",
- "default": 1.0,
- "description": "Value of the scalar"
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array with the same shape as x"
- }
- ],
- "category": "Logic"
- },
- {
- "name": "LogicalNot",
- "description": "Element-wise logical NOT operation\n\n.. math::\n f(x_i) = \\begin{cases}\n 1 & (x_i = 0) \\\\\n 0 & otherwise\n \\end{cases}.",
- "inputs": [
- {
- "name": "x0",
- "type": "nnabla.Variable",
- "description": "Input variable"
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array with the same shape as x"
- }
- ],
- "category": "Logic"
- },
- {
- "name": "IsNaN",
- "description": "Test element-wise for NaN and return a ``0/1`` array.",
- "inputs": [
- {
- "name": "x0",
- "type": "nnabla.Variable",
- "description": "Input variable"
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array with the same shape as x"
- }
- ],
- "category": "Logic"
- },
- {
- "name": "IsInf",
- "description": "Test element-wise for ``inf/-inf`` and return a ``0/1`` array.",
- "inputs": [
- {
- "name": "x0",
- "type": "nnabla.Variable",
- "description": "Input variable"
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array with the same shape as x"
- }
- ],
- "category": "Logic"
- },
- {
- "name": "ResetNaN",
- "description": "Replace NaNs with a scalar value specified by ``val``.",
- "inputs": [
- {
- "name": "x0",
- "type": "nnabla.Variable",
- "description": "Input variable"
- }
- ],
- "attributes": [
- {
- "name": "val",
- "type": "float64",
- "default": 0.0,
- "description": "Value of the scalar"
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array with the same shape as x"
- }
- ],
- "category": "Logic"
- },
- {
- "name": "ResetInf",
- "description": "Replace ``-inf/inf`` with a scalar value specified by ``val``.",
- "inputs": [
- {
- "name": "x0",
- "type": "nnabla.Variable",
- "description": "Input variable"
- }
- ],
- "attributes": [
- {
- "name": "val",
- "type": "float64",
- "default": 0.0,
- "description": "Value of the scalar"
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array with the same shape as x"
- }
- ],
- "category": "Logic"
- },
- {
- "name": "Where",
- "description": "Return elements, either from ``x_true`` or ``x_false``, depending on ``condition``.\n\nIf rank of ``condition`` is higher than those of ``x_true`` and ``x_false``, the first dimensions of ``x_true`` and ``x_false`` must match the dimensions of ``condition``.\n\nExample:\n\n.. code-block:: python\n\n import numpy as np\n import nnabla as nn\n import nnabla.functions as F\n\n a = nn.Variable.from_numpy_array(np.random.rand(2, 3))\n x = nn.Variable.from_numpy_array(np.random.rand(2, 3, 4))\n y = nn.Variable.from_numpy_array(np.random.rand(2, 3, 4))\n z = F.where(F.greater_scalar(a, 0.5), x, y)\n z.forward()\n\n # Numpy equivalent\n z_numpy = np.where(a.d > 0.5, x.d, y.d)\n assert np.allclose(z_numpy, z.d)",
- "inputs": [
- {
- "name": "condition",
- "type": "nnabla.Variable",
- "description": "N-d array. For all i, when ``condition[i] == true``, yield ``x_true[i]``, otherwise ``x_false[i]``."
- },
- {
- "name": "x_true",
- "type": "nnabla.Variable",
- "description": "N-d array with higher or equal rank to ``condition``."
- },
- {
- "name": "x_false",
- "type": "nnabla.Variable",
- "description": "N-d array with higher or equal rank to ``condition``."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array with the same shape as condition"
- }
- ],
- "category": "Logic"
- },
- {
- "name": "Constant",
- "description": "Generate a constant-valued array.",
- "attributes": [
- {
- "name": "val",
- "type": "float32",
- "default": 0.0,
- "description": "Constant value."
- },
- {
- "name": "shape",
- "type": "shape",
- "default": "[]",
- "description": "Shape of the output array."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array where all values are the specified constant."
- }
- ]
- },
- {
- "name": "Arange",
- "description": "Generate a range of values within the half-open interval\n``[start, stop)`` (the interval including start but excluding\nstop) with `step` increments.",
- "attributes": [
- {
- "name": "start",
- "required": true,
- "type": "float32",
- "description": "Start value."
- },
- {
- "name": "stop",
- "required": true,
- "type": "float32",
- "description": "End value."
- },
- {
- "name": "step",
- "type": "float32",
- "default": 1.0,
- "description": "Step value."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "1-D array with the generated values."
- }
- ]
- },
- {
- "name": "Linspace",
- "description": "Generate a one-dimensional vector/tensor of size `num` whose values are evenly spaced from `start` to `end`, inclusive.",
- "attributes": [
- {
- "name": "start",
- "required": true,
- "type": "float32",
- "description": "Start value."
- },
- {
- "name": "stop",
- "required": true,
- "type": "float32",
- "description": "End value."
- },
- {
- "name": "num",
- "required": true,
- "type": "int64",
- "description": "Size of the constructed vector/tensor."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "1-D array with the generated values."
- }
- ]
- },
- {
- "name": "Abs",
- "description": "Element-wise absolute value function.\n\n.. math::\n y_i = |x_i|",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "Input variable"
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "Element-wise absolute variable"
- }
- ]
- },
- {
- "name": "Exp",
- "description": "Element-wise natural exponential function.\n\n.. math::\n y_i = \\exp(x_i).",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "Input variable"
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "Element-wise exp variable"
- }
- ]
- },
- {
- "name": "Log",
- "description": "Element-wise natural logarithm function.\n\n.. math::\n y_i = \\ln(x_i).",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "Input variable"
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "Element-wise log variable"
- }
- ]
- },
- {
- "name": "Identity",
- "description": "Identity function.\n\n.. math::\n y = x",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "N-D array."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array"
- }
- ]
- },
- {
- "name": "BatchMatmul",
- "description": "Batch matrix multiplication.\n\nTwo batches of matrices are multiplied for each sample in a batch.\nA batch of matrices is composed as [..., P, Q] where the last two dimensions compose matrix dimensions,\nand the first dimensions up to the third last dimension are considered as batch samples.\nThese batch dimensions are internally broadcasted when the size of a dimension is 1.\n\nExample:\n\n.. code-block:: python\n\n import nnabla as nn\n import nnabla.functions as F\n import numpy as np\n\n nn.set_auto_forward(True)\n\n # Same batch size\n a = nn.Variable.from_numpy_array(np.random.rand(2, 2, 3, 4))\n b = nn.Variable.from_numpy_array(np.random.rand(2, 2, 4, 3))\n c = F.batch_matmul(a, b)\n\n # Different batch size with the broadcast\n a = nn.Variable.from_numpy_array(np.random.rand(2, 1, 3, 4))\n b = nn.Variable.from_numpy_array(np.random.rand(1, 3, 4, 3))\n c = F.batch_matmul(a, b)\n\n.. WARNING::\n Since version 1.13, the behavior of the batch dimensions changed; it now supports the internal\n broadcast when the size of a dimension is 1. Accordingly, this function does not support different\n batch dimensions between two inputs even if the total sample size for each input is the same.",
- "inputs": [
- {
- "name": "a",
- "type": "nnabla.Variable",
- "description": "N-D array with >= 2-dim. The last two dimensions will be treated as a matrix."
- },
- {
- "name": "b",
- "type": "nnabla.Variable",
- "description": "N-D array with >= 2-dim. The last two dimensions will be treated as a matrix. The product of the size of 0-th dimension through the size of the third last dimension must be same as that of the input ``a``."
- }
- ],
- "attributes": [
- {
- "name": "transpose_a",
- "type": "boolean",
- "default": false,
- "description": "Transpose the last two axes of ``a`` in matrix multiplication."
- },
- {
- "name": "transpose_b",
- "type": "boolean",
- "default": false,
- "description": "Transpose the last two axes of ``b`` in matrix multiplication."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "Output of sample-wise matrix multiplication in a batch. When ``a`` is of a shape of [N, P, Q], ``b`` is of a shape of [N, Q, R], and transpose options are all False, the output will be a shape of [N, P, R]."
- }
- ]
- },
- {
- "name": "Round",
- "description": "Element-wise round function.\n\nIn the forward pass, this function simply computes `round` to the nearest integer value.\n\n.. math::\n y_i = round(x_i).\n\nIn the backward pass, the simple Straight-Through Estimator (STE) is applied,\n\n.. math::\n \\frac{\\partial y_i}{\\partial x_i} = 1.",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "Input variable"
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array with the same shape as x"
- }
- ]
- },
- {
- "name": "Ceil",
- "description": "Element-wise ceil function.\n\nIn the forward pass, this function simply returns the smallest integer which is not less than the input.\n\n.. math::\n y_i = ceil(x_i).\n\nIn the backward pass, the simple Straight-Through Estimator (STE) is applied,\n\n.. math::\n \\frac{\\partial y_i}{\\partial x_i} = 1.",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "Input variable"
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array with the same shape as x"
- }
- ]
- },
- {
- "name": "Floor",
- "description": "Element-wise floor function.\n\nIn the forward pass, this function simply returns the largest integer which is not greater than the input.\n\n.. math::\n y_i = floor(x_i).\n\nIn the backward pass, the simple Straight-Through Estimator (STE) is applied,\n\n.. math::\n \\frac{\\partial y_i}{\\partial x_i} = 1.",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "Input variable"
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array with the same shape as x"
- }
- ]
- },
- {
- "name": "Sin",
- "description": "Element-wise sine (sin) function.\n\n.. math::\n y_i = \\sin (x_i)",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "N-D array"
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array with the same shape as x"
- }
- ]
- },
- {
- "name": "Cos",
- "description": "Element-wise cosine (cos) function.\n\n.. math::\n y_i = \\cos (x_i)",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "N-D array"
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array with the same shape as x"
- }
- ]
- },
- {
- "name": "Tan",
- "description": "Element-wise tangent (tan) function.\n\n.. math::\n y_i = \\tan (x_i)",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "N-D array"
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array with the same shape as x"
- }
- ]
- },
- {
- "name": "Sinh",
- "description": "Element-wise hyperbolic sine (sinh) function.\n\n.. math::\n y_i = \\sinh (x_i)",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "N-D array"
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array with the same shape as x"
- }
- ]
- },
- {
- "name": "Cosh",
- "description": "Element-wise hyperbolic cosine (cosh) function.\n\n.. math::\n y_i = \\cosh (x_i)",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "N-D array"
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array with the same shape as x"
- }
- ]
- },
- {
- "name": "ASin",
- "description": "Element-wise arcsine (asin) function.\n\n.. math::\n y_i = \\arcsin (x_i)",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "N-D array"
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array with the same shape as x"
- }
- ]
- },
- {
- "name": "ACos",
- "description": "Element-wise arccosine (acos) function.\n\n.. math::\n y_i = \\arccos (x_i)",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "N-D array"
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array with the same shape as x"
- }
- ]
- },
- {
- "name": "ATan",
- "description": "Element-wise arctangent (atan) function.\n\n.. math::\n y_i = \\arctan (x_i)",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "N-D array"
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array with the same shape as x"
- }
- ]
- },
- {
- "name": "ATan2",
- "description": "Element-wise arctangent (atan) function with 2 input variables.\n\n.. math::\n y_i = \\arctan2 (x_{i1}, x_{i2})",
- "inputs": [
- {
- "name": "x0",
- "type": "nnabla.Variable",
- "description": "N-D array"
- },
- {
- "name": "x1",
- "type": "nnabla.Variable",
- "description": "N-D array"
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array with the same shape as input variables"
- }
- ]
- },
- {
- "name": "ASinh",
- "description": "Element-wise hyperbolic arcsine (asinh) function.\n\n.. math::\n y_i = \\text{arcsinh} (x_i)",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "N-D array"
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array with the same shape as x"
- }
- ]
- },
- {
- "name": "ACosh",
- "description": "Element-wise hyperbolic arccosine (acosh) function.\n\n.. math::\n y_i = \\text{arccosh} (x_i)",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "N-D array"
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array with the same shape as x"
- }
- ]
- },
- {
- "name": "ATanh",
- "description": "Element-wise hyperbolic arctangent (atanh) function.\n\n.. math::\n y_i = \\text{arctanh} (x_i)",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "N-D array"
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array with the same shape as x"
- }
- ]
- },
- {
- "name": "Erf",
- "description": "Element-wise Error function.\n\n.. math::\n y_i = \\text{erf} (x_i)",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "N-D array"
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array with the same shape as x"
- }
- ]
- },
- {
- "name": "Concatenate",
- "description": "Concatenate a variable number of input arrays along the specified axis.",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "list": true,
- "description": "N-D arrays."
- }
- ],
- "attributes": [
- {
- "name": "axis",
- "type": "int64",
- "default": "len(x[0].shape) - 1",
- "description": "Axis"
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "Concatenate variable"
- }
- ],
- "category": "Shape"
- },
- {
- "name": "Split",
- "description": "Split arrays at the specified axis.\n\nnote:\n This function should not be called directly when constructing models.\n Instead, use :meth:`nnabla.functions.split` which\n automatically sets `n_output` from the input's shape and axis.",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "N-D array"
- }
- ],
- "attributes": [
- {
- "name": "axis",
- "type": "int64",
- "default": 0,
- "description": "Axis"
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "list": true,
- "description": "list of N-D arrays"
- }
- ],
- "category": "Shape"
- },
- {
- "name": "Stack",
- "description": "Joins two or more arrays on a new axis.\n\nNote:\n Unlike :meth:`nnabla.functions.concatenate` , which joins arrays on an existing axis,\n Stack joins arrays on a new axis.",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "list": true,
- "description": "N-D arrays. The sizes of all the arrays to be stacked must be the same."
- }
- ],
- "attributes": [
- {
- "name": "axis",
- "type": "int64",
- "default": 0,
- "description": "The axis on which to concatenate arrays. Axis indices take on values 0, 1, 2, and so on from the left. For example, to stack four (3,28,28) inputs on the second axis, specify 1. In this case, the output size will be (3,4,28,28)."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "Output"
- }
- ],
- "category": "Shape"
- },
- {
- "name": "Slice",
- "description": "Slice arrays along specified axis.",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "N-D array"
- }
- ],
- "attributes": [
- {
- "name": "start",
- "type": "int64[]",
- "default": "(0,) * len(x.shape)",
- "description": "Start indices for each axis"
- },
- {
- "name": "stop",
- "type": "int64[]",
- "default": "tuple(x.shape)",
- "description": "Stop indices for each axis"
- },
- {
- "name": "step",
- "type": "int64[]",
- "default": "(1,) * len(x.shape)",
- "description": "Step indices for each axis"
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "Sliced N-D array"
- }
- ],
- "category": "Shape"
- },
- {
- "name": "Pad",
- "description": "Pad the input N-D array `x` over the number of dimensions given\nby half the length of the `pad_width` iterable, where every two\nvalues in `pad_width` determine the before and after pad size of\nan axis. The `pad_width` iterable must hold an even number of\npositive values which may cover all or fewer dimensions of the\ninput variable `x`. If `pad_width` covers fewer dimensions then\nit applies to the innermost dimensions of `x`.\n\n.. code-block:: python\n\n x = nn.Variable.from_numpy_array(np.ones((2, 3, 4)))\n assert F.pad(x, (1, 1, 2, 2)).shape == (2, 5, 8)\n\nPadding is performed according to the requested `mode`:\n\nconstant\n Pads with a value given by the keyword argument `constant_value`.\n\n .. code-block:: python\n\n x = nn.Variable.from_numpy_array(np.array([1, 2, 3, 4], dtype=np.int))\n y = F.pad(x, (3, 3), 'constant', constant_value = -1)\n y.forward()\n assert np.all(y.d == np.array([-1, -1, -1, 1, 2, 3, 4, -1, -1, -1]))\n\nreflect\n Pads with the reflection of the vector mirrored on the first\n and last values of the vector along each axis.\n\n .. code-block:: python\n\n x = nn.Variable.from_numpy_array(np.array([1, 2, 3, 4], dtype=np.int))\n y = F.pad(x, (3, 3), 'reflect')\n y.forward()\n assert np.all(y.d == np.array([4, 3, 2, 1, 2, 3, 4, 3, 2, 1]))\n\nrepeat\n Pads with the edge value of the vector along each axis.\n\n .. code-block:: python\n\n x = nn.Variable.from_numpy_array(np.array([1, 2, 3, 4], dtype=np.int))\n y = F.pad(x, (3, 3), 'repeat')\n y.forward()\n assert np.all(y.d == np.array([1, 1, 1, 1, 2, 3, 4, 4, 4, 4]))",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "N-D array"
- }
- ],
- "attributes": [
- {
- "name": "pad_width",
- "required": true,
- "type": "int64[]",
- "description": "Iterable of *before* and *after* pad values."
- },
- {
- "name": "mode",
- "type": "string",
- "default": "constant",
- "description": "Padding mode string."
- },
- {
- "name": "constant_value",
- "type": "float32",
- "default": 0.0,
- "description": "Fill value if mode is `constant`."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "Padded N-D array with the same number of dimensions as the input.\n\n.. code-block:: python\n\n x = nn.Variable((3, 3, 4, 2)) # a shape like (B, C, H, W)\n # 1-D padding: last dim by 1 left and 2 on the right side\n assert F.pad(x, (1, 2)).shape == (3, 3, 4, 5)\n # 2-D padding: last dim by (1, 1) and 2nd to last by (2, 2)\n assert F.pad(x, (2, 2, 1, 1)).shape == (3, 3, 8, 4)\n # 3-D padding: dims C by (0, 1), H by (2, 1), and W by (3, 3)\n assert F.pad(x, (0, 1, 2, 1, 3, 3)).shape == (3, 4, 7, 8)"
- }
- ],
- "category": "Shape"
- },
- {
- "name": "Transpose",
- "description": "Transposes tensor dimensions.",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "N-D array"
- }
- ],
- "attributes": [
- {
- "name": "axes",
- "required": true,
- "type": "int64[]",
- "description": "Source axis indices for each axis."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "Transposed N-D array."
- }
- ],
- "category": "Shape"
- },
- {
- "name": "Broadcast",
- "description": "Broadcasting ND-array to the specified shape.",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "N-D array"
- }
- ],
- "attributes": [
- {
- "name": "shape",
- "required": true,
- "type": "shape",
- "description": "Shape broadcasted to. The size must be the same in axis where ``x``'s shape is not 1."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "Broadcasted N-D array"
- }
- ],
- "category": "Shape"
- },
- {
- "name": "BroadcastTo",
- "description": ".. WARNING::\n This function is experimental support, so please do not actively use it.\n\nBroadcasting ND-array to the specified buffer.",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "N-D array"
- },
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array"
- }
- ],
- "attributes": [
- {
- "name": "axis",
- "type": "int64",
- "default": -1,
- "description": "Target axis to start broadcasting. If this is not set, broadcast will try to fit y to x starting from the last dimension"
- }
- ],
- "outputs": [
- {
- "name": "z",
- "type": "nnabla.Variable",
- "description": "Broadcasted N-D array"
- }
- ],
- "category": "Shape"
- },
- {
- "name": "Tile",
- "description": "Forward input `x` repeated the number of times given by `reps`. If `reps`\nis a sequence, the output has dimension of ``d = max(len(reps), x.ndim)``\nand either `x` is promoted to be d-dimensional by prepending new axes or\n`reps` is promoted to x.ndim by prepending 1's.",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "N-D array"
- }
- ],
- "attributes": [
- {
- "name": "reps",
- "required": true,
- "type": "int64[]",
- "description": "The number of repetitions of `x` along each axis."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array"
- }
- ],
- "category": "Shape"
- },
- {
- "name": "OneHot",
- "description": "This function creates one-hot vector based on input indices.\nThe range [-shape[i], -1] of input indices are regarded as [0, shape[i]-1],\nand an input index outside [-shape[i], shape[i]-1] generates a vector \nfilled with zero.\n\n Example:\n\n .. code-block:: python\n\n import nnabla as nn\n import nnabla.functions as F\n import numpy as np\n\n labels = nn.Variable.from_numpy_array(np.array([[9], [4], [5], [-9], [10]]))\n print(labels.shape) # (5, 1)\n\n num_class = 10\n\n y_train = F.one_hot(labels, shape=(num_class, ))\n y_train.forward()\n\n print(y_train.shape) # (5, 10)\n print(y_train.d)\n\n # [[0. 0. 0. 0. 0. 0. 0. 0. 0. 1.]\n # [0. 0. 0. 0. 1. 0. 0. 0. 0. 0.]\n # [0. 0. 0. 0. 0. 1. 0. 0. 0. 0.]\n # [0. 1. 0. 0. 0. 0. 0. 0. 0. 0.]\n # [0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]]\n\n # Can also be used for ndarray.\n\n labels = nn.Variable.from_numpy_array(np.array([[1, 7], [4, 7], [8, 6], [5, 0], [2, 6]]))\n print(labels.shape) # (5, 2)\n\n num_class_1, num_class_2 = 10, 8\n\n y_train = F.one_hot(labels, shape=(num_class_1, num_class_2))\n y_train.forward()\n\n print(y_train.shape) # (5, 10, 8)\n print(y_train.d)\n\n # [[[0. 0. 0. 0. 0. 0. 0. 0.] [[0. 0. 0. 0. 0. 0. 0. 0.]\n # [0. 0. 0. 0. 0. 0. 0. 1.] [0. 0. 0. 0. 0. 0. 0. 0.]\n # [0. 0. 0. 0. 0. 0. 0. 0.] [0. 0. 0. 0. 0. 0. 1. 0.]\n # [0. 0. 0. 0. 0. 0. 0. 0.] [0. 0. 0. 0. 0. 0. 0. 0.]\n # [0. 0. 0. 0. 0. 0. 0. 0.] [0. 0. 0. 0. 0. 0. 0. 0.]\n # [0. 0. 0. 0. 0. 0. 0. 0.] ... [0. 0. 0. 0. 0. 0. 0. 0.]\n # [0. 0. 0. 0. 0. 0. 0. 0.] [0. 0. 0. 0. 0. 0. 0. 0.]\n # [0. 0. 0. 0. 0. 0. 0. 0.] [0. 0. 0. 0. 0. 0. 0. 0.]\n # [0. 0. 0. 0. 0. 0. 0. 0.] [0. 0. 0. 0. 0. 0. 0. 0.]\n # [0. 0. 0. 0. 0. 0. 0. 0.]], [0. 0. 0. 0. 0. 0. 0. 0.]]]",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "N-D array representing label's indice."
- }
- ],
- "attributes": [
- {
- "name": "shape",
- "required": true,
- "type": "shape",
- "description": "Number of classes. When nd-labels are given, dimensions must match. See the example above."
- }
- ],
- "outputs": [
- {
- "name": "output",
- "type": "nnabla.Variable",
- "description": "N-D array one-hot vector/tensor."
- }
- ],
- "category": "Shape"
- },
- {
- "name": "Flip",
- "description": "Reverses the order of elements of the specified dimension of an array.",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "N-D array"
- }
- ],
- "attributes": [
- {
- "name": "axes",
- "type": "int64[]",
- "default": "[len(x.shape) - 1]",
- "description": "The index of the dimension to reverse the order of the elements. Axis indices take on values 0, 1, 2, and so on from the left. For example, to flip a 32 (W) by 24 (H) 100 RGB image (100,3,24,32) vertically and horizontally, specify (2,3)."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array"
- }
- ],
- "category": "Shape"
- },
- {
- "name": "Shift",
- "description": "Shifts the array elements by the specified amount.",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "N-D array."
- }
- ],
- "attributes": [
- {
- "name": "shifts",
- "type": "int64[]",
- "default": "(0,) * len(x.shape)",
- "description": "The amount to shift elements. For example, to shift image data to the right by 2 pixels and up 3 pixels, specify (-3,2)."
- },
- {
- "name": "border_mode",
- "type": "string",
- "default": "nearest",
- "description": "Specify how to process the ends of arrays whose values will be undetermined as a result of shifting. nearest: The data at the ends of the original array is copied and used. reflect: Original data reflected at the ends of the original array is used."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array."
- }
- ],
- "category": "Shape"
- },
- {
- "name": "Sort",
- "description": "Sorts the elements of `x` along a given `axis` in ascending\norder by value. A negative `axis` counts from the last dimension\nof `x`, so the default of -1 sorts along the last dimension. If\n`reverse` is True, then the elements are sorted in descending\norder.\n\nIf `with_index` is True, result is a tuple ``(sorted, indices)``\nor only ``indices`` if `only_index` is True. Setting\n`only_index` to True implies that `with_index` is also True.",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "N-D array."
- }
- ],
- "attributes": [
- {
- "name": "axis",
- "type": "int64",
- "default": -1,
- "description": "Axis along which to sort."
- },
- {
- "name": "reverse",
- "type": "boolean",
- "default": false,
- "description": "Sort in descending order."
- },
- {
- "name": "with_index",
- "type": "boolean",
- "default": false,
- "description": "Return sorted values and index."
- },
- {
- "name": "only_index",
- "type": "boolean",
- "default": false,
- "description": "Return only the sort index."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "list": true,
- "description": "list of N-D arrays"
- }
- ],
- "category": "Shape"
- },
- {
- "name": "Reshape",
- "description": "Reshapes the input variable in-place. It does not create a copy of the variable.\nThe output variable (y) has a new shape but points to the same data as the input variable (x).\nThis means that if the data in the output variable (y) is modified, the data in the input\nvariable (x) also gets modified since the reshape was done in-place.\n\nNote:\n This function has the same behavior as the :meth:`nnabla.Variable.reshape` method.",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "N-D array."
- }
- ],
- "attributes": [
- {
- "name": "shape",
- "required": true,
- "type": "shape",
- "description": "Dimensions for each axis. ``-1`` can be specified only in one shape dimension. The value is calculated from the size of the array and remaining dimensions."
- },
- {
- "name": "inplace",
- "type": "boolean",
- "default": true,
- "description": "The output array is shared with the input array if True."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "Reshaped N-D array"
- }
- ],
- "category": "Shape"
- },
- {
- "name": "Shape",
- "description": "Get the shape of a tensor. Optional attributes start and end can be used to compute\na slice of the input tensor's shape. If start axis is omitted, the slice starts from\naxis 0.",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "N-D array."
- }
- ],
- "attributes": [
- {
- "name": "start",
- "type": "int64",
- "default": 0,
- "description": "If start axis is omitted, the slice starts from axis 0."
- },
- {
- "name": "end",
- "type": "int64",
- "default": 0,
- "description": "The end axis, if specified, is exclusive (and the returned value will not include."
- }
- ],
- "outputs": [
- {
- "name": "shape",
- "type": "nnabla.Variable",
- "description": "1-D array"
- }
- ],
- "category": "Shape"
- },
- {
- "name": "MatrixDiag",
- "description": "Returns an array where the last two dimensions consist of the diagonal matrix.",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "N-D array with shape (:math:`M_0 \\times \\ldots \\times M_N`)."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array with shape (:math:`M_0 \\times \\ldots \\times M_N \\times M_N`)."
- }
- ],
- "category": "Shape"
- },
- {
- "name": "MatrixDiagPart",
- "description": "Returns an array in which the values of the last dimension consist of the diagonal\nelements of the last two dimensions of an input array.",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "N-D array with shape (:math:`M_0 \\times \\ldots \\times M_N \\times M_N`)."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array with shape (:math:`M_0 \\times \\ldots \\times M_N`)."
- }
- ],
- "category": "Shape"
- },
- {
- "name": "Trilu",
- "description": "Returns an array in which the values of the last dimension consist of the triangular\nmatrix of the last two dimensions of an input array.",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "N-D array with shape (:math:`M_0 \\times \\ldots \\times M_N`)."
- }
- ],
- "attributes": [
- {
- "name": "k",
- "type": "int64",
- "default": 0,
- "description": "The number diagonals above or below the main diagonal to exclude or include."
- },
- {
- "name": "upper",
- "type": "boolean",
- "default": true,
- "description": "Determine whether upper or lower part of matrix is retained."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array with shape (:math:`M_0 \\times \\ldots \\times M_N`)."
- }
- ],
- "category": "Shape"
- },
- {
- "name": "Meshgrid",
- "description": "Return coordinate matrices from coordinate vectors. Given N 1-D arrays, this function returns N-D coordinate arrays for vectorized evaluations on an N-D grid.\nExample: \n >>> x,y = F.meshgrid(F.arange(0,3), F.arange(0,2))\n >>> x.d\n array([[0., 1., 2.],\n [0., 1., 2.]], dtype=float32)\n >>> y.d \n array([[0., 0., 0.],\n [1., 1., 1.]], dtype=float32)\n\n >>> i,j = F.meshgrid(F.arange(0,3), F.arange(0,2), ij_indexing=True)\n >>> i.d \n array([[0., 0.],\n [1., 1.],\n [2., 2.]], dtype=float32)\n >>> j.d \n array([[0., 1.],\n [0., 1.],\n [0., 1.]], dtype=float32)",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "list": true,
- "description": "N-D arrays."
- }
- ],
- "attributes": [
- {
- "name": "ij_indexing",
- "type": "boolean",
- "default": false,
- "description": "If set true (Matrix ('ij') indexing ), the broadcasting dimensions are swapped. Default is False (Cartesian ('xy') indexing )."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "list": true,
- "description": "N-D arrays"
- }
- ],
- "category": "Shape"
- },
- {
- "name": "BatchDet",
- "description": "Batch-wise determinant function.\n\n.. math::\n Y_b = \\det(X_b), \n\nwhere :math:`X_b` and :math:`Y_b` are the :math:`b`-th input and output, respectively.",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "batched N-D array"
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "batched N-D array of determinant"
- }
- ],
- "category": "Shape"
- },
- {
- "name": "BatchInv",
- "description": "Returns an array of inverted matrix",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "batched N-D array"
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "batched N-D array of inverted matrix"
- }
- ],
- "category": "Shape"
- },
- {
- "name": "BatchLogdet",
- "description": "Batch-wise log absolute determinant function.\n\n.. math::\n Y_b = \\log(|\\det(X_b)|), \n\nwhere :math:`X_b` and :math:`Y_b` are the :math:`b`-th input and output, respectively.",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "batched N-D array"
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "batched N-D array of log absolute determinant"
- }
- ],
- "category": "Shape"
- },
- {
- "name": "BatchCholesky",
- "description": "Batch-wise cholesky decomposition of symmetric positive definite matrix.\nThe gradient of this function will be a symmetric matrix.\nThis function does not check whether given matrix is symmetric positive define matrix or not.",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "batched N-D array"
- }
- ],
- "attributes": [
- {
- "name": "upper",
- "type": "boolean",
- "default": false,
- "description": "If true, will return an upper triangular matrix. Otherwise will return a lower triangular matrix."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "batched N-D array of lower/upper triangular matrix."
- }
- ],
- "category": "Shape"
- },
- {
- "name": "Assign",
- "description": "Assign source array to destination array just like `tf.assign`.\nThis is useful to synchronize or manually update parameters.\n\n.. code-block:: python\n\n dst = nn.Variable((2, 3, 4))\n src = nn.Variable((2, 3, 4))\n assign = F.assign(dst, src)\n\n assign.forward()\n assert np.allclose(dst.d, src.d) # dst and src have identical values.\n assert np.allclose(assign.d dst.d) # returned Variable is also identical to dst.\n\nUnlike TensorFlow, the returned Variable has a backward path to `dst`:\n\n.. math::\n\n g_{dst} = g_{y}",
- "inputs": [
- {
- "name": "dst",
- "type": "nnabla.Variable",
- "description": "A destination N-D array"
- },
- {
- "name": "src",
- "type": "nnabla.Variable",
- "description": "A source N-D array"
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "An assigned array"
- }
- ],
- "category": "Shape"
- },
- {
- "name": "Gather",
- "description": "Gather from the input data according to the index.\n\nGiven the input data :math:`X` of :math:`(D_{0}, \\ldots, D_{N-1})` shape and\nthe indices :math:`IDX` of :math:`(I_{0}, \\ldots, I_{M-1})` shape, in case of `batch_dims = 0`,\nthe gather outputs\n\n.. math::\n && Y[d_{0}, \\ldots, d_{axis - 1}, i_{0}, \\ldots, i_{M-1}, d_{axis + 1}, \\ldots, d_{N-1}] = \\\\\n && X[d_{0}, \\ldots, d_{axis - 1}, IDX[i_{0}, \\ldots, i_{M-1}], d_{axis + 1}, \\ldots, d_{N-1}].\n\nGenerally, the gather outputs\n\n.. math::\n && Y[d_{0}, \\ldots, d_{axis - 1}, i_{B}, \\ldots, i_{M-1}, d_{axis + 1}, \\ldots, d_{N-1}] = \\\\\n && X[d_{0}, \\ldots, d_{axis - 1}, IDX[i_{0}, \\ldots, i_{B - 1}, i_{B} \\ldots, i_{M-1}], d_{axis + 1}, \\ldots d_{N-1}].\n\nwhere :math:`B` = `batch_dims`.\n\n`x.shape[:batch_dims]` must be equal to `indices.shape[:batch_dims]`.\n\nOutput shape is `x.shape[:axis] + indices.shape[batch_dims:] + x.shape[axis + 1]`.",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "Data from which to gather."
- },
- {
- "name": "Indices",
- "type": "nnabla.Variable",
- "description": "Index with which to gather."
- }
- ],
- "attributes": [
- {
- "name": "axis",
- "type": "int64",
- "default": 0,
- "description": "Axis in `x` to gather from. `axis` must be greater than or equal to `batch_dims`."
- },
- {
- "name": "batch_dims",
- "type": "int64",
- "default": 0,
- "description": "The number of batch dimensions."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "Gathered output."
- }
- ],
- "category": "Shape"
- },
- {
- "name": "GatherNd",
- "description": "Gather elements or slices from `data` according to `indices`, which must\nbe at least two-dimensional with the first dimension :math:`M` being less\nor equal to the :math:`N` dimensions of `data`. Given `data` with shape\n:math:`(X_0, X_1, ..., X_{N-1})` and indices with shape\n:math:`(M, Y_0, ..., Y_{K-1})` output has shape\n:math:`(Y_0, ..., Y_{K-1}, X_M, ..., X_{N-1})`. If :math:`M == N`, output\nshape is simply :math:`(Y_0, ..., Y_{K-1})`.\n\nThe forward of :func:`~nnabla.functions.gather_nd` is equivalent to:\n\n.. code-block:: python\n\n def gather_nd(data, index):\n import numpy as np\n tmp_index = index.reshape(index.shape[0], -1)\n tmp_index = (idx + (Ellipsis,) for idx in zip(*new_index))\n out_shape = index.shape[1:] + data.shape[index.shape[0]:]\n return np.vstack(data[idx] for idx in tmp_index).reshape(*out_shape)\n\nExamples:\n\n>>> import numpy as np, nnabla as nn, nnabla.functions as F\n>>> nn.set_auto_forward(True)\n>>> data = F.arange(1, 11).reshape([2, 5])\n>>> print(data.d)\n[[ 1. 2. 3. 4. 5.]\n [ 6. 7. 8. 9. 10.]]\n>>> F.gather_nd(data, [[1, 1, 0]]).shape\n(3, 5)\n>>> F.gather_nd(data, [[1, 1, 0], [0, 1, 0]]).shape\n(3,)\n>>> print(F.gather_nd(data, [[1, 1, 0], [0, 1, 0]]).d)\n[6. 7. 1.]\n>>> print(F.gather_nd(data, [[1, 1, 0]]).d)\n[[ 6. 7. 8. 9. 10.]\n [ 6. 7. 8. 9. 10.]\n [ 1. 2. 3. 4. 5.]]\n\nWhen `indices` is provided as a :obj:`~nnabla.Variable` it will be\npossible to change the actual index values after function creation.\nIt is important to note that out-of-bound indices raise error when\nrunning on CPU but are ignored when using an accelerated computation\ncontext.\n\n>>> indices = nn.Variable((2, 1))\n>>> indices.d = [[0], [0]]\n>>> y = F.gather_nd(data, indices)\n>>> print(y.d)\n[1.]\n>>> indices.d = [[1], [4]]\n>>> y.forward()\n>>> print(y.d)\n[10.]",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "N-D array input data"
- },
- {
- "name": "indices",
- "type": "nnabla.Variable",
- "description": "N-D array indices"
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array"
- }
- ],
- "category": "Shape"
- },
- {
- "name": "BoolGather",
- "description": "Gather from the input data according to the mask. \n\nGiven an input of :math:`(B_1, \\ldots, B_N, D_1, \\ldots, D_M)` shape and mask of :math:`(B_1, \\ldots, B_N)` shape, the function returns an output of :math:`(nnz, D_1, \\ldots, D_M)` shape and :math:`nnz` is the number of non-zero elements in mask.\n\n.. code-block:: python\n\n import numpy as np\n import nnabla as nn\n import nnabla.functions as F\n\n nn.set_auto_forward(True)\n\n input = nn.Variable.from_numpy_array([[1, 2], [3, 4], [5, 6]])\n mask = nn.Variable.from_numpy_array([1, 0, 1])\n output = F.bool_gather(input, mask)\n \n print(output.d) # [[1, 2], [5, 6]]\n\n\nNote that this function is normally used with the dynamic graph \nsince this function outputs a variable-length output. If used with the static graph, \na network has to be constructed all time in iteration.",
- "inputs": [
- {
- "name": "input",
- "type": "nnabla.Variable",
- "description": "Data from which to gather."
- },
- {
- "name": "mask",
- "type": "nnabla.Variable",
- "description": "Mask with which to gather. Non-zero/zero elements are supposed to be a binary mask as 1/0. No gradients are computed with respect to mask."
- }
- ],
- "outputs": [
- {
- "name": "output",
- "type": "nnabla.Variable",
- "description": "Gathered output."
- }
- ],
- "category": "Shape"
- },
- {
- "name": "ScatterNd",
- "description": "Scatter `data` into a new array of given `shape` according to `indices`.\nThis operation is the inverse of :func:`~nnabla.functions.gather_nd`.\n\nThe forward of :func:`~nnabla.functions.scatter_nd` is equivalent to:\n\n.. code-block:: python\n\n def scatter_nd(data, indices, shape):\n import numpy as np\n if isinstance(indices, np.ndarray)\n indices = indices.tolist()\n result = np.zeros(shape, dtype=data.dtype)\n result[indices] = data\n return result\n\nExamples:\n\n>>> import numpy as np, nnabla as nn, nnabla.functions as F\n>>> nn.set_auto_forward(True)\n>>> data = nn.Variable.from_numpy_array(np.array([9, 10, 11, 12]))\n>>> indices = nn.Variable.from_numpy_array(np.array([[4, 3, 1, 7]]))\n>>> scattered = F.scatter_nd(data, indices, shape=(8,))\n>>> print(scatterd.d)\n[ 0. 11. 0. 10. 9. 0. 0. 12.]\n>>> print(F.gather_nd(scattered, indices).d)\n[ 9. 10. 11. 12.]",
- "inputs": [
- {
- "name": "data",
- "type": "nnabla.Variable",
- "description": "N-D array input data."
- },
- {
- "name": "indices",
- "type": "nnabla.Variable",
- "description": "N-D array scatter indices."
- },
- {
- "name": "out",
- "type": "nnabla.Variable",
- "option": "optional",
- "description": "existing output array"
- }
- ],
- "attributes": [
- {
- "name": "shape",
- "type": "int64[]",
- "default": "None",
- "description": "Shape of output variable."
- },
- {
- "name": "add",
- "type": "boolean",
- "default": false,
- "description": "Add the input data to the same destination specified by the indices."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array of given `shape`."
- }
- ],
- "category": "Shape"
- },
- {
- "name": "ScatterAdd",
- "description": "Add all values from `x1` into the `x0` according to index specified by `indices`.\nThis function adds `x1` into the copy of `x0` and outputs the copy.\nThe original `x0` will not be changed.\n`x0`, `indices` and `x1` must have same number of dimensions.\n\nThe forward of :func:`~nnabla.functions.scatter_add` is equivalent to:\n\n.. code-block:: python\n\n def scatter_add(x0, indices, x1, axis):\n # Assuming each input is 3 dimensional\n import numpy as np\n output = np.copy(x0)\n for i in range(indices.shape[0]):\n for j in range(indices.shape[1]):\n for k in range(indices.shape[2]):\n if axis == 0:\n output[indices[i][j][k]][j][k] += x1[i][j][k]\n elif axis == 1:\n output[i][indices[i][j][k]][k] += x1[i][j][k]\n elif axis == 2:\n output[i][j][indices[i][j][k]] += x1[i][j][k]\n return output",
- "inputs": [
- {
- "name": "x0",
- "type": "nnabla.Variable",
- "description": "N-D array which the data is added to its copy."
- },
- {
- "name": "indices",
- "type": "nnabla.Variable",
- "description": "N-D array scatter indices. The size of each dimension must be equal or smaller than that of x0 except for the specified axis. The value of indices must be smaller than the size of specified axis' dimension of x0. The size of each dimension must be equal or smaller than that of x1. Indices must not be negative."
- },
- {
- "name": "x1",
- "type": "nnabla.Variable",
- "description": "N-D array which is scattered and added to x0."
- }
- ],
- "attributes": [
- {
- "name": "axis",
- "type": "int64",
- "default": 0,
- "description": "Axis along which to index. The axis must not exceed the inputs' dimension."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array which contains the result of scatter addition. The shape is same as x0."
- }
- ],
- "category": "Shape"
- },
- {
- "name": "BoolScatter",
- "description": "Scatter the `input` according to the `mask`.\n\nGiven an input of :math:`(nnz, D_1, \\ldots, D_M)` shape and mask of :math:`(B_1, \\ldots, B_N)` shape, the function returns an output :math:`(B_1, \\ldots, B_N, D_1, \\ldots, D_M)` and :math:`nnz` is the number of non-zero elements in the mask.\n\n.. code-block:: python\n\n import numpy as np\n import nnabla as nn\n import nnabla.functions as F\n\n nn.set_auto_forward(True)\n\n input0 = nn.Variable.from_numpy_array([[1, 2], [3, 4], [5, 6]])\n mask = nn.Variable.from_numpy_array([1, 0, 1])\n output0 = F.bool_gather(input0, mask)\n \n input1 = output0 + 10\n output1 = F.bool_scatter(input1, mask)\n \n print(output1.d) # [[11, 12], [0, 0], [15, 16]] \n\nNote that the higher-order gradients of this function relies on F.gather, thus \nthe higher-order gradients of this function is normally used with the dynamic graph.",
- "inputs": [
- {
- "name": "input",
- "type": "nnabla.Variable",
- "description": "Data to be scattered."
- },
- {
- "name": "mask",
- "type": "nnabla.Variable",
- "description": "Mask with which to scatter. Non-zero/zero elements are supposed to be a binary mask as 1/0. No gradients are computed with respect to mask."
- },
- {
- "name": "output",
- "type": "nnabla.Variable",
- "option": "optional",
- "description": "Destination of output. If specified, data are inplaced."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "Scattered output."
- }
- ],
- "category": "Shape"
- },
- {
- "name": "BoolFill",
- "description": "Fill the data with the value to according to the mask.\n\n.. code-block:: python\n\n import numpy as np\n import nnabla as nn\n import nnabla.functions as F\n\n nn.set_auto_forward(True)\n\n input = nn.Variable.from_numpy_array([[np.inf, 2], [3, np.nan]])\n mask = nn.Variable.from_numpy_array([[1, 0], [0, 1]])\n output = F.bool_fill(input, mask, -1)\n \n print(output.d) # [[-1, 2], [3, -1]]",
- "inputs": [
- {
- "name": "data",
- "type": "nnabla.Variable",
- "description": "Data to be filled."
- },
- {
- "name": "mask",
- "type": "nnabla.Variable",
- "description": "Mask with which to fill. Non-zero/zero elements are supposed to be a binary mask as 1/0. No gradients are computed with respect to mask."
- }
- ],
- "attributes": [
- {
- "name": "value",
- "type": "float32",
- "default": 0.0,
- "description": "Value to fill."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "Filled output."
- }
- ],
- "category": "Shape"
- },
- {
- "name": "PackPaddedSequence",
- "description": "Pack a padded variable-length sequences.\n\nThis method packs a padded variable-length sequences.\n\n:math:`T_i` is the length of the :math:`i`-th Variable in the sequences.\n:math:`B` is the batch size equal to the length of the sequences.\n:math:`T` is the max of :math:`T_i` for all :math:`i`.\n:math:`*` is the remaining dimensions including none.\n\n.. note::\n This function assumes the length-sorted padded sequence in the decreasing order\n and must be used by :func:`~nnabla.utils.rnn.pack_padded_sequence` in the dynamic computation mode.\n See :",
- "inputs": [
- {
- "name": "padded_sequence",
- "type": "nnabla.Variable",
- "description": "Padded sequence of (:math:`T \\times B \\times *`) or (:math:`B \\times T \\times *`) shape."
- },
- {
- "name": "lengths",
- "type": "nnabla.Variable",
- "description": "Sequence length for each batch and always resides in CPU."
- }
- ],
- "attributes": [
- {
- "name": "batch_first",
- "type": "boolean",
- "default": false,
- "description": "`padded_sequence` is of (:math:`T`, :math:`B`, :math:`*`) shape if False,\notherwise (:math:`B`, :math:`T`, :math:`*`)."
- }
- ],
- "outputs": [
- {
- "name": "pack_sequence",
- "type": "nnabla.Variable",
- "description": "Packed sequence of (:math:`N`, :math:`*`) shape."
- },
- {
- "name": "batch_sizes",
- "type": "nnabla.Variable",
- "description": "Batch size for each time and always resides in CPU."
- }
- ],
- "category": "Shape"
- },
- {
- "name": "PadPackedSequence",
- "description": "Pad packed sequence.\n\nThis method unpacks the packed sequqnce and pad it, the inverse operation of :func:`pack_padded_sequence`.\n\n:math:`T_i` is the length of the :math:`i`-th Variable in the sequences.\n:math:`B` is the batch size equal to the length of the sequences.\n:math:`T` is the max of :math:`T_i` for all :math:`i`.\n:math:`*` is the remaining dimensions including none.\n\n.. note::\n This function assumes the output of the length-sorted padded sequence in the decreasing order\n and must be used by :func:`~nnabla.utils.rnn.pad_packed_sequence` in the dynamic computation mode.",
- "inputs": [
- {
- "name": "packed_sequence",
- "type": "nnabla.Variable",
- "description": "Packed sequence of (:math:`N`, :math:`*`) shape."
- },
- {
- "name": "batch_sizes",
- "type": "nnabla.Variable",
- "description": "Batch size for each time and always resides in CPU."
- }
- ],
- "attributes": [
- {
- "name": "batch_first",
- "type": "boolean",
- "default": false,
- "description": "`padded_sequence` is of (:math:`T`, :math:`B`, :math:`*`) shape if False,\notherwise (:math:`B`, :math:`T`, :math:`*`)."
- },
- {
- "name": "padding_value",
- "type": "float32",
- "default": 0.0,
- "description": "Padding value."
- },
- {
- "name": "total_length",
- "type": "int64",
- "default": -1,
- "description": "If not None, the outputs are padded up to the `total_length`.\nIf the `total_length` is less than the max length in the `sequences`,\nthe error is thrown."
- }
- ],
- "outputs": [
- {
- "name": "padded_sequence",
- "type": "nnabla.Variable",
- "description": "Padded sequence of (:math:`T \\times B \\times *`) or (:math:`B \\times T \\times *`) shape."
- },
- {
- "name": "lengths",
- "type": "nnabla.Variable",
- "description": "Sequence length for each batch and always resides in CPU."
- }
- ],
- "category": "Shape"
- },
- {
- "name": "NonZero",
- "description": "Find indices of non-zero elements.\n\nNonZero behaves similar to NonZero Operator in ONNX.\n\nExamples:\n\n>>> import numpy as np, nnabla as nn, nnabla.functions as F\n>>> nn.set_auto_forward(True)\n>>> x = F.arange(1, 10).reshape([3, 3])\n>>> x.d[0, 1] = x.d[1, 2] = x.d[2, 2] = 0\n>>> print(x.d)\n[[1. 0. 3.],\n [4. 5. 0.],\n [7. 8. 0.]]\n>>> y = F.nonzero(x)\n>>> print(y.shape)\n(2, 6)\n>>> print(y.d)\n[[0 0 1 1 2 2],\n [0 2 0 1 0 1]]\n\nNote that this function is normally used with the dynamic graph \nsince this function outputs a variable-length output. If used with \nthe static graph, a network has to be constructed all time in iteration.",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "N-D arrays."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array indices."
- }
- ],
- "category": "Shape"
- },
- {
- "name": "Interpolate",
- "description": "Resize an ND array with interpolation.\n\nThe last ``len(output_size)`` dimensions of the input ``x`` are considered as the spatial dimensions to be resized.",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "N-D array."
- }
- ],
- "attributes": [
- {
- "name": "output_size",
- "required": true,
- "type": "int64[]",
- "description": "Output size."
- },
- {
- "name": "mode",
- "required": true,
- "type": "string",
- "description": "Interpolation mode chosen from ('nearest'|'linear')."
- },
- {
- "name": "align_corners",
- "type": "boolean",
- "default": true,
- "description": "If true, the corner pixels of input and output arrays are aligned, such that the output corner pixels have the same values with the input corner pixels. The default is ``None``, and it becomes `True` if mode is 'linear', otherwise `False`."
- },
- {
- "name": "half_pixel",
- "type": "boolean",
- "default": false,
- "description": "If true, in the coordinate transformation, 0.5 is added to the output coordinate and 0.5 is subtracted from the input coordinate after scaling."
- },
- {
- "name": "half_pixel_for_nn",
- "type": "boolean",
- "default": false,
- "description": "This is a special argument to support the backward-compatibility of the nearest neighbor interpolation. Default is `False`. When in ``True``, the implementation of nearest neighbor interpolation is the old one."
- },
- {
- "name": "channel_last",
- "type": "boolean",
- "default": false,
- "description": "If True, the last dimension is considered as channel dimension, a.k.a NHWC order."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array."
- }
- ]
- },
- {
- "name": "ONNXResize",
- "description": "Resize an ND array with interpolation. This function provides a \ncompatible interface to ONNX Resize.\n\nReferences:\n * `ONNX Operators documentation.\n <https://github.com/onnx/onnx/blob/main/docs/Operators.md>`",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "N-D array."
- }
- ],
- "attributes": [
- {
- "name": "roi",
- "type": "float32[]",
- "default": "()",
- "description": "RoIs for tf_crop_and_resize."
- },
- {
- "name": "scales",
- "type": "float32[]",
- "default": "()",
- "description": "Scale factors along axes."
- },
- {
- "name": "sizes",
- "type": "int64[]",
- "default": "()",
- "description": "Output size."
- },
- {
- "name": "mode",
- "type": "string",
- "default": "nearest",
- "description": "Interpolation mode chosen from ('nearest'|'linear'|'cubic')."
- },
- {
- "name": "coordinate_transformation_mode",
- "type": "string",
- "default": "half_pixel",
- "description": "How to transform the coordinate in the resized tensor to the coordinate in the original tensor. This mode is chosen from ('half_pixel'|'pytorch_half_pixel'|'align_corners'|'asymmetric'|'tf_crop_and_resize')."
- },
- {
- "name": "cubic_coeff_a",
- "type": "float32",
- "default": -0.75,
- "description": "The coefficient used in cubic interpolation."
- },
- {
- "name": "exclude_outside",
- "type": "int64",
- "default": 0,
- "description": "Whether to set coefficients to zero when sampling locations is outside the input tensor."
- },
- {
- "name": "extrapolation_value",
- "type": "float32",
- "default": 0.0,
- "description": "An extrapolation value used when a sampling location is outside the input tensor at tf_crop_and_resize mode."
- },
- {
- "name": "nearest_mode",
- "type": "string",
- "default": "round_prefer_floor",
- "description": "Rounding mode for nearest-neighbor interpolation."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array."
- }
- ]
- },
- {
- "name": "FFT",
- "description": "Complex-to-complex Discrete Fourier Transform,\n\n.. math::\n\n X_{k_1, \\ldots, k_d} = \\sum_{n_1=0}^{N_1-1} \\dots \\sum_{n_d=0}^{N_d-1} x_{n_1, \\ldots, n_d} \\exp\\left(-2 \\pi j \\left( \\sum_{i=0}^{d} \\frac{k_i n_i}{N_i} \\right) \\right),\n\nwhere\n\n.. math::\n\n k_i = 0, \\ldots, N_i - 1.\n\nThis function now supports 1-D, 2-D, and 3-D DFT with or without the leading batch dimension(s).\n\nThe input is expected to be complex-valued with at least signal_ndim + 1 dimensions.\nThe last dimension has a shape of two where x[..., 0] is the real part and x[..., 1] the imaginary part.\n\nExample:\n\n.. code-block:: python\n\n import numpy as np\n import nnabla as nn\n import nnabla.functions as F\n from nnabla.ext_utils import get_extension_context\n\n ctx = get_extension_context(\"cudnn\")\n nn.set_default_context(ctx)\n\n # Example for a batched 2D-FFT and 2D-IFFT (batch-size: 2, data-size: 4x3)\n x_data = np.random.rand(2, 4, 3) + 1j * np.random.rand(2, 4, 3)\n x = nn.Variable.from_numpy_array(np.stack([np.real(x_data), np.imag(x_data)], axis=3))\n y = F.fft(x, signal_ndim=2, normalized=True)\n z = F.ifft(y, signal_ndim=2, normalized=True)\n z.forward()\n\n np.allclose(z.d[..., 0] + 1j*z.d[...,1], x_data)",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "Input."
- }
- ],
- "attributes": [
- {
- "name": "signal_ndim",
- "required": true,
- "type": "int64",
- "description": "The number of dimensions for each signal. It must be 1, 2, or 3."
- },
- {
- "name": "normalized",
- "type": "boolean",
- "default": false,
- "description": "Use unitary normalization. If `True`, the normalization constant :math:`\\sqrt{\\frac{1}{\\prod_{i=1}^{d} N_i}}` is multiplied."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "FFT transformed signal."
- }
- ]
- },
- {
- "name": "IFFT",
- "description": "Complex-to-complex inverse Discrete Fourier Transform,\n\n.. math::\n\n X_{k_1, \\ldots, k_d} = \\frac{1}{\\prod_{i=1}^{d} N_i} \\sum_{n_1=0}^{N_1-1} \\dots \\sum_{n_d=0}^{N_d-1} x_{n_1, \\ldots, n_d} \\exp\\left(2 \\pi j \\left( \\sum_{i=0}^{d} \\frac{k_i n_i}{N_i} \\right) \\right),\n\nwhere\n\n.. math::\n\n k_i = 0, \\ldots, N_i - 1.\n\nThis function now supports 1-D, 2-D, and 3-D DFT with or without the leading batch dimension(s).\n\nThe input is expected to be complex-valued with at least signal_ndim + 1 dimensions.\nThe last dimension has a shape of two where x[..., 0] is the real part and x[..., 1] the imaginary part.",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "Input."
- }
- ],
- "attributes": [
- {
- "name": "signal_ndim",
- "required": true,
- "type": "int64",
- "description": "The number of dimensions for each signal. It must be 1, 2, or 3."
- },
- {
- "name": "normalized",
- "type": "boolean",
- "default": false,
- "description": "Use unitary normalization. If `True`, the normalization constant :math:`\\frac{1}{\\prod_{i=1}^{d} N_i}` becomes :math:`\\sqrt{\\frac{1}{\\prod_{i=1}^{d} N_i}}`."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "IFFT transformed signal."
- }
- ]
- },
- {
- "name": "STFT",
- "description": "Short-time Fourier transform.",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "Time domain sequence of size `batch_size x sample_size`."
- }
- ],
- "attributes": [
- {
- "name": "window_size",
- "required": true,
- "type": "int64",
- "description": "Size of STFT analysis window."
- },
- {
- "name": "stride",
- "required": true,
- "type": "int64",
- "description": "Number of samples that we shift the window, also called `hop size`."
- },
- {
- "name": "fft_size",
- "required": true,
- "type": "int64",
- "description": "Size of the FFT, the output will have `fft_size // 2+ 1` frequency bins."
- },
- {
- "name": "window_type",
- "type": "string",
- "default": "hanning",
- "description": "Analysis window, can be either `hanning`, `hamming` or `rectangular`."
- },
- {
- "name": "center",
- "type": "boolean",
- "default": true,
- "description": "If `True`, then the signal `x` is padded by half the FFT size using reflection padding."
- },
- {
- "name": "pad_mode",
- "type": "string",
- "default": "reflect",
- "description": "Padding mode, which can be `'constant'` or `'reflect'`. `'constant'` pads with `0`."
- },
- {
- "name": "as_istft_backward",
- "type": "boolean",
- "default": false,
- "description": "If `True`, then forward execution behaves as backward execution of ISTFT, treating input `x` as output gradient of ISTFT and outputs `y_r` and `y_i` as inputs gradient of ISTFT. This option is only used in nn.grad operator."
- }
- ],
- "outputs": [
- {
- "name": "y_r",
- "type": "nnabla.Variable",
- "description": "Real part of STFT of size `batch_size x fft_size//2 + 1 x frame_size`."
- },
- {
- "name": "y_i",
- "type": "nnabla.Variable",
- "description": "Imaginary part of STFT of size `batch_size x fft_size//2 + 1 x frame_size`."
- }
- ]
- },
- {
- "name": "ISTFT",
- "description": "Inverse short-time Fourier transform.\n\n.. note::\n We use a constant square inverse window for the reconstruction of the time-domain signal, therefore, the first and last `window_size - stride` are not perfectly reconstructed.",
- "inputs": [
- {
- "name": "y_r",
- "type": "nnabla.Variable",
- "description": "Real part of STFT of size `batch_size x fft_size//2 + 1 x frame_size`."
- },
- {
- "name": "y_i",
- "type": "nnabla.Variable",
- "description": "Imaginary part of STFT of size `batch_size x fft_size//2 + 1 x frame_size`."
- }
- ],
- "attributes": [
- {
- "name": "window_size",
- "required": true,
- "type": "int64",
- "description": "Size of STFT analysis window."
- },
- {
- "name": "stride",
- "required": true,
- "type": "int64",
- "description": "Number of samples that we shift the window, also called `hop size`."
- },
- {
- "name": "fft_size",
- "required": true,
- "type": "int64",
- "description": "Size of the FFT, the output will have `fft_size // 2+ 1` frequency bins."
- },
- {
- "name": "window_type",
- "type": "string",
- "default": "hanning",
- "description": "Analysis window, can be either `hanning`, `hamming` or `rectangular`."
- },
- {
- "name": "center",
- "type": "boolean",
- "default": true,
- "description": "If `True`, then the signal `x` is padded by half the FFT size using reflection padding."
- },
- {
- "name": "pad_mode",
- "type": "string",
- "default": "reflect",
- "description": "Padding mode corresponding to STFT `pad_mode`, which can be `'constant'` or `'reflect'`. `'constant'` pads with `0`. This option is ignored for the normal use of ISTFT. You need to set the same `pad_mode` only when `as_stft_backward == True`."
- },
- {
- "name": "as_stft_backward",
- "type": "boolean",
- "default": false,
- "description": "If `True`, then forward execution behaves as backward execution of STFT, treating inputs `y_r` and `y_i` as outputs gradient of STFT and output `x` as input gradient of STFT. This option is only used in nn.grad operator."
- }
- ],
- "outputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "Time domain sequence of size `batch_size x sample_size`."
- }
- ]
- },
- {
- "name": "Dropout",
- "description": "Dropout.\nSamples a number :math:`u` from a uniform distribution in :math:`[0, 1]` ,\nand ignores the input if :math:`u \\leq p`.\n\n.. math::\n y = \\left\\{\n \\begin{array}{ll}\n \\frac{x}{1 - p} & (u > p) \\\\\n 0 & ({\\rm otherwise})\n \\end{array} \\right.\n\nNote:\n Usually dropout only applied during training as below\n (except `MC dropout`_). If you want to use dropout as an MC dropout, remove 'if train:'.\n\n .. code-block:: python\n\n h = PF.affine(x, num_hidden)\n if train:\n h = F.dropout(h, 0.5)\n\n.. _MC dropout: https://arxiv.org/abs/1506.02142",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "N-D array"
- }
- ],
- "attributes": [
- {
- "name": "p",
- "type": "float64",
- "default": 0.5,
- "description": ":math:`p` in definition."
- },
- {
- "name": "seed",
- "type": "int64",
- "default": -1,
- "description": "Random seed. When -1, seed is sampled from global random number generator."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array with the same shape as x"
- }
- ]
- },
- {
- "name": "TopKData",
- "description": "Select the `k` largest values from each sample in `x` to\npropagate unmodified and set all other values to 0. If `abs` is\nTrue, the `k` largest values are selected by magnitude. If\n`reduce` is True (the default), all feature dimensions are\nreduced to a single dimension of size `k` that propagates only\nthe `k` largest values. Otherwise, if `reduce` is False, input\nand output dimensions are identical. Dimensions before\n`base_axis` are treated as number of sample dimensions and `k`\nvalues get selected from all elements of a sample (dimensions\nfrom `base_axis`) regardless of shape.\n\n>>> import nnabla as nn, nnabla.functions as F\n>>> x = nn.Variable((4, 5, 6))\n>>> F.top_k_data(x, 3, reduce=False).shape\n(4, 5, 6)\n>>> F.top_k_data(x, 3, reduce=True).shape\n(4, 3)\n>>> F.top_k_data(x, 3, reduce=True, base_axis=2).shape\n(4, 5, 3)",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "N-D array"
- }
- ],
- "attributes": [
- {
- "name": "k",
- "required": true,
- "type": "int64",
- "description": "Number of largest data values to propagate."
- },
- {
- "name": "abs",
- "type": "boolean",
- "default": false,
- "description": "Determine largest data values by magnitude."
- },
- {
- "name": "reduce",
- "type": "boolean",
- "default": true,
- "description": "Reduce feature size to one dimension of size `k`."
- },
- {
- "name": "base_axis",
- "type": "int64",
- "default": 1,
- "description": "First dimension of the sample shape."
- },
- {
- "name": "largest",
- "type": "boolean",
- "default": true,
- "description": "Whether to select the `k` largest or smallest values."
- },
- {
- "name": "with_index",
- "type": "boolean",
- "default": false,
- "description": "Return top-k values and indices."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array."
- },
- {
- "name": "indices",
- "type": "nnabla.Variable",
- "description": "N-D array of top-k indices."
- }
- ]
- },
- {
- "name": "TopKGrad",
- "description": "Select the `k` largest gradients for each sample in `x` to\nback-propagate unmodified and set all other gradients to 0. If\n`abs` is True, the `k` largest gradients are selected by\nmagnitude. Dimensions before `base_axis` are treated as number\nof sample dimensions and `k` gradients get selected from all\ngradients of a sample (dimensions from `base_axis`) regardless\nof shape.",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "N-D array"
- }
- ],
- "attributes": [
- {
- "name": "k",
- "required": true,
- "type": "int64",
- "description": "Number of largest gradients to propagate."
- },
- {
- "name": "abs",
- "type": "boolean",
- "default": false,
- "description": "Determine largest gradients by magnitude."
- },
- {
- "name": "base_axis",
- "type": "int64",
- "default": 1,
- "description": "First dimension of the sample shape."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array with same shape and data as `x`."
- }
- ]
- },
- {
- "name": "Rand",
- "description": "Samples numbers from a uniform distribution :math:`x \\sim U(low, high)`\ngiven lowest value :math:`low`, upper bound :math:`high`,\nand shape of the returned Variable.",
- "attributes": [
- {
- "name": "low",
- "type": "float32",
- "default": 0.0,
- "description": ":math:`low` in definition."
- },
- {
- "name": "high",
- "type": "float32",
- "default": 1.0,
- "description": ":math:`high` in definition."
- },
- {
- "name": "shape",
- "type": "shape",
- "default": "[]",
- "description": "Shape of returned variable."
- },
- {
- "name": "seed",
- "type": "int64",
- "default": -1,
- "description": "Random seed. When -1, seed is sampled from global random number generator."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "Variable with the shape specified in the argument."
- }
- ]
- },
- {
- "name": "Randint",
- "description": "Samples integer numbers from a uniform distribution :math:`x \\sim U(low, high)`\ngiven lowest value :math:`low`, upper bound :math:`high`, and the shape of the returned Variable. The lowest\nvalue :math:`low` is included in the range, while the upper bound :math:`high` is excluded, corresponding to the half-open\ninterval :math:`[low, high)`.",
- "attributes": [
- {
- "name": "low",
- "type": "int64",
- "default": 0,
- "description": ":math:`low` in definition."
- },
- {
- "name": "high",
- "type": "int64",
- "default": 1,
- "description": ":math:`high` in definition."
- },
- {
- "name": "shape",
- "type": "shape",
- "default": "[]",
- "description": "Shape of returned variable."
- },
- {
- "name": "seed",
- "type": "int64",
- "default": -1,
- "description": "Random seed. When -1, seed is sampled from global random number generator."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "Variable with the shape specified in the argument. The dtype is int32."
- }
- ]
- },
- {
- "name": "Randn",
- "description": "Samples numbers from a normal distribution :math:`x \\sim N(\\mu, \\sigma)`\ngiven mean :math:`\\mu`, standard deviation :math:`\\sigma`,\nand shape of the returned Variable.",
- "attributes": [
- {
- "name": "mu",
- "type": "float32",
- "default": 0.0,
- "description": ":math:`\\mu` in definition."
- },
- {
- "name": "sigma",
- "type": "float32",
- "default": 1.0,
- "description": ":math:`\\sigma` in definition."
- },
- {
- "name": "shape",
- "type": "shape",
- "default": "[]",
- "description": "Shape of returned variable."
- },
- {
- "name": "seed",
- "type": "int64",
- "default": -1,
- "description": "Random seed. When -1, seed is sampled from global random number generator."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "Variable with the shape specified in the argument."
- }
- ]
- },
- {
- "name": "RandBinomial",
- "description": "Samples numbers from a binomial distribution :math:`x \\sim B(n, p)`\ngiven the numbers of trials :math:`n`, probability :math:`p`,\nand shape of the returned Variable.\nWhen :math:`n = 1`, this behaves like the Bernoulli distriburion.",
- "attributes": [
- {
- "name": "n",
- "type": "int64",
- "default": 1,
- "description": ":math:`n` in definition, the number of trials."
- },
- {
- "name": "p",
- "type": "float32",
- "default": 0.5,
- "description": ":math:`p` in definition, probability of success."
- },
- {
- "name": "shape",
- "type": "shape",
- "default": "[]",
- "description": "Shape of returned variable."
- },
- {
- "name": "seed",
- "type": "int64",
- "default": -1,
- "description": "Random seed. When -1, seed is sampled from global random number generator."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "Variable with the shape specified in the argument."
- }
- ]
- },
- {
- "name": "RandBeta",
- "description": "Samples numbers from a beta distribution :math:`x \\sim \\beta(\\alpha, \\beta)`.",
- "attributes": [
- {
- "name": "alpha",
- "type": "float32",
- "default": 0.5,
- "description": ":math:`\\alpha`, scale parameter."
- },
- {
- "name": "beta",
- "type": "float32",
- "default": 0.5,
- "description": ":math:`\\beta`, scale parameter."
- },
- {
- "name": "shape",
- "type": "shape",
- "default": "[]",
- "description": "Shape of returned variable."
- },
- {
- "name": "seed",
- "type": "int64",
- "default": -1,
- "description": "Random seed. When -1, seed is sampled from global random number generator."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "Variable with the shape specified in the argument."
- }
- ]
- },
- {
- "name": "RandGamma",
- "description": "Samples numbers from a gamma distribution :math:`x \\sim \\frac {\\gamma(k, \\frac {x}{\\theta})}{\\Gamma(k)}`.",
- "attributes": [
- {
- "name": "k",
- "type": "float32",
- "default": 0.5,
- "description": "k, scale parameter."
- },
- {
- "name": "theta",
- "type": "float32",
- "default": 1.0,
- "description": ":math:`\\theta`, scale parameter."
- },
- {
- "name": "shape",
- "type": "shape",
- "default": "[]",
- "description": "Shape of returned variable."
- },
- {
- "name": "seed",
- "type": "int64",
- "default": -1,
- "description": "Random seed. When -1, seed is sampled from global random number generator."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "Variable with the shape specified in the argument."
- }
- ]
- },
- {
- "name": "RandomChoice",
- "description": "Generate random samples from population `x` with selection probabilities\ndetermined by the relative weights `w`. The number of samples to draw is\ngiven by the product of `shape`\\s dimensions, and the samples are returned\nwith the given `shape`. By default, samples are drawn with replacement,\ni.e. selection of a specific population member is solely determined by\nits associated weight. Sampling without replacement, where any population\nmember may be drawn only once, is used if `replace` is set to False.\n\nFor both `x` and `w` the innermost dimension corresponds to the individual\npopulations and their weights from which samples are returned with the\nrequested `shape` following all outermost dimensions of the input.\n\n.. code-block:: python\n\n import nnabla as nn\n import nnabla.functions as F\n import numpy as np\n nn.set_auto_forward(True)\n\n # x holds two populations\n x = nn.Variable.from_numpy_array(np.array([[11, 22, 33], [110, 220, 330]]))\n # w holds the weights for each population\n w = nn.Variable.from_numpy_array(np.array([[10, 20, 70], [70, 20, 10]]))\n\n # draw one sample from each population\n y = F.random_choice(x, w) # y.shape => (2, 1)\n\n # draw 12 samples with shape (3, 4) from each population\n y = F.random_choice(x, w, shape=(3, 4)) # y.shape => (2, 3, 4)\n\nNote that weights must not be less than zero and for each population the\nsum of weights must be greater than zero. Additionally, sampling without\nreplacement requires that the number of non-zero weights is not less than\nthe number of samples to be drawn. These conditions are verified in \"cpu\"\ncomputation context but not when using \"cuda\" or \"cudnn\" acceleration\n(this would require additional device synchronization steps penalizing\nperformance).\n\nRandom sampling from an implicit array of index values (like categorical\nor multinomial) can be realized with input `x` constructed as indices.\n\n.. 
code-block:: python\n\n w = nn.Variable.from_numpy_array(np.array([1, 2, 3, 2, 1]))\n y = F.random_choice(F.arange(0, 5), w)",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "N-D array from which a random sample is generated."
- },
- {
- "name": "w",
- "type": "nnabla.Variable",
- "description": "N-D array of associated weights of elements in `x`."
- }
- ],
- "attributes": [
- {
- "name": "shape",
- "type": "shape",
- "default": "[]",
- "description": "Number and shape of generated samples."
- },
- {
- "name": "replace",
- "type": "boolean",
- "default": true,
- "description": "Whether sampling is with or without replacement."
- },
- {
- "name": "seed",
- "type": "int64",
- "default": -1,
- "description": "Random seed."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array"
- }
- ]
- },
- {
- "name": "RandomCrop",
- "description": "RandomCrop randomly extracts a portion of an array.",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "N-D array"
- }
- ],
- "attributes": [
- {
- "name": "shape",
- "type": "shape",
- "default": "x.shape",
- "description": "The data size to extract. For example, to randomly extract a portion of the image (3,48,48) from a 3,64,64 image, specify (3,48,48)."
- },
- {
- "name": "base_axis",
- "type": "int64",
- "default": 1,
- "description": "No Description"
- },
- {
- "name": "seed",
- "type": "int64",
- "default": -1,
- "description": "Random seed. When -1, seed is sampled from global random number generator."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array"
- }
- ]
- },
- {
- "name": "RandomFlip",
- "description": "Reverses the order of elements of the specified dimension of an array at 50% probability.",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "N-D array"
- }
- ],
- "attributes": [
- {
- "name": "axes",
- "type": "int64[]",
- "default": "[len(x.shape) - 1]",
- "description": "The index of the axis to reverse the order of the elements. Axis indices take on values 0, 1, 2, and so on from the left. For example, to flip a 32 (W) by 24 (H) 100 RGB images (100, 3,24,32) vertically and horizontally at random, specify (2,3)."
- },
- {
- "name": "base_axis",
- "type": "int64",
- "default": 1,
- "description": "No Description"
- },
- {
- "name": "seed",
- "type": "int64",
- "default": -1,
- "description": "Random seed. When -1, seed is sampled from global random number generator."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array"
- }
- ]
- },
- {
- "name": "RandomShift",
- "description": "Randomly shifts the array elements within the specified range.",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "N-D array."
- }
- ],
- "attributes": [
- {
- "name": "shifts",
- "type": "int64[]",
- "default": "(0,) * len(x.shape)",
- "description": "Max absolute amount to shift elements. For example, to shift image data horizontally by :math:`\\pm 2` pixels and vertically by :math:`\\pm 3` pixels, specify (3,2)."
- },
- {
- "name": "border_mode",
- "type": "string",
- "default": "nearest",
- "description": "Specify how to process the ends of arrays whose values will be undetermined as a result of shifting. nearest: The data at the ends of the original array is copied and used. reflect: Original data reflected at the ends of the original array is used. constant: Constant value is used."
- },
- {
- "name": "constant_value",
- "type": "float32",
- "default": 0.0,
- "description": "Value used for outside of the original array if border_mode='constant'."
- },
- {
- "name": "base_axis",
- "type": "int64",
- "default": 1,
- "description": "No Description"
- },
- {
- "name": "seed",
- "type": "int64",
- "default": -1,
- "description": "Random seed. When -1, seed is sampled from global random number generator."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array."
- }
- ]
- },
- {
- "name": "RandomErase",
- "description": "Randomly erase patches of the inputs and replace with random values.\n\nErasing is applied for each sample and for each `n` with the given probability, the randomly\nselected area ratio and aspect ratio if `share` is `True`;\notherwise (`share`=`False`), for each feature additionally.\n\nRandom patch are selected by random coordinates as the following,\n\n.. math::\n\n S_e &&= Uniform(s_l, s_h) \\times S \\\\\n r_e &&= Uniform(r_l, r_h) \\\\\n H_e &&= \\sqrt{S_e \\times r_e} \\\\\n W_e &&= \\sqrt{S_e / r_e} \\\\\n y_e &&= Uniform(0, H - H_e) \\\\\n x_e &&= Uniform(0, W - W_e),\n\nwhere :math:`S` is the area, :math:`s_l` and :math:`s_h` are the low and high values of\nthe area ratio range, :math:`r_l` and :math:`r_h` are the low and high values\nof the aspect ratio range, :math:`H_e` and :math:`W_e` are height and width of a patch,\nand :math:`y_e` and :math:`x_e` are the start coordinates of a patch. If a pixel of the inputs\nfalls in this patch, the value of that pixel is replaced with a random value in `replacements`\nrange.\n\nBackward is implemented as passing gradients if `ste_fine_grained` is False; otherwise,\nthe backward only occurs in regions not erased.\n\nReferences:\n\n * `Zhun Zhong, Liang Zheng, Guoliang Kang, Shaozi Li, Yi Yang,\n Random Erasing Data Augmentation,\n <https://arxiv.org/abs/1708.04896>`_",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "N-D array."
- }
- ],
- "attributes": [
- {
- "name": "prob",
- "type": "float32",
- "default": 0.5,
- "description": "Probability to erase."
- },
- {
- "name": "area_ratios",
- "type": "float32[]",
- "default": "(0.02, 0.4)",
- "description": "Low and high of the area ratio range."
- },
- {
- "name": "aspect_ratios",
- "type": "float32[]",
- "default": "(0.3, 3.3333)",
- "description": "Low and high of the aspect ratios range."
- },
- {
- "name": "replacements",
- "type": "float32[]",
- "default": "(0.0, 255.0)",
- "description": "Low and high of the replacement value range."
- },
- {
- "name": "n",
- "type": "int64",
- "default": 1,
- "description": "Max number of patches to be erased."
- },
- {
- "name": "share",
- "type": "boolean",
- "default": true,
- "description": "Use a same bounding box randomly picked over the feature dimension when being True. Default is True."
- },
- {
- "name": "inplace",
- "type": "boolean",
- "default": false,
- "description": "This option is obsolete and ignored. Output is never in-placed with input."
- },
- {
- "name": "base_axis",
- "type": "int64",
- "default": 1,
- "description": "Dimensions up to base_axis is treated as sample dimension."
- },
- {
- "name": "seed",
- "type": "int64",
- "default": -1,
- "description": "Random seed. When -1, seed is sampled from global random number generator."
- },
- {
- "name": "channel_last",
- "type": "boolean",
- "default": false,
- "description": "If True, the last dimension is considered as channel dimension, a.k.a NHWC order."
- },
- {
- "name": "ste_fine_grained",
- "type": "boolean",
- "default": true,
- "description": "Straight Through Estimator is fine-grained or not. Default is True."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array."
- }
- ]
- },
- {
- "name": "ImageAugmentation",
- "description": "ImageAugmentation randomly alters the input image.",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "N-D array."
- }
- ],
- "attributes": [
- {
- "name": "shape",
- "type": "shape",
- "default": "x.shape",
- "description": "The output image data size."
- },
- {
- "name": "pad",
- "type": "shape",
- "default": "(0, 0)",
- "description": "Border padding values for each spatial axis. Padding will be added both sides of the dimension."
- },
- {
- "name": "min_scale",
- "type": "float32",
- "default": 1.0,
- "description": "The minimum scale ratio when randomly scaling the image. For example, to scale down to 0.8 times the size of the original image, specify \"0.8\". To not apply random scaling, set both min_scale and max_scale to \"1.0\"."
- },
- {
- "name": "max_scale",
- "type": "float32",
- "default": 1.0,
- "description": "The maximum scale ratio when randomly scaling the image. For example, to scale down to 2 times the size of the original image, specify \"2.0\"."
- },
- {
- "name": "angle",
- "type": "float32",
- "default": 0.0,
- "description": "The rotation angle range in radians when randomly rotating the image. The image is randomly rotated in the -Angle to +Angle range. For example, to rotate in a +-15 degree range, specify \"0.26\" (15 degrees/360 degrees * 2PI). To not apply random rotation, specify \"0.0\"."
- },
- {
- "name": "aspect_ratio",
- "type": "float32",
- "default": 1.0,
- "description": "The aspect ratio range when randomly deforming the image. For example, to deform aspect ratio of image from 1:1.3 to 1.3:1, specify \"1.3\". To not apply random deforming, specify \"1.0\"."
- },
- {
- "name": "distortion",
- "type": "float32",
- "default": 0.0,
- "description": "The distortion range when randomly distorting the image. To not apply distortion, specify \"0.0\"."
- },
- {
- "name": "flip_lr",
- "type": "boolean",
- "default": false,
- "description": "Whether to randomly flip the image horizontally at 50% probability."
- },
- {
- "name": "flip_ud",
- "type": "boolean",
- "default": false,
- "description": "Whether to randomly flip the image vertically at 50% probability."
- },
- {
- "name": "brightness",
- "type": "float32",
- "default": 0.0,
- "description": "The absolute range of values to randomly add to the brightness. A random value in the -Brightness to +Brightness range is added to the brightness. For example, to vary the brightness in the -0.05 to +0.05 range, specify \"0.05\". To not apply random addition to brightness, specify \"0.0\"."
- },
- {
- "name": "brightness_each",
- "type": "boolean",
- "default": false,
- "description": "Whether to apply the random addition to brightness (as specified by brightness) to each color channel. True: brightness is added based on a different random number for each channel. False: brightness is added based on a random number common to all channels."
- },
- {
- "name": "contrast",
- "type": "float32",
- "default": 1.0,
- "description": "The range in which to randomly vary the image contrast. The contrast is varied in the 1/Contrast times to Contrast times range. The output brightness is equal to (input - contrast_center) * contrast + contrast_center. For example, to vary the contrast in the 0.91 times to 1.1 times range, specify \"1.1\". To not apply random contrast variation, specify \"1.0\"."
- },
- {
- "name": "contrast_center",
- "type": "float32",
- "default": 0.0,
- "description": "Intensity center used for applying contrast."
- },
- {
- "name": "contrast_each",
- "type": "boolean",
- "default": false,
- "description": "Whether to apply the random contrast variation (as specified by contrast) to each color channel. True: contrast is varied based on a different random number for each channel. False: contrast is varied based on a random number common to all channels."
- },
- {
- "name": "noise",
- "type": "float32",
- "default": 0.0,
- "description": "Sigma of normal random number to be added."
- },
- {
- "name": "seed",
- "type": "int64",
- "default": -1,
- "description": "Random seed. When -1, seed is sampled from global random number generator."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array."
- }
- ]
- },
- {
- "name": "SigmoidCrossEntropy",
- "description": "Element-wise cross entropy between `x` and the target variables, passed to a sigmoid function.\n\n.. math::\n y_i = - \\left(x^{(1)}_i \\ln \\left(\\sigma \\left(x^{(0)}_i \\right)\\right) + \\\n \\left(1 - x^{(1)}_i\\right) \\ln \\left(1 - \\sigma \\left(x^{(0)}_i \\\n \\right)\\right)\\right)\n\nwhere :math:`\\sigma(s)=\\frac{1}{1+\\exp(-s)}`.\n\nNote:\n SigmoidCrossEntropy is equivalent to Sigmoid+BinaryCrossEntropy, but computing them at once has the effect of reducing computational error.",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "N-D array. Typically indicates a score. The value lies in :math:`[-\\infty, \\infty]`"
- },
- {
- "name": "target",
- "type": "nnabla.Variable",
- "description": "N-D array of labels. Only 0 or 1 value is allowed."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array of element-wise losses."
- }
- ]
- },
- {
- "name": "BinaryCrossEntropy",
- "description": "Element-wise cross entropy between `x` and the target variables.\n\n.. math::\n y_i = - \\left(x^{(1)}_i * \\ln \\left(x^{(0)}_i\\right) + \\left(1 - \\\n x^{(1)}_i\\right) * \\ln \\left(1 - x^{(0)}_i\\right)\\right).",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "Probabilities N-D array. :math:`-\\infty` to :math:`\\infty`."
- },
- {
- "name": "target",
- "type": "nnabla.Variable",
- "description": "N-D array of labels. Usually set as 0 or 1, but, unlike SigmoidCrossEntropy, it allows probability (0 to 1) as inputs and backpropagation can be done."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array of element-wise losses."
- }
- ]
- },
- {
- "name": "SoftmaxCrossEntropy",
- "description": "Element-wise cross entropy between the variables and the variables of a label given by a category index with Softmax normalization.\n\n.. math::\n y_{j} = -\\ln \\left(\\frac{\\exp(x_{j,t_j})}{\\sum_{i'} \\exp(x_{j,i'})}\\right)\n\nalong dimension specified by axis (:math:`i` is the axis where normalization is performed on).\n\nNote:\n SoftmaxCrossEntropy is equivalent to Softmax+CategoricalCrossEntropy, but computing them at once has the effect of reducing computational error.",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "N-D array. Typically indicates a score. :math:`(D_1 \\times ... \\times D_i \\times ... \\times D_N)`"
- },
- {
- "name": "target",
- "type": "nnabla.Variable",
- "description": "N-D array of labels. :math:`(D_1 \\times ... \\times 1 \\times ... \\times D_N)` , each label should be the index from 0 to n-class, -1 if not belongs any class."
- }
- ],
- "attributes": [
- {
- "name": "axis",
- "type": "int64",
- "default": "len(x.shape) - 1",
- "description": "Axis normalization is taken."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array of element-wise losses. :math:`(D_1 \\times ... \\times 1 \\times ... \\times D_N)`"
- }
- ]
- },
- {
- "name": "CategoricalCrossEntropy",
- "description": "Element-wise cross entropy between `x` and the target `t` where targets are given by a category index.\n\n.. math::\n y_{j} = -\\ln \\left( x_{j, t_j} \\right)\n\nalong dimension specified by axis (:math:`i` is the axis where normalization is performed on).",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "N-D array. Typically indicates a score. :math:`(D_1 \\times ... \\times D_i \\times ... \\times D_N)`"
- },
- {
- "name": "target",
- "type": "nnabla.Variable",
- "description": "N-D array of labels. :math:`(D_1 \\times ... \\times 1 \\times ... \\times D_N)`, each label should be the index from 0 to n-class, -1 if not belongs any class."
- }
- ],
- "attributes": [
- {
- "name": "axis",
- "type": "int64",
- "default": "len(x.shape) - 1",
- "description": "Axis normalization is taken."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array of element-wise losses. :math:`(D_1 \\times ... \\times 1 \\times ... \\times D_N)`"
- }
- ]
- },
- {
- "name": "SquaredError",
- "description": "Element-wise squared error\n\n.. math::\n y_i = \\left(x^{(0)}_i - x^{(1)}_i\\right)^2.",
- "inputs": [
- {
- "name": "x0",
- "type": "nnabla.Variable",
- "description": "N-D array."
- },
- {
- "name": "x1",
- "type": "nnabla.Variable",
- "description": "N-D array."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array."
- }
- ]
- },
- {
- "name": "AbsoluteError",
- "description": "Element-wise absolute error\n\n.. math::\n y_i = | x^{(0)}_i - x^{(1)}_i |.",
- "inputs": [
- {
- "name": "x0",
- "type": "nnabla.Variable",
- "description": "N-D array."
- },
- {
- "name": "x1",
- "type": "nnabla.Variable",
- "description": "N-D array."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array."
- }
- ]
- },
- {
- "name": "HuberLoss",
- "description": "Element-wise Huber loss\n\n.. math::\n y_i= \\left\\{\n \\begin{array}{ll}\n d^2 & (|d| < \\delta)\\\\\n \\delta (2 |d| - \\delta) & ({\\rm otherwise})\n \\end{array} \\right.\n\nwhere :math:`d = x^{(0)}_i - x^{(1)}_i`",
- "inputs": [
- {
- "name": "x0",
- "type": "nnabla.Variable",
- "description": "N-D array."
- },
- {
- "name": "x1",
- "type": "nnabla.Variable",
- "description": "N-D array."
- }
- ],
- "attributes": [
- {
- "name": "delta",
- "type": "float32",
- "default": 1.0,
- "description": "Delta"
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array of element-wise losses."
- }
- ]
- },
- {
- "name": "EpsilonInsensitiveLoss",
- "description": "Element-wise Epsilon Insensitive Loss\n\n.. math::\n y_i= \\left\\{\n \\begin{array}{ll}\n | x^{(0)}_i - x^{(1)}_i | - \\epsilon & if \\ \\ | x^{(0)}_i - x^{(1)}_i | > \\epsilon \\\\\n\t\t\t0 & otherwise\n \\end{array} \\right.",
- "inputs": [
- {
- "name": "x0",
- "type": "nnabla.Variable",
- "description": "N-D array."
- },
- {
- "name": "x1",
- "type": "nnabla.Variable",
- "description": "N-D array."
- }
- ],
- "attributes": [
- {
- "name": "epsilon",
- "required": true,
- "type": "float32",
- "description": "Insensitive parameter."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array of element-wise losses."
- }
- ]
- },
- {
- "name": "KLMultinomial",
- "description": "The Kullback Leibler Divergence for multinomial distributions.\n\n.. math::\n D = \\sum_i p_i \\log \\left( \\frac{p_i}{q_i} \\right)",
- "inputs": [
- {
- "name": "p",
- "type": "nnabla.Variable",
- "description": "N-D array of the source categorical probabilities"
- },
- {
- "name": "q",
- "type": "nnabla.Variable",
- "description": "N-D array of the target categorical probabilities"
- }
- ],
- "attributes": [
- {
- "name": "base_axis",
- "type": "int64",
- "default": 1,
- "description": "Dimensions up to base_axis is treated as sample dimension."
- }
- ],
- "outputs": [
- {
- "name": "D",
- "type": "nnabla.Variable",
- "description": "Kullback Leibler divergence :math:`KL(p \\parallel q)`."
- }
- ]
- },
- {
- "name": "AffineGrid",
- "description": "Generate the source grid based on the normalized target grid with `size`.\nThe target grid is first normalized in [-1, 1], then\ntranformed by the affine transformation :math:`\\theta` to generate\nthe source grid. 2D and 3D grid are supported now.\n\nThis function is normally used with the `warp_by_grid` function for\nconstructing the spatial transformer.",
- "inputs": [
- {
- "name": "theta",
- "type": "nnabla.Variable",
- "description": "N-D array with the shape (:math:`B \\times 2 \\times 3`), the sample-wise affine transformation matrix."
- }
- ],
- "attributes": [
- {
- "name": "size",
- "required": true,
- "type": "int64[]",
- "description": "The grid size of (:math:`H \\times W`) for 2D and (:math:`D \\times H \\times W`) for 3D."
- },
- {
- "name": "align_corners",
- "type": "boolean",
- "default": false,
- "description": "If `True`, the top-left and bottom-right pixels correspond to (-1, -1) and (1, 1) respectively since a pixel is located on the corner of a grid, and the target grid is normalized in [-1, 1].\nIf `False`, the normalized target grid in [-1, 1] is scaled by `size - 1 / size` according to the respective spatial size (e.g., :math:`H` and :math:`W`) before the transformation since a pixel is located on a center of a cell in a grid."
- }
- ],
- "outputs": [
- {
- "name": "grid",
- "type": "nnabla.Variable",
- "description": "N-D array with the shape (:math:`B \\times H \\times W \\times 2`) for 2D and (:math:`B \\times D \\times H \\times W \\times 3`) for 3D. The last dimension of 2 is for (x, y) and of 3 for (x, y, z). The `gird` is used as the source grid for the warping."
- }
- ]
- },
- {
- "name": "WarpByGrid",
- "description": "Warp the input data by the grid.\nThis function is normally used with the generated normalized grid by\nthe `affine_grid` function for constructing the spatial transformer.",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "Input data to be warped with the shape (:math:`B \\times C \\times H_{in} \\times W_{in}`) for 2D and (:math:`B \\times C \\times D_{in} \\times H_{in} \\times W_{in}`) for 3D."
- },
- {
- "name": "grid",
- "type": "nnabla.Variable",
- "description": "Grid warping the input data with the shape (:math:`B \\times H_{out} \\times W_{out} \\times 2`) for 2D and (:math:`B \\times D_{out} \\times H_{out} \\times W_{out} \\times 3`) for 3D. The last dimension of 2 is for (x, y) or 3 for (x, y, z)."
- }
- ],
- "attributes": [
- {
- "name": "mode",
- "type": "string",
- "default": "linear",
- "description": "Interpolation mode, linear or nearest."
- },
- {
- "name": "padding_mode",
- "type": "string",
- "default": "zero",
- "description": "Padding mode when the grid value is outside [-1, 1]. If this is \"zero\", 0 is used for padding. \"reflect\" uses the values reflected at the ends of the original input data like the mirror. \"repeat\" used the values at the ends of the original input data."
- },
- {
- "name": "align_corners",
- "type": "boolean",
- "default": false,
- "description": "The target grid normalized in [-1, 1] is scaled by `size - 1 / size` according to the respective spatial size (e.g., :math:`H` and :math:`W`) before the transformation if this is `False`. If this is `True`, the top-left and bottom-right pixels correspond to (-1, -1) and (1, 1) respectively."
- },
- {
- "name": "channel_last",
- "type": "boolean",
- "default": false,
- "description": "If True, the last dimension is considered as channel dimension, a.k.a NHWC order."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "Output data warped by the grid."
- }
- ]
- },
- {
- "name": "WarpByFlow",
- "description": "Transform the image(s) *data* by *flow* field(s) of offset vectors such\nthat each output pixel corresponds to the input image pixel at the\nrelative offset location given by horizontal and vertical flow values\n(in other words, the flow field describes the coordinate displacements\nfor each output pixel to the corresponding input pixel). Both *data* and\n*flow* are 4-D variables (in \"NCHW\" layout) with identical shape except\nthe *flow* channel dimension (which is always 2).\n\n.. math::\n output_{n,c,y,x} = data_{n,c,y',x'},\n\nwhere\n\n.. math::\n y' &=& y + flow_{n,1,y,x}, \\\\\n x' &=& x + flow_{n,0,y,x}.\n\nThe output pixel values at :math:`y'` and :math:`x'` locations are\nobtained by bilinear interpolating between the 4 closest pixels of the\ninput image. Pixel values outside of the input image are implicitly\npadded with the value of the closest boundary pixel.",
- "inputs": [
- {
- "name": "data",
- "type": "nnabla.Variable",
- "description": "Input image data with shape `(N, Channels, Height, Width)`."
- },
- {
- "name": "flow",
- "type": "nnabla.Variable",
- "description": "Flow field vectors with shape `(N, 2, Height, Width)`."
- }
- ],
- "outputs": [
- {
- "name": "warped_image",
- "type": "nnabla.Variable",
- "description": "Transformed image data with shape `(N, Channels, Height, Width)`."
- }
- ]
- },
- {
- "name": "BinarySigmoid",
- "description": "Element-wise binary sigmoid function. In the forward pass, it computes\n\n.. math::\n f(x) = \\begin{cases}\n 1 & (x > 0) \\\\\n 0 & ({\\rm otherwise})\\end{cases},\n\nbut in the backward pass, a straight-through approximation of the gradient\nis used, i.e.,\n\n.. math::\n \\frac{\\partial f(x)}{\\partial x} =\n \\begin{cases}\n 0 & (|x| \\geq 1) \\\\\n \\frac{1}{2} & ({\\rm otherwise})\n \\end{cases}.\n\nReferences:\n\n * `Courbariaux, Matthieu, and Yoshua Bengio. Binarynet: Training deep\n neural networks with weights and activations constrained to+ 1 or-1.\n <https://arxiv.org/abs/1602.02830>`_",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "Input ."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "Output."
- }
- ]
- },
- {
- "name": "BinaryTanh",
- "description": "Element-wise binary tanh function. In the forward pass, it computes\n\n.. math::\n f(x) = \\begin{cases}\n 1 & (x > 0) \\\\\n -1 & ({\\rm otherwise})\n \\end{cases},\n\nbut in the backward pass, a straight-through approximation of the gradient\nis used, i.e.,\n\n.. math::\n \\frac{\\partial f(x)}{\\partial x} =\n \\begin{cases}\n 0 & (|x| \\geq 1) \\\\\n 1 & ({\\rm otherwise}) \\end{cases}.\n\nReferences:\n\n * `Courbariaux, Matthieu, and Yoshua Bengio. Binarynet: Training deep\n neural networks with weights and activations constrained to+ 1 or-1.\n <https://arxiv.org/abs/1602.02830>`_",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "Input ."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "Output."
- }
- ]
- },
- {
- "name": "BinaryConnectAffine",
- "description": "This function provides a BinaryConnect affine layer. It computes in\nthe forward pass\n\n.. math::\n\n y_j = \\sum_{i} sign(w_{j,i}) x_i,\n\ni.e., the weights :math:`w_{j,i}` are binarized to :math:`sign(w_{j,i})` and,\nhence, each weight is in :math:`\\{-1,\\,1\\}`. By this weight binarization, the\ninner product computations do not require any multiplications anymore as\nthey turn into additions/subtractions.\n\nThis function should be used together with\n:meth:`~nnabla.functions.batch_normalization`.\n\n.. note::\n\n 1) If you would like to share the binary weights between other\n layers, please use the standard, floating value weights (`weight`)\n and not the binary weights (`binary_weight`).\n\n 2) The weights and the binary weights become in sync only after a call to\n :meth:`~nnabla.Variable.forward`, and not after a call to\n :meth:`~nnabla.Variable.backward`. If you wish to store the parameters of\n the network, remember to call :meth:`~nnabla.Variable.forward`, once before\n doing so, otherwise the weights and the binary weights will not be in sync.\n\n 3) CPU and GPU implementations now use floating values for `binary_weight`,\n since this function is for simulation purposes.\n\nReferences:\n\n * `M. Courbariaux, Y. Bengio, and J.-P. David. BinaryConnect:\n Training Deep Neural Networks with binary weights during propagations.\n <https://arxiv.org/abs/1511.00363>`_",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "Input ."
- },
- {
- "name": "weight",
- "type": "nnabla.Variable",
- "description": "Weight ."
- },
- {
- "name": "binary_weight",
- "type": "nnabla.Variable",
- "description": "Binarized weight ."
- },
- {
- "name": "bias",
- "type": "nnabla.Variable",
- "option": "optional",
- "description": "Bias."
- }
- ],
- "attributes": [
- {
- "name": "base_axis",
- "type": "int64",
- "default": 1,
- "description": "Dimensions up to base_axis is treated as sample dimension."
- },
- {
- "name": "quantize_zero_to",
- "type": "float32",
- "default": 1.0,
- "description": "Input value at zero is quantized to this value."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "Output."
- }
- ]
- },
- {
- "name": "BinaryConnectConvolution",
- "description": "This function provides a BinaryConnect convolution layer. It computes in\nthe forward pass\n\n.. math::\n\n y_{n, a, b} = \\sum_{m} \\sum_{i} \\sum_{j} sign(w_{n, m, i, j}) x_{m, a + i, b + j},\n\ni.e., the weights :math:`w_{n, m, i, j}` are binarized to\n:math:`sign(w_{n, m, i, j})` and, hence,\neach weight is in :math:`\\{-1,\\,1\\}`. By this weight binarization, the\ninner product computations do not require any multiplications anymore as\nthey turn into additions/subtractions.\n\nThis function should be used together with :meth:`~nnabla.functions.batch_normalization`.\n\nReference\n\n * `M. Courbariaux, Y. Bengio, and J.-P. David. BinaryConnect:\n Training Deep Neural Networks with binary weights during propagations.\n <https://arxiv.org/abs/1511.00363>`_\n\n\n.. note::\n\n 1) If you would like to share the binary weights between other\n layers, please use the standard, floating value weights (`weight`)\n and not the binary weights (`binary_weight`).\n\n 2) The weights and the binary weights become in sync only after a call to\n :meth:`~nnabla.Variable.forward`, and not after a call to\n :meth:`~nnabla.Variable.backward`. If you wish to store the parameters of\n the network, remember to call :meth:`~nnabla.Variable.forward`, once before\n doing so, otherwise the weights and the binary weights will not be in sync.\n\n 3) CPU and GPU implementations now use floating values for `binary_weight`,\n since this function is for simulation purposes.",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "Input."
- },
- {
- "name": "weight",
- "type": "nnabla.Variable",
- "description": "Weight."
- },
- {
- "name": "binary_weight",
- "type": "nnabla.Variable",
- "description": "Binarized weight."
- },
- {
- "name": "bias",
- "type": "nnabla.Variable",
- "option": "optional",
- "description": "Bias."
- }
- ],
- "attributes": [
- {
- "name": "base_axis",
- "type": "int64",
- "default": 1,
- "description": "Dimensions up to base_axis is treated as sample dimension."
- },
- {
- "name": "pad",
- "type": "shape",
- "default": "(0,) * (len(x.shape) - (base_axis+1))",
- "description": "Padding sizes for dimensions."
- },
- {
- "name": "stride",
- "type": "shape",
- "default": "(1,) * (len(x.shape) - (base_axis+1))",
- "description": "Stride sizes for dimensions."
- },
- {
- "name": "dilation",
- "type": "shape",
- "default": "(1,) * (len(x.shape) - (base_axis+1))",
- "description": "Dilation sizes for dimensions."
- },
- {
- "name": "group",
- "type": "int64",
- "default": 1,
- "description": "Number of groups of channels. This makes the connection across channels sparser, by grouping connections along the mapping direction."
- },
- {
- "name": "quantize_zero_to",
- "type": "float32",
- "default": 1.0,
- "description": "Input value at zero is quantized to this value."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "Output"
- }
- ]
- },
- {
- "name": "BinaryWeightAffine",
- "description": "This function provides a Binary Weight Network affine layer. It computes in\nthe forward pass\n\n.. math::\n\n y_j = \\frac{1}{\\|\\mathbf{w}_j\\|_{\\ell_1}} \\sum_{i} sign(w_{j,i}) x_i\n\ni.e., the weights :math:`w_{j,i}` are binarized to :math:`sign(w_{j,i})` and,\nhence, each weight is in :math:`\\{-1,\\,1\\}`. By this weight binarization, the\ninner product computations turn into additions/subtractions which are followed\nby multiplication with the scaling factor\n:math:`\\alpha_j = \\frac{1}{\\|\\mathbf{w}_j\\|_{\\ell_1}}`.\n\nReference\n\n * `Rastegari, Mohammad, et al. XNOR-Net: ImageNet Classification Using\n Binary Convolutional Neural Networks.\n <https://arxiv.org/abs/1603.05279>`_\n\n.. note::\n\n 1) If you would like to share the binary weights with other layers, please\n use the standard, floating value weights (`weight`) and not the binary\n weights (`binary_weight`).\n\n 2) The weights and the binary weights become in sync only after a call to\n :meth:`~nnabla.Variable.forward`, and not after a call to\n :meth:`~nnabla.Variable.backward`. If you wish to store the parameters of\n the network, remember to call :meth:`~nnabla.Variable.forward`, once before\n doing so, otherwise the weights and the binary weights will not be in sync.\n\n 3) CPU and GPU implementations now use floating values for `binary_weight`,\n since this function is for simulation purposes.",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "Input ."
- },
- {
- "name": "weight",
- "type": "nnabla.Variable",
- "description": "Weight."
- },
- {
- "name": "binary_weight",
- "type": "nnabla.Variable",
- "description": "Binarized weight."
- },
- {
- "name": "alpha",
- "type": "nnabla.Variable",
- "description": "Alpha."
- },
- {
- "name": "bias",
- "type": "nnabla.Variable",
- "option": "optional",
- "description": "Bias."
- }
- ],
- "attributes": [
- {
- "name": "base_axis",
- "type": "int64",
- "default": 1,
- "description": "Dimensions up to base_axis is treated as sample dimension."
- },
- {
- "name": "quantize_zero_to",
- "type": "float32",
- "default": 1.0,
- "description": "Input value at zero is quantized to this value."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "Output."
- }
- ]
- },
- {
- "name": "BinaryWeightConvolution",
- "description": "This function provides a Binary Weight Network convolution layer. It computes in\nthe forward pass\n\n.. math::\n\n y_{n, a, b} = \\frac{1}{\\|\\mathbf{w}_n\\|_{\\ell_1}} \\sum_{m} \\sum_{i} \\sum_{j} sign(w_{n, m, i, j}) x_{m, a + i, b + j}.\n\ni.e., the weights :math:`w_{n, m, i, j}` are binarized to\n:math:`sign(w_{n, m, i, j})` and, hence, each weight is in :math:`\\{-1,\\,1\\}`.\nBy this weight binarization, the inner product computations turn into\nadditions/subtractions which are followed by multiplication with the scaling\nfactor :math:`\\alpha_n = \\frac{1}{\\|\\mathbf{w}_n\\|_{\\ell_1}}`.\n\nReference\n\n * `Rastegari, Mohammad, et al. XNOR-Net: ImageNet Classification Using\n Binary Convolutional Neural Networks.\n <https://arxiv.org/abs/1603.05279>`_\n\n.. note::\n\n 1) If you would like to share the binary weights between other standard layers, please\n use the standard, floating value weights (`weight`)\n and not the binary weights (`binary_weight`).\n\n 2) The weights and the binary weights become in sync only after a call to\n :meth:`~nnabla.Variable.forward`, and not after a call to\n :meth:`~nnabla.Variable.backward`. If you wish to store the parameters of\n the network, remember to call :meth:`~nnabla.Variable.forward`, once\n before doing so, otherwise the weights and the binary weights will not be\n in sync.\n\n 3) CPU and GPU implementations now use floating values for `binary_weight`,\n since this function is for simulation purposes.",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "Input."
- },
- {
- "name": "weight",
- "type": "nnabla.Variable",
- "description": "Weight."
- },
- {
- "name": "binary_weight",
- "type": "nnabla.Variable",
- "description": "Binarized weight."
- },
- {
- "name": "alpha",
- "type": "nnabla.Variable",
- "description": "Alpha."
- },
- {
- "name": "bias",
- "type": "nnabla.Variable",
- "option": "optional",
- "description": "Bias."
- }
- ],
- "attributes": [
- {
- "name": "base_axis",
- "type": "int64",
- "default": 1,
- "description": "Dimensions up to base_axis is treated as sample dimension."
- },
- {
- "name": "pad",
- "type": "shape",
- "default": "(0,) * (len(x.shape) - (base_axis+1))",
- "description": "Padding sizes for dimensions."
- },
- {
- "name": "stride",
- "type": "shape",
- "default": "(1,) * (len(x.shape) - (base_axis+1))",
- "description": "Stride sizes for dimensions."
- },
- {
- "name": "dilation",
- "type": "shape",
- "default": "(1,) * (len(x.shape) - (base_axis+1))",
- "description": "Dilation sizes for dimensions."
- },
- {
- "name": "group",
- "type": "int64",
- "default": 1,
- "description": "Number of groups of channels. This makes the connection across channels sparser, by grouping connections along the mapping direction."
- },
- {
- "name": "quantize_zero_to",
- "type": "float32",
- "default": 1.0,
- "description": "Input value at zero is quantized to this value."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "Output"
- }
- ]
- },
- {
- "name": "INQAffine",
- "description": "This function provides a INQ affine layer. It computes in\nthe forward pass\n\n.. math::\n\n y_j = \\sum_{i} w_{j,i} x_i,\n\nwhere the weights :math:`w_{j,i}` are quantized sequentially during\ntraining to power-of-two numbers. In the backward pass, only the non-fixed\n(i.e., learnable) weights are updated.\n\nReferences:\n\n * `Zhou A, Yao A, Guo Y, Xu L, Chen Y. Incremental network quantization:\n Towards lossless CNNs with low-precision weights.\n <https://arxiv.org/abs/1702.03044>`_",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "Input ."
- },
- {
- "name": "weight",
- "type": "nnabla.Variable",
- "description": "Weight ."
- },
- {
- "name": "indicator_fixedweights",
- "type": "nnabla.Variable",
- "description": "Indicates which weights are already fixed (0 = not fixed, 1 = fixed) ."
- },
- {
- "name": "bias",
- "type": "nnabla.Variable",
- "option": "optional",
- "description": "Bias."
- }
- ],
- "attributes": [
- {
- "name": "base_axis",
- "type": "int64",
- "default": 1,
- "description": "Dimensions up to base_axis is treated as sample dimension."
- },
- {
- "name": "num_bits",
- "type": "int64",
- "default": 4,
- "description": "Number of bits per weight. Needs to be >= 2 as two bits are used to code `zero` and sign of weight."
- },
- {
- "name": "inq_iterations",
- "type": "int64[]",
- "default": "()",
- "description": "List which specifies after how many forward passes we fix 50% of the learnable weights. If we have done as many iterations as specified in the last element of `inq_iterations`, then all weights are fixed."
- },
- {
- "name": "selection_algorithm",
- "type": "string",
- "default": "largest_abs",
- "description": "Chooses algorithm that we use for selecting the weights to fix (\"largest_abs\" ... fix weights with largest absolute value, \"random\" ... fix weights randomly)"
- },
- {
- "name": "seed",
- "type": "int64",
- "default": -1,
- "description": "Random seed. When -1, seed is sampled from global random number generator."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "Output."
- }
- ]
- },
- {
- "name": "INQConvolution",
- "description": "This function provides a INQ convolution layer. It computes in\nthe forward pass\n\n.. math::\n\n y_{n, a, b} = \\sum_{m} \\sum_{i} \\sum_{j} w_{n, m, i, j} x_{m, a + i, b + j},\n\nwhere the weights :math:`w_{j,i}` are quantized sequentially during\ntraining to power-of-two numbers. In the backward pass, only the non-fixed\n(i.e., learnable) weights are updated.\n\nReference\n\n * `Zhou A, Yao A, Guo Y, Xu L, Chen Y. Incremental network quantization:\n Towards lossless CNNs with low-precision weights.\n <https://arxiv.org/abs/1702.03044>`_",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "Input."
- },
- {
- "name": "weight",
- "type": "nnabla.Variable",
- "description": "Weight."
- },
- {
- "name": "indicator_fixedweights",
- "type": "nnabla.Variable",
- "description": "Indicates which weights are already fixed (0 = not fixed, 1 = fixed) ."
- },
- {
- "name": "bias",
- "type": "nnabla.Variable",
- "option": "optional",
- "description": "Bias."
- }
- ],
- "attributes": [
- {
- "name": "base_axis",
- "type": "int64",
- "default": 1,
- "description": "Dimensions up to base_axis is treated as sample dimension."
- },
- {
- "name": "pad",
- "type": "shape",
- "default": "(0,) * (len(x.shape) - (base_axis+1))",
- "description": "Padding sizes for dimensions."
- },
- {
- "name": "stride",
- "type": "shape",
- "default": "(1,) * (len(x.shape) - (base_axis+1))",
- "description": "Stride sizes for dimensions."
- },
- {
- "name": "dilation",
- "type": "shape",
- "default": "(1,) * (len(x.shape) - (base_axis+1))",
- "description": "Dilation sizes for dimensions."
- },
- {
- "name": "group",
- "type": "int64",
- "default": 1,
- "description": "Number of groups of channels. This makes the connection across channels sparser, by grouping connections along the mapping direction."
- },
- {
- "name": "num_bits",
- "type": "int64",
- "default": 4,
- "description": "Number of bits per weight. Needs to be >= 2 as two bits are used to code `zero` and sign of weight."
- },
- {
- "name": "inq_iterations",
- "type": "int64[]",
- "default": "()",
- "description": "List which specifies after how many forward passes we fix 50% of the learnable weights. If we have done as many iterations as specified in the last element of `inq_iterations`, then all weights are fixed."
- },
- {
- "name": "selection_algorithm",
- "type": "string",
- "default": "largest_abs",
- "description": "Chooses algorithm that we use for selecting the weights to fix (\"largest_abs\" ... fix weights with largest absolute value, \"random\" ... fix weights randomly)"
- },
- {
- "name": "seed",
- "type": "int64",
- "default": -1,
- "description": "Random seed. When -1, seed is sampled from global random number generator."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "Output"
- }
- ]
- },
- {
- "name": "FixedPointQuantize",
- "description": "This function simulates to uniformly quantize values in fixed-point number representation.\n\nIn the forward pass,\n\n.. math::\n\n q_i= \\left\\{\n\t \\begin{array}{ll}\n\t\t\tmax & if \\ \\ \\ x_i > max \\\\\n\t\t sign(x_i) \\times floor(|x_i| \\delta^{-1} + 2^{-1}) \\times \\delta & if \\ \\ min \\le x_i \\le max \\\\\n\t \tmin & if \\ \\ x_i < min \\\\\n\t \\end{array} \\right.,\n\nwhere :math:`\\delta` is the step size,\n:math:`(min, max) :=(- (2^{n-1} - 1)\\delta, (2^{n-1} - 1)\\delta)` if :math:`sign` is true,\n:math:`(min, max) := (0, (2^n - 1) \\delta)` otherwise, and\n:math:`n` is the total bit-width used.\n\nIn the backward pass when using `ste_fine_grained` as false,\n\n.. math::\n\n \\frac{\\partial q_i}{\\partial x_i} = 1.\n\nIn the backward pass when using `ste_fine_grained` as true,\n\n.. math::\n\n \\frac{\\partial q_i}{\\partial x_i}= \\left\\{\n\t \\begin{array}{ll}\n\t\t\t0 & if \\ \\ \\ x_i > max \\\\\n\t\t 1 & if \\ \\ min \\le x_i \\le max \\\\\n\t \t0 & if \\ \\ x_i < min \\\\\n\t \\end{array} \\right..\n\n.. note::\n\n\n\tQuantized values are stored as floating point number, since this function is for simulation purposes.",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "N-D array"
- }
- ],
- "attributes": [
- {
- "name": "sign",
- "type": "boolean",
- "default": true,
- "description": "Indicate the signed number or the unsigned number. Default is true."
- },
- {
- "name": "n",
- "type": "int64",
- "default": 8,
- "description": "Bit width used. Note that `sign` consumes one bit. :math:`n-1` is used for number representation in `signed` case."
- },
- {
- "name": "delta",
- "type": "float32",
- "default": 0.0625,
- "description": "Step size."
- },
- {
- "name": "ste_fine_grained",
- "type": "boolean",
- "default": true,
- "description": "Straight Through Estimator is fine-grained or not."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array."
- }
- ]
- },
- {
- "name": "MinMaxQuantize",
- "description": "This function simulates to uniformly quantize values in the range of min and max quantization levels.\n\nMin-max quantization is defined as the following equation\n\n.. math::\n\n y = round \\left(\\frac{\\min(\\max(x, m), M) - m}{scale} \\right) \\times scale + m,\n\nwhere the :math:`scale` is defined as\n\n.. math::\n\n scale = \\frac{M - m}{M_q - m_q},\n\nand\n\n.. math::\n\n m_q = ql_{min}, \\\\\n M_q = ql_{max}, \\\\\n m = qr_{min}, \\\\\n M = qr_{max}.\n\nIn the backward pass when using `ste_fine_grained` as false,\n\n .. math::\n\n \\frac{\\partial q_i}{\\partial x_i} = 1.\n\n\nIn the backward pass when using `ste_fine_grained` as true,\n\n .. math::\n\n \\frac{\\partial q_i}{\\partial x_i}= \\left\\{\n \\begin{array}{ll}\n 0 & if \\ \\ \\ x_i > M \\\\\n 1 & if \\ \\ m \\le x_i \\le M \\\\\n 0 & if \\ \\ x_i < m \\\\\n \\end{array} \\right..\n\n:math:`qr_{min}` and :math:`qr_{max}` are treaded as follows.\n\n * `x_min_max` is `True` and `ema` is `True`:\n Exponential moving average are computed for each :math:`min(x)` and :math:`max(x)`\n then stored in :math:`qr_{min}` and :math:`qr_{max}`.\n * `x_min_max` is `True` and `ema` is `False`:\n :math:`min(x)` and :math:`max(x)` are computed then stored in :math:`qr_{min}` and :math:`qr_{max}`.\n * `x_min_max` is `False` and `ema` is `True`:\n Exponential moving average stored in :math:`qr_{min}` and :math:`qr_{max}` are used.\n * `x_min_max` is `False` and `ema` is `False`\n Gradients of :math:`qr_{min}` and :math:`qr_{max}` are computed in the backward pass.\n\nMore precisely, in inference of the min-max quantization, one has to consider *zero-point (zp)*\nwhich corresponds\nto the real value 0, and its data type is an integer. *zero-point* is defined as\n\n .. 
math::\n\n && zp_f = ql_{min} -\\frac{qr_{min}}{scale}, \\\\\n && zp = \\left\\{\n \\begin{array}{ll}\n ql_{max} & if \\ \\ \\ zp_f >= ql_{max} \\\\\n round(zp_f) & if \\ \\ otherwise \\\\\n ql_{min} & if \\ \\ zp_f <= ql_{min} \\\\\n \\end{array} \\right..\n\nAccordingly, in order to simulate quantization effect of *zero-point*,\nduring both forward and backward pass, :math:`qr_{min}` and :math:`qr_{max}` are adjusted as follows,\n\n .. math::\n\n qr_{min}^{adj} = ql_{min} - zp * scale, \\\\\n qr_{max}^{adj} = ql_{max} - zp * scale.\n\nThese operations are often called *nudge*.\n\nFinally, in the formulas of the min-max quantization, :math:`m` and :math:`M` are replaced by\n:math:`qr_{min}^{adj}` and :math:`qr_{max}^{adj}` respectively.\n\n.. note::\n\n\tQuantized values are stored as floating point number, since this function is for simulation purposes.",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "N-D array input."
- },
- {
- "name": "qr_min",
- "type": "nnabla.Variable",
- "description": "Minimum value for the quantization range, modified during forward execution when x_min_max is True."
- },
- {
- "name": "qr_max",
- "type": "nnabla.Variable",
- "description": "Maximum value for the quantization range, modified during forward execution when x_min_max is True."
- },
- {
- "name": "ql_min",
- "type": "nnabla.Variable",
- "description": "Minimum value for the quantization level, typically 0."
- },
- {
- "name": "ql_max",
- "type": "nnabla.Variable",
- "description": "Maximum value for the quantization level, typically 255."
- }
- ],
- "attributes": [
- {
- "name": "decay",
- "type": "float32",
- "default": 0.999,
- "description": "Decay rate for the exponential moving average."
- },
- {
- "name": "x_min_max",
- "type": "boolean",
- "default": false,
- "description": "Use the min and max of x to compute quantization ranges."
- },
- {
- "name": "ema",
- "type": "boolean",
- "default": false,
- "description": "Use the exponential moving average for the min and max quantization ranges."
- },
- {
- "name": "ste_fine_grained",
- "type": "boolean",
- "default": true,
- "description": "Straight Through Estimator is fine-grained or not."
- },
- {
- "name": "eps",
- "type": "float32",
- "default": 0.01,
- "description": "Epsilon, or small value to ensure :math:`qr_{max} - qr_{min}` must be greater than the epsilon."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array."
- }
- ]
- },
- {
- "name": "Pow2Quantize",
- "description": "This function simulates to quantize values in the power of 2 number representation,\nin other words, it is linear (uniform) quantization in :math:`log_2` domain.\n\nIn the forward pass of `signed` case,\n\n.. math::\n\n q_i= \\left\\{\n\t \\begin{array}{ll}\n\t\t\tmax_{+} & if \\ \\ \\overline{q_i} > max_{+} \\\\\n\t\t\t\\overline{q_i} & if \\ \\ min_{+} \\le \\overline{q_i} \\le max_{+} \\\\\n\t\t min_{+} & if \\ \\ 0 \\le \\overline{q_i} < min_{+} \\\\\n\t\t min_{-} & if \\ \\ min_{-} < \\overline{q_i} < 0 \\\\\n\t\t \\overline{q_i} & if \\ \\ max_{-} \\le \\overline{q_i} \\le min_{-}\\\\\n\t \tmax_{-} & if \\ \\ \\overline{q_i} < max_{-} \\\\\n\t \\end{array} \\right.,\n\nwhere\n\n.. math::\n\n && max_{+} = 2^{m}, min_{+} = 2^{m - (2^{n-1} - 1)},\\\\\n && max_{-} = -2^{m}, min_{-} = -2^{m - (2^{n-1} - 1)},\\\\\n && \\overline{q_i} = sign(x_i) \\times 2^{round(\\log_2 |x_i|)}.\n\nThis quantization uses the geometric mean between two power-of-two numbers\nas quantization threshold.\n\nIn the forward pass of `unsigned` case,\n\n.. math::\n\n q_i= \\left\\{\n\t \\begin{array}{ll}\n\t\t\tmax & if \\ \\ \\overline{q_i} > max \\\\\n\t\t\t\\overline{q_i} & if \\ \\ min \\le \\overline{q_i} \\le max \\\\\n\t\t min & if \\ \\ 0 < \\overline{q_i} < min \\\\\n\t \\end{array} \\right.,\n\nwhere\n\n.. math::\n\n && max = 2^{m}, min = 2^{m - (2^{n} - 1)},\\\\\n && \\overline{q_i} = 2^{int(\\log_2 |x_i|)}.\n\n\nWhen using `with_zero` as true, a pruning threshold is used to round an input to\n0 or :math:`min`. The pruning threshold is defined in this function as the following,\n\n.. math::\n\n pruning\\ threshold = min \\times 2^{-\\frac{1}{2}}.\n\nIf an absolute value of the input is lesser than this value, the input is rounded to 0, otherwise :math:`min`.\n\nIn the backward pass when using ste_fine_grained as false,\n\n.. math::\n\n \\frac{\\partial q_i}{\\partial x_i} = 1.\n\nIn the backward pass when using ste_fine_grained as true,\n\n.. 
math::\n\n \\frac{\\partial q_i}{\\partial x_i}= \\left\\{\n\t \\begin{array}{ll}\n\t\t\t0 & if \\ \\ \\overline{q_i} > max_{+} \\\\\n\t\t\t1 & if \\ \\ otherwise \\\\\n\t \t0 & if \\ \\ \\overline{q_i} < max_{-} \\\\\n\t \\end{array} \\right..\n\n\nThere are some literatures using pow2 quantization in their proposed methods.\n\nReferences:\n\n * `Miyashita Daisuke, Lee H. Edward, Murmann Boris.\n Convolutional Neural Networks using Logarithmic Data Representation.\n <https://arxiv.org/abs/1603.01025>`_\n\n * `Aojun Zhou, Anbang Yao, Yiwen Guo, Lin Xu, Yurong Chen.\n Incremental Network Quantization: Towards Lossless CNNs with Low-precision Weights.\n <https://arxiv.org/abs/1702.03044>`_\n\n.. note::\n\n\n\tQuantized values are stored as floating point number, since this function is for simulation purposes.",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "N-D array"
- }
- ],
- "attributes": [
- {
- "name": "sign",
- "type": "boolean",
- "default": true,
- "description": "Indicate the signed number or the unsigned number. Default is true."
- },
- {
- "name": "with_zero",
- "type": "boolean",
- "default": true,
- "description": "Indicate using zero as a quantized value. Default is true. Note that `zero` consumes one bit."
- },
- {
- "name": "n",
- "type": "int64",
- "default": 8,
- "description": "Bit width used. Note that `sign` consumes one bit. :math:`n-1` is used for number representation in `signed` case. Default is 8."
- },
- {
- "name": "m",
- "type": "int64",
- "default": 1,
- "description": ":math:`2^m` is the upper bound of the dynamic range and :math:`-2^m` is the lower bound, :math:`m \\in \\mathcal{Z}`. Default is 1."
- },
- {
- "name": "ste_fine_grained",
- "type": "boolean",
- "default": true,
- "description": "Straight Through Estimator is fine-grained or not."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array."
- }
- ]
- },
- {
- "name": "Prune",
- "description": "Prune the input as the following equation,\n\n.. math::\n\n q_i = \\left \\{\n \\begin{array}{ll}\n 0 & abs(x_i) < threshold \\\\\n x_i & otherwise\n \\end{array}\n \\right.\n\nwhere :math:`threshold` is determined by `threshold = np.sort(np.abs(x))[int((x.size - 1) * rate)]`.",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "N-D array"
- }
- ],
- "attributes": [
- {
- "name": "rate",
- "type": "float32",
- "default": 0.9,
- "description": "Sparse rate, or pruning rate."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array with the same shape as x"
- }
- ]
- },
- {
- "name": "QuantizeLinear",
- "description": "Quantize linearly inputs with the scale and zero point.\n\n.. math::\n\n y = saturate(round(x / scale) + zero_point).\n\n:math:`saturate` range is determined by `dtype` and :math:`round` mode is selected\nby `round_mode`. :math:`zero_point` is constrained by the `dtype` range and its values are\nrounded by `round_mode`.\n\nThis function aligns with ONNX.",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "Input N-D array."
- },
- {
- "name": "scale",
- "type": "nnabla.Variable",
- "description": "Scale N-D array. The values must be positive number."
- },
- {
- "name": "zero_point",
- "type": "nnabla.Variable",
- "description": "Zero point N-D array."
- }
- ],
- "attributes": [
- {
- "name": "round_mode",
- "type": "string",
- "default": "HALF_AWAY_FROM_ZERO",
- "description": "Rounding mode. HALF_AWAY_FROM_ZERO or HALF_TO_EVEN."
- },
- {
- "name": "narrow_range",
- "type": "boolean",
- "default": false,
- "description": "If true, this function does not use the minimum quantized value. For example, if `dtype` is int8 (the range is in [-128, 127]), the output range is corrected in [-127, 127]."
- },
- {
- "name": "dtype",
- "type": "int64",
- "default": 1,
- "description": "Data type for the output. The int value is compatible to the enumtype for the data type defined in `the numpy <https://github.com/numpy/numpy/blob/master/numpy/core/include/numpy/ndarraytypes.h>`_."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "Output N-D array."
- }
- ]
- },
- {
- "name": "DequantizeLinear",
- "description": "Dequantize linearly inputs with the scale and zero point.\n\n.. math::\n\n y = (x - zero_point) * scale.\n\n:math:`zero_point` is constrained by the `dtype` range.\n\nThis function aligns with ONNX.",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "Input N-D array."
- },
- {
- "name": "scale",
- "type": "nnabla.Variable",
- "description": "Scale N-D array. The values must be positive number. This should be same as one used in QuantizeLinear."
- },
- {
- "name": "zero_point",
- "type": "nnabla.Variable",
- "description": "Zero point N-D array. This should be same as one used in QuantizeLinear."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "Output N-D array."
- }
- ]
- },
- {
- "name": "TopNError",
- "description": "Top N error along the dimension specified by the axis, the element of outputs is\n\n.. math::\n\n y_i = \\left \\{\n \\begin{array}{l}\n 1 \\ (x_i \\ is \\ not \\ within \\ N-th \\ place) \\\\\n 0 \\ (x_i \\ is \\ within \\ N-th \\ place)\n \\end{array}\n \\right.",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "Probabilities N-D array. :math:`D_1 \\times ... \\times D_i \\times ... \\times D_N`"
- },
- {
- "name": "target",
- "type": "nnabla.Variable",
- "description": "N-D array of labels. :math:`D_1 \\times ... \\times 1 \\times ... \\times D_N`"
- }
- ],
- "attributes": [
- {
- "name": "axis",
- "type": "int64",
- "default": "len(x.shape) - 1",
- "description": "Axis on which the top N error is calculated."
- },
- {
- "name": "n",
- "type": "int64",
- "default": 1,
- "description": "top N"
- }
- ],
- "outputs": [
- {
- "name": "output",
- "type": "nnabla.Variable",
- "description": "Element-wise error N-D array. (:math:`D_1 \\times ... \\times 1 \\times ... \\times D_N`)"
- }
- ]
- },
- {
- "name": "BinaryError",
- "description": "Elementwise binary error.\n\n.. math::\n y_i = \\left \\{\n \\begin{array}{l}\n 0 ((x^{(0)} \\geq 0.5) = (x^{(1)} \\geq 0.5)) \\\\\n 1 ((x^{(0)} \\geq 0.5) \\neq (x^{(1)} \\geq 0.5))\n \\end{array}\n \\right.",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "Probabilities N-D array. :math:`-\\infty` to :math:`\\infty`."
- },
- {
- "name": "target",
- "type": "nnabla.Variable",
- "description": "Labels N-D array. Usually set as 0 or 1, but, it allows probability (0 to 1) as inputs."
- }
- ],
- "outputs": [
- {
- "name": "output",
- "type": "nnabla.Variable",
- "description": "Element-wise errors N-D array."
- }
- ]
- },
- {
- "name": "ConfusionMatrix",
- "description": "Confusion matrix.\nThe return value is already summed over samples.",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "Probabilities N-D array. (:math:`D_1 \\times ... \\times D_i \\times ... \\times D_N`)"
- },
- {
- "name": "target",
- "type": "nnabla.Variable",
- "description": "Labels N-D array. (:math:`D_1 \\times ... \\times 1 \\times ... \\times D_N`)"
- }
- ],
- "attributes": [
- {
- "name": "axis",
- "type": "int64",
- "default": "len(x.shape) - 1",
- "description": "Axis on which the confusion matrix is calculated."
- }
- ],
- "outputs": [
- {
- "name": "output",
- "type": "nnabla.Variable",
- "description": "Confusion matrix 2-D array. Col index is estimated class. Row index is label class."
- }
- ]
- },
- {
- "name": "VATNoise",
- "description": "Noise for virtual adversarial training.\n\nThis layer is a special layer for GUI network designing, specialized for getting\nthe noise of virtual adversarial training.\n\nIn the backward process, the weight parameter will be replaced with the gradient.\n\nForward\n\n.. math::\n y_i = \\frac{\\epsilon x_i}{\\sqrt{\\sum_k x_k^2 + c}}\n\nBackward\n\n.. math::\n \\delta x_i = 0\n\n.. math::\n w_i = \\epsilon \\delta y_i\n\nNote:\n This layer is a special layer for GUI network designing.\n\nReferences:\n * `Miyato et.al, Distributional Smoothing with Virtual Adversarial Training.\n <https://arxiv.org/abs/1507.00677>`_",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "N-D array of noise input. Noise is standard Gaussian noise initially, but the next step, fed back gradient variable."
- },
- {
- "name": "w",
- "type": "nnabla.Variable",
- "description": "N-D array for keeping gradient values."
- }
- ],
- "attributes": [
- {
- "name": "base_axis",
- "type": "int64",
- "default": 1,
- "description": "Dimensions up to base_axis is treated as sample dimension."
- },
- {
- "name": "eps",
- "type": "float32",
- "default": 1.0,
- "description": "Noise norm (l2) factor."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array"
- }
- ]
- },
- {
- "name": "Unlink",
- "description": "This function behaves as an identity function on the forward pass,\nand deletes the gradient for the backward pass.\n\nThis layer is a special layer for GUI network designing, used for getting\nzero backward operation by adding this layer.\n\nForward\n\n.. math::\n y_i = x_i\n\nBackward\n\n.. math::\n \\delta x_i = 0\n\nNote:\n This layer is a special layer for GUI network designing.",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "N-D array."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array."
- }
- ]
- },
- {
- "name": "Sink",
- "description": "Creates a dummy variable used to call forward or backward function\nof multiple variables at one place.\n\nThis takes any number of input variables with any shape,\nand creates a single 0-shape output.\nThe forward pass does nothing. The backward pass sets ones\nto the input grads if one_input_grad is set as true.\n\nNote:\n ``sink`` can only be called at the very end of the graph, and\n ``grad`` of input variables are cleared\n when ``y.backward(clear_buffer=True)`` is called.",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "list": true,
- "description": "Any number of inputs with any shape."
- }
- ],
- "attributes": [
- {
- "name": "one_input_grad",
- "type": "boolean",
- "default": true,
- "description": "Set grads of inputs as one during backward. It is useful to set false if you want to set external gradients to the input variables."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "Dummy variable."
- }
- ]
- },
- {
- "name": "NmsDetection2d",
- "description": "Non-Maximum Suppression (NMS) to 2D Object detector output.\nThe input is a 3-dimensional tensor with shape of ``(B, N, 5 + C)``\nwhere ``B`` denotes batch size, ``N`` denotes the number of detection box\ncandidates, and ``C`` denotes the number of classes of object detection.\n``5 + C`` consists of the box coordinates ``x, y, w, h`` in normalized\ncoordinates (size of each x and y are 1.0), objectness\n(learned to predict IoU value to ground truth box), and the class probabilities of ``C`` classes.\nIt outputs a tensor with the same dimensions as the input, where all\nvalues are copied from the input to the output, except the class\nprobabilities are multiplied by objectness, and possibly suppressed to 0\nby NMS.\nDuring NMS, all of combination of pairs of bounding boxes is compared.\nFor each pair, the bounding box with a lower detection score\n(described below) is suppressed if the overlap ratio (the IoU)\nis greater than the value of ``nms``.\n\nThere are two suppression modes for NMS.\n\n1. Suppress by class probability (``nms_per_class`` is ``True``):\nFor each bounding box, the detection score is calculated by\n``objectness * probability[class_id]`` for each class.\nThe suppression is done for each class independently.\n\n2. Suppress by objectness (``nms_per_class`` is ``False``):\nThe suppression is done for each bounding box using ``objectness``\nas a detection score. All class probabilities becomes 0 for\nevery suppressed boxes.\n\nReferences:\n * `Joseph Redmon, Ali Farhadi, YOLO9000: Better, Faster, Stronger.\n <https://arxiv.org/abs/1612.08242>`_",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "A 3-dimensional array."
- }
- ],
- "attributes": [
- {
- "name": "thresh",
- "type": "float32",
- "default": 0.5,
- "description": "Detection score threshold."
- },
- {
- "name": "nms",
- "type": "float32",
- "default": 0.45,
- "description": "IoU threshold for Non-maximum suppression (NMS)."
- },
- {
- "name": "nms_per_class",
- "type": "boolean",
- "default": true,
- "description": "If true, NMS is applied for each class."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "A 3-dim array with the same dimensions with the input."
- }
- ]
- },
- {
- "name": "ONNXNonMaxSuppression",
- "description": "Non-Maximum Suppression (NMS) to 2D Object detector output. This function\nprovides a ONNX-compatible interface of Non-Maximum Suppression.\nThe first input is a 3-dimensional bounding box tensor with shape of\n``(B, N, 4)`` where ``B`` denotes batch size and ``N`` denotes the \nnumber of detection box candidates.\n``4`` consists of the box coordinates ``y1, x1, y2, x2`` in normalized\ncoordinates (size of each x and y are 1.0).\nThe second input is a 3-dimensional score tensor with shape of\n``(B, C, N)`` where ``C`` denotes the number of classes of object\ndetection.\nIt outputs the indices of the selected boxes as a tensor with shape of\n``(M, 3)`` where ``M`` denotes the number of the selected boxes.\n``3`` consists of 3-dimensional indices\n``batch_index, class_index, box_index``.\n\nReferences:\n * `Joseph Redmon, Ali Farhadi, YOLO9000: Better, Faster, Stronger.\n <https://arxiv.org/abs/1612.08242>`_\n * `ONNX Operators documentation.\n <https://github.com/onnx/onnx/blob/main/docs/Operators.md>`",
- "inputs": [
- {
- "name": "boxes",
- "type": "nnabla.Variable",
- "description": "A 3-dimensional array."
- },
- {
- "name": "scores",
- "type": "nnabla.Variable",
- "description": "A 3-dimensional array."
- }
- ],
- "attributes": [
- {
- "name": "center_point_box",
- "type": "int64",
- "default": 0,
- "description": "Bounding box format (0 or 1)."
- },
- {
- "name": "max_output_boxes_per_class",
- "type": "int64",
- "default": 0,
- "description": "The maximum number of boxes selected per batch per class."
- },
- {
- "name": "iou_threshold",
- "type": "float32",
- "default": 0.0,
- "description": "IoU threshold for Non-maximum suppression (NMS)."
- },
- {
- "name": "score_threshold",
- "type": "float32",
- "default": 0.0,
- "description": "Detection score threshold."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "A 2-dimensional array."
- }
- ]
- },
- {
- "name": "MaxPoolingBackward",
- "description": "Max pooling backward. This aims to support the n-th order gradients of \nthe max pooling. The document of this function must not be shown, and \nthe function must not be called in the end-user side.",
- "inputs": [
- {
- "name": "dy",
- "type": "nnabla.Variable",
- "description": "Input variable."
- },
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "Input variable."
- }
- ],
- "attributes": [
- {
- "name": "kernel",
- "required": true,
- "type": "shape",
- "description": "Kernel sizes for each spatial axis."
- },
- {
- "name": "stride",
- "type": "shape",
- "default": "kernel",
- "description": "Subsampling factors for each spatial axis."
- },
- {
- "name": "ignore_border",
- "type": "boolean",
- "default": true,
- "description": "If false, kernels covering borders are also considered for the output."
- },
- {
- "name": "pad",
- "type": "shape",
- "default": "(0,) * len(kernel)",
- "description": "Border padding values for each spatial axis. Padding will be added both sides of the dimension."
- },
- {
- "name": "channel_last",
- "type": "boolean",
- "default": false,
- "description": "If True, the last dimension is considered as channel dimension, a.k.a. NHWC order."
- }
- ],
- "outputs": [
- {
- "name": "dx",
- "type": "nnabla.Variable",
- "description": "Output"
- }
- ],
- "category": "Pool"
- },
- {
- "name": "PatchCorrelation",
- "description": "Multiplicative patch-wise comparision between inputs `x1` and `x2`, which\n must both be 4-dimensional NCHW (with `channel_last=False`) or NHWC (with\n `channel_last=True`) arrays (where *N* is the number of samples, *H* and\n *W* are the sample height and width and *C* is the number of channels).\n The function returns a 5-D array with shape :math:`(N, C_y, C_x, H_o, W_o)`\n where :math:`H_o, W_o` are determined by the possible patch locations within\n the, optionally padded, input image size and :math:`C_y, C_x` are determined\n by the optionally shifted patch positions.\n\n Mathmatically, the patch correlation is formulated as\n\n .. math::\n\n O(s_y, s_x, h_0, w_0) =\n \\sum_{c} \\sum_{k_h} \\sum_{k_w} I_1(c, h + k_h, w + k_w) \\times I_2(c, h + k_h + s_h, w + k_w + s_w),\n\n where :math:`I_1(c, h, w)` and :math:`I_2(c, h, w)` are the inputs at :math:`c`-th channel,\n :math:`h`-th height, and :math:`w`-th width, :math:`k_h, k_w` indices for the patch size\n and :math:`s_h, s_w` indices for the shifts.\n\n A single correlation value (per sample) is produced if the patch extends\n to the image dimensions and all other parameters use the default values.\n\n >>> import numpy as np, nnabla as nn, nnabla.functions as F\n >>> nn.set_auto_forward(True)\n >>> N, C, H, W = (1, 2, 3, 4)\n >>> x = nn.Variable.from_numpy_array(np.ones([N, C, H, W]))\n >>> F.patch_correlation(x, x, patch=(H, W)).d\n array([[[[[24.]]]]], dtype=float32)\n\n A patch that is smaller than the image size moves horizontal and vertical\n producing a value per position. 
The `patch_step` argument may be used to\n control the position increments.\n\n >>> F.patch_correlation(x, x, patch=(H-1, W-1)).d\n array([[[[[12., 12.],\n [12., 12.]]]]], dtype=float32)\n >>> F.patch_correlation(x, x, patch=(H-1, W-1), patch_step=(2, 1)).d\n array([[[[[12., 12.]]]]], dtype=float32)\n\n Multiple correlations may be performed at each position between the patch\n from `x1` and patches from `x2` at relative offsets striding the maximum\n vertical and horizontal distance given by the `shift` values at increments\n of `shift_step`. The shifted correlation values can be obtained for the\n from the second and third output dimension for the vertical and horizontal\n shifts.\n\n >>> F.patch_correlation(x, x, (H, 1), shift=(0, 1)).shape\n (1, 1, 3, 1, 4)\n >>> F.patch_correlation(x, x, (H, 1), shift=(0, 1)).d\n array([[[[[0., 6., 6., 6.]],\n [[6., 6., 6., 6.]],\n [[6., 6., 6., 0.]]]]], dtype=float32)\n >>> F.patch_correlation(x, x, (H, 1), shift=(0, 1), shift_step=(1, 2)).d\n array([[[[[0., 6., 6., 6.]],\n [[6., 6., 6., 0.]]]]], dtype=float32)\n\n Padding with zero values may be applied individually to the top, bottom,\n left and right side of the input image.\n\n >>> F.patch_correlation(x, x, patch=(H, W), padding=(0, 1, W, W)).d\n array([[[[[ 0., 6., 12., 18., 24., 18., 12., 6., 0.],\n [ 0., 4., 8., 12., 16., 12., 8., 4., 0.]]]]], dtype=float32)\n\n This function may be used to implement the FlowNetC correlation layer.\n\n >>> N, C, H, W = (1, 256, 44, 60)\n >>> x1, x2 = nn.Variable((N, C, H, W)), nn.Variable((N, C, H, W))\n >>> F.patch_correlation(x1, x2, shift=20, shift_step=2).shape\n (1, 21, 21, 44, 60)\n\n References:\n\n * `Fischer et al., FlowNet: Learning Optical Flow with Convolutional\n Networks. <https://arxiv.org/abs/1504.06852>`_",
- "inputs": [
- {
- "name": "x1",
- "type": "nnabla.Variable",
- "description": "Input N-D array with shape :math:`(N, H, W, C)`."
- },
- {
- "name": "x2",
- "type": "nnabla.Variable",
- "description": "Input N-D array with shape :math:`(N, H, W, C)`."
- }
- ],
- "attributes": [
- {
- "name": "patch",
- "type": "shape",
- "default": "(1, 1)",
- "description": "A tuple with height and width of the correlation patch. A single integer expands to identical height and width."
- },
- {
- "name": "shift",
- "type": "shape",
- "default": "(0, 0)",
- "description": "A tuple of maximum vertical and horizontal displacement of patches from `x2` that are correlated with a single patch from `x1`. A single integer expands to identical vertical and horizontal displacement."
- },
- {
- "name": "patch_step",
- "type": "shape",
- "default": "(1, 1)",
- "description": "A tuple of vertical and horizontal increments for advancing the position of the correlation patch within the input image shape. A single integer expands to identical vertical and horizontal increments."
- },
- {
- "name": "shift_step",
- "type": "shape",
- "default": "(1, 1)",
- "description": "A tuple of vertical and horizontal increments for advancing the relative offset position within the shift range. A single integer expands to identical vertical and horizontal increments."
- },
- {
- "name": "padding",
- "type": "shape",
- "default": "(0, 0, 0, 0)",
- "description": "A tuple of top, bottom, left and right padding extent. A tuple of two values yields identical top/bottom and left/right padding from the first and second tuple value. A single integer expands to identical padding extent for all sides."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "N-D array with shape :math:`(N, C_y, C_x, H_o, W_o)`.\n\nA spatial size of the output is calculated as\n\n.. math::\n\n H_o = \\frac{H + (top\\_pad + bottom\\_pad) - patch_v }{patch\\_step_v} + 1.\n\nA channel size of the output is calculated as\n\n.. math::\n\n C_y = \\frac{2 \\times shift_v}{shift\\_step_v} + 1.\n\n:math:`W_o` and :math:`C_x` are the same calculation with different components."
- }
- ]
- },
- {
- "name": "Unique",
- "description": "Find the unique elements of input array.",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "A N-D array."
- }
- ],
- "attributes": [
- {
- "name": "flatten",
- "type": "boolean",
- "default": true,
- "description": "If True, unique values of the flatten input array are returned."
- },
- {
- "name": "axis",
- "type": "int64",
- "default": "None",
- "description": "If flatten is True and axis is specified, unique slices along axis are returned."
- },
- {
- "name": "sorted",
- "type": "boolean",
- "default": true,
- "description": "If True, unique values/slices sorted in ascending order are returned."
- },
- {
- "name": "with_index",
- "type": "boolean",
- "default": false,
- "description": "If True, `indices` is returned."
- },
- {
- "name": "with_inverse",
- "type": "boolean",
- "default": false,
- "description": "If True, `inverse_indices` is returned."
- },
- {
- "name": "with_counts",
- "type": "boolean",
- "default": false,
- "description": "If True, `counts` is returned."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "A N-D array."
- },
- {
- "name": "indices",
- "type": "nnabla.Variable",
- "description": "A 1-D array. It's indices of `y` elements first occurrence in `x`. If `flatten` is True, it contains indices to flattened input array `x`. If `flatten` is False and `axis` is specified, it contains indices to input array `x` on `axis`."
- },
- {
- "name": "inverse_indices",
- "type": "nnabla.Variable",
- "description": "A 1-D array. It's indices of `x` elements corresponding to `y`. If `flatten` is True, it contains indices to output array `y`. If `flatten` is False and `axis` is specified, it contains indices to output array `y` on `axis`."
- },
- {
- "name": "counts",
- "type": "nnabla.Variable",
- "description": "A 1-D array. It's the count of each element of 'y' in input array `x`."
- }
- ]
- },
- {
- "name": "EyeLike",
- "description": "Generate a 2-D array with ones on the diagonal, specified by `k`, and zeros elsewhere.\nThe shape of the output array is the same as the input array.",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "A 2-D array."
- }
- ],
- "attributes": [
- {
- "name": "k",
- "type": "int64",
- "default": 0,
- "description": "Index of the diagonal. The default value 0 means the main diagonal, a positive value means an upper diagonal, and a negative value means a lower diagonal."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "A 2-D array."
- }
- ]
- },
- {
- "name": "Mod2",
- "description": "Element-wise remainder function.\nThe behavior of this operator is determined by x0's dtype and the `fmod` argument:\n\n.. math::\n y_i = \\left\\{\n \\begin{array}{ll}\n \\text{numpy.fmod}(x_{0,i}, x_{1,i})\n & (x_{0} \\text{has a floating-point type or fmod is True})\\\\\n \\text{numpy.mod}(x_{0,i}, x_{1,i})\n & (\\text{otherwise})\n \\end{array} \\right..",
- "inputs": [
- {
- "name": "x0",
- "type": "nnabla.Variable",
- "description": "A N-D array."
- },
- {
- "name": "x1",
- "type": "nnabla.Variable",
- "description": "A N-D array."
- }
- ],
- "attributes": [
- {
- "name": "fmod",
- "type": "boolean",
- "default": false,
- "description": "If True, this operator behaves like numpy.fmod, otherwise it behaves like numpy.mod."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "A N-D array."
- }
- ]
- },
- {
- "name": "BitShift",
- "description": "Element-wise bit shift function.",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "description": "A N-D array. Its dtype must be one of the unsigned integer types."
- },
- {
- "name": "shift",
- "type": "nnabla.Variable",
- "description": "A N-D array. Its dtype is casted to x's dtype at run-time."
- }
- ],
- "attributes": [
- {
- "name": "direction",
- "type": "string",
- "default": "LEFT",
- "description": "Direction of bit shift."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "A N-D array."
- }
- ]
- },
- {
- "name": "Einsum",
- "description": "Evaluates the Einstein summation convention on the inputs.\nSee the numpy.einsum documentation for more information about equation.",
- "inputs": [
- {
- "name": "x",
- "type": "nnabla.Variable",
- "list": true,
- "description": "List of N-D array."
- }
- ],
- "attributes": [
- {
- "name": "equation",
- "type": "string",
- "default": "",
- "description": "A string that follows Einstein summation convention."
- }
- ],
- "outputs": [
- {
- "name": "y",
- "type": "nnabla.Variable",
- "description": "A N-D array."
- }
- ]
- }
- ]
|