coreml-metadata.json 17 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565
  1. [
  2. {
  3. "name": "activation",
  4. "category": "Activation",
  5. "description": "Applies specified type of activation function to input."
  6. },
  7. {
  8. "name": "add",
  9. "description": "A layer that performs elementwise addition.",
  10. "inputs": [
  11. { "name": "x" },
  12. { "name": "y" }
  13. ],
  14. "outputs": [
  15. { "name": "z" }
  16. ]
  17. },
  18. {
  19. "name": "average",
  20. "description": "A layer that computes the elementwise average of the inputs."
  21. },
  22. {
  23. "name": "batchnorm",
  24. "category": "Normalization",
  25. "description": "A layer that performs batch normalization, which is performed along the channel axis, and repeated along the other axes, if present.",
  26. "attributes": [
  27. { "name": "epsilon", "default": 0.000009999999747378752 },
  28. { "name": "computeMeanVar", "visible": false },
  29. { "name": "instanceNormalization", "visible": false }
  30. ]
  31. },
  32. {
  33. "name": "bias",
  34. "category": "Layer",
  35. "description": "A layer that performs elementwise addition of a bias, which is broadcasted to match the input shape."
  36. },
  37. {
  38. "name": "biDirectionalLSTM",
  39. "category": "Layer",
  40. "description": "Bidirectional long short-term memory (LSTM) layer. The first LSTM operates on the input sequence in the forward direction. The second LSTM operates on the input sequence in the reverse direction.",
  41. "inputs": [
  42. { "name": "input" },
  43. { "name": "h" },
  44. { "name": "c" },
  45. { "name": "h_rev" },
  46. { "name": "c_rev" },
  47. { "name": "inputGateWeightMatrix", "visible": false },
  48. { "name": "forgetGateWeightMatrix", "visible": false },
  49. { "name": "blockInputWeightMatrix", "visible": false },
  50. { "name": "outputGateWeightMatrix", "visible": false },
  51. { "name": "inputGateRecursionMatrix", "visible": false },
  52. { "name": "forgetGateRecursionMatrix", "visible": false },
  53. { "name": "blockInputRecursionMatrix", "visible": false },
  54. { "name": "outputGateRecursionMatrix", "visible": false },
  55. { "name": "inputGateBiasVector", "visible": false },
  56. { "name": "forgetGateBiasVector", "visible": false },
  57. { "name": "blockInputBiasVector", "visible": false },
  58. { "name": "outputGateBiasVector", "visible": false },
  59. { "name": "inputGateWeightMatrix_rev", "visible": false },
  60. { "name": "forgetGateWeightMatrix_rev", "visible": false },
  61. { "name": "blockInputWeightMatrix_rev", "visible": false },
  62. { "name": "outputGateWeightMatrix_rev", "visible": false },
  63. { "name": "inputGateRecursionMatrix_rev", "visible": false },
  64. { "name": "forgetGateRecursionMatrix_rev", "visible": false },
  65. { "name": "blockInputRecursionMatrix_rev", "visible": false },
  66. { "name": "outputGateRecursionMatrix_rev", "visible": false },
  67. { "name": "inputGateBiasVector_rev", "visible": false },
  68. { "name": "forgetGateBiasVector_rev", "visible": false },
  69. { "name": "blockInputBiasVector_rev", "visible": false },
  70. { "name": "outputGateBiasVector_rev", "visible": false }
  71. ],
  72. "outputs": [
  73. { "name": "output" },
  74. { "name": "h" },
  75. { "name": "c" },
  76. { "name": "h_rev" },
  77. { "name": "c_rev" }
  78. ]
  79. },
  80. {
  81. "name": "concat",
  82. "category": "Tensor",
  83. "description": "A layer that concatenates along the channel axis (default) or sequence axis.",
  84. "inputs": [
  85. { "name": "inputs", "type": "Tensor[]" }
  86. ]
  87. },
  88. {
  89. "name": "convolution",
  90. "category": "Layer",
  91. "description": "A layer that performs spatial convolution or deconvolution.",
  92. "attributes": [
  93. { "name": "outputShape", "type": "uint64[]", "description": "Either None or a 2-tuple, specifying the output shape (output_height, output_width). Used only when is_deconv == True. When is_deconv == False, this parameter is ignored. If it is None, the output shape is calculated automatically using the border_mode. Kindly refer to NeuralNetwork.proto for details.", "visible": false },
  94. { "name": "outputChannels", "type": "uint64", "description": "The number of kernels. Same as ``C_out`` used in the layer description.", "visible": false },
  95. { "name": "kernelChannels", "type": "uint64", "description": "Channel dimension of the kernels. Must be equal to ``inputChannels / nGroups``, if isDeconvolution == False. Must be equal to ``inputChannels``, if isDeconvolution == True.", "visible": false },
  96. { "name": "nGroups", "type": "uint64", "description": "Group convolution, i.e. weight reuse along channel axis. Input and kernels are divided into g groups and convolution / deconvolution is applied within the groups independently. If not set or 0, it is set to the default value 1.", "default": 1 },
  97. { "name": "isDeconvolution", "type": "boolean", "description": "Flag to specify whether it is a deconvolution layer." },
  98. { "name": "valid", "type": "ValidPadding", "visible": false },
  99. { "name": "same", "type": "SamePadding", "visible": false },
  100. { "name": "dilationFactor", "type": "uint64[]", "default": [ 1, 1 ] },
  101. { "name": "stride", "type": "uint64[]", "default": [ 1, 1 ] },
  102. { "name": "kernelSize", "type": "uint64[]", "default": [ 3, 3 ] },
  103. { "name": "hasBias", "type": "boolean", "description": "Flag to specify whether a bias is to be added or not.", "visible": false }
  104. ]
  105. },
  106. {
  107. "name": "crop",
  108. "category": "Data",
  109. "description": "A layer that crops the spatial dimensions of an input. If two inputs are provided, the shape of the second input is used as the reference shape.",
  110. "inputs": [
  111. { "name": "x1" },
  112. { "name": "x2" }
  113. ],
  114. "outputs": [
  115. { "name": "y" }
  116. ]
  117. },
  118. {
  119. "name": "dot",
  120. "description": "If true, inputs are normalized first, thereby computing the cosine similarity."
  121. },
  122. {
  123. "name": "embedding",
  124. "category": "Transform",
  125. "description": "A layer that performs a matrix lookup and optionally adds a bias."
  126. },
  127. {
  128. "name": "featureVectorizer",
  129. "inputs": [
  130. { "name": "inputs", "type": "Tensor[]" }
  131. ]
  132. },
  133. {
  134. "name": "flatten",
  135. "category": "Shape",
  136. "description": "A layer that flattens the input.",
  137. "attributes": [
  138. { "name": "mode", "type": "FlattenLayerParams.FlattenOrder" }
  139. ]
  140. },
  141. {
  142. "name": "gather",
  143. "category": "Transform",
  144. "description": "Gather layer that gathers elements from the first input, along a specified axis, at indices specified in the second input.",
  145. "inputs": [
  146. { "name": "input", "type": "Tensor" },
  147. { "name": "indices", "type": "Tensor" }
  148. ]
  149. },
  150. {
  151. "name": "gelu",
  152. "category": "Activation",
  153. "description": "Gaussian error linear unit activation.",
  154. "attributes": [
  155. { "name": "mode", "type": "GeluLayerParams.GeluMode" }
  156. ]
  157. },
  158. {
  159. "name": "gru",
  160. "category": "Layer",
  161. "description": "Gated-Recurrent Unit (GRU) Layer",
  162. "inputs": [
  163. { "name": "input" },
  164. { "name": "h" },
  165. { "name": "updateGateWeightMatrix", "visible": false },
  166. { "name": "resetGateWeightMatrix", "visible": false },
  167. { "name": "outputGateWeightMatrix", "visible": false },
  168. { "name": "updateGateRecursionMatrix", "visible": false },
  169. { "name": "resetGateRecursionMatrix", "visible": false },
  170. { "name": "outputGateRecursionMatrix", "visible": false },
  171. { "name": "updateGateBiasVector", "visible": false },
  172. { "name": "resetGateBiasVector", "visible": false },
  173. { "name": "outputGateBiasVector", "visible": false }
  174. ],
  175. "outputs": [
  176. { "name": "output" },
  177. { "name": "h" }
  178. ]
  179. },
  180. {
  181. "name": "innerProduct",
  182. "category": "Layer",
  183. "description": "A layer that performs a matrix vector product. This is equivalent to a fully-connected, or dense layer.",
  184. "attributes": [
  185. { "name": "inputChannels", "type": "uint64", "visible": false },
  186. { "name": "outputChannels", "type": "uint64", "visible": false },
  187. { "name": "hasBias", "type": "boolean", "visible": false }
  188. ]
  189. },
  190. {
  191. "name": "int64ClassLabels",
  192. "category": "Data",
  193. "outputs": [
  194. { "name": "probabilities" },
  195. { "name": "feature" }
  196. ]
  197. },
  198. {
  199. "name": "itemSimilarityRecommender",
  200. "inputs": [
  201. { "name": "item" },
  202. { "name": "numRecommendations" },
  203. { "name": "itemRestriction" },
  204. { "name": "itemExclusion" }
  205. ],
  206. "outputs": [
  207. { "name": "recommendedItemList" },
  208. { "name": "recommendedItemScore" }
  209. ]
  210. },
  211. {
  212. "name": "l2normalize",
  213. "category": "Normalization",
  214. "description": "A layer that performs L2 normalization, i.e. divides by the square root of the sum of squares of all elements of input."
  215. },
  216. {
  217. "name": "loadConstant",
  218. "category": "Data"
  219. },
  220. {
  221. "name": "lrn",
  222. "category": "Normalization",
  223. "description": "A layer that performs local response normalization (LRN).",
  224. "attributes": [
  225. { "name": "k", "default": 1 }
  226. ]
  227. },
  228. {
  229. "name": "max",
  230. "description": "A layer that computes the elementwise maximum over the inputs."
  231. },
  232. {
  233. "name": "min",
  234. "description": "A layer that computes the elementwise minimum over the inputs."
  235. },
  236. {
  237. "name": "multiply",
  238. "description": "A layer that performs elementwise multiplication.",
  239. "inputs": [
  240. { "name": "x" },
  241. { "name": "y" }
  242. ],
  243. "outputs": [
  244. { "name": "z" }
  245. ]
  246. },
  251. {
  252. "name": "mvn",
  253. "category": "Normalization",
  254. "description": "A layer that performs mean variance normalization, along axis = -3."
  255. },
  256. {
  257. "name": "nonMaximumSuppression",
  258. "attributes": [
  259. { "name": "iouThreshold" },
  260. { "name": "confidenceThreshold" }
  261. ],
  262. "inputs": [
  263. { "name": "confidence" },
  264. { "name": "coordinates" },
  265. { "name": "iouThreshold" },
  266. { "name": "confidenceThreshold" }
  267. ],
  268. "outputs": [
  269. { "name": "confidence" },
  270. { "name": "coordinates" }
  271. ]
  272. },
  273. {
  274. "name": "padding",
  275. "category": "Shape",
  276. "description": "Fill a constant value in the padded region.",
  277. "attributes": [
  278. { "name": "paddingAmounts", "visible": false }
  279. ]
  280. },
  281. {
  282. "name": "permute",
  283. "category": "Shape",
  284. "description": "A layer that rearranges the dimensions and data of an input."
  285. },
  286. {
  287. "name": "pooling",
  288. "category": "Pool",
  289. "description": "Spatial Pooling layer to reduce dimensions of input using the specified kernel size and type.",
  290. "attributes": [
  291. { "name": "includeLastPixel", "type": "ValidCompletePadding", "visible": false },
  292. { "name": "same", "type": "SamePadding", "visible": false },
  293. { "name": "valid", "type": "ValidCompletePadding", "visible": false },
  294. { "name": "type", "type": "PoolingLayerParams.PoolingType" },
  295. { "name": "globalPooling", "type": "boolean", "default": false },
  296. { "name": "stride", "type": "uint64[]", "default": [ 1, 1 ] },
  297. { "name": "kernelSize", "type": "uint64[]", "default": [ 3, 3 ] },
  298. { "name": "avgPoolExcludePadding", "type": "boolean", "default": false }
  299. ]
  300. },
  301. {
  302. "name": "reduce",
  303. "description": "A layer that reduces the input using a specified operation."
  304. },
  305. {
  306. "name": "reorganizeData",
  307. "category": "Shape",
  308. "description": "A layer that reorganizes data in the input in: 1. SPACE_TO_DEPTH, 2. DEPTH_TO_SPACE."
  309. },
  310. {
  311. "name": "reshape",
  312. "category": "Shape",
  313. "description": "A layer that recasts the input into a new shape."
  314. },
  315. {
  316. "name": "scale",
  317. "category": "Layer",
  318. "description": "A layer that performs elementwise multiplication by a scale factor and optionally adds a bias.",
  319. "attributes": [
  320. { "name": "hasBias", "type": "boolean", "visible": false }
  321. ]
  322. },
  323. {
  324. "name": "scaler",
  325. "category": "Data"
  326. },
  327. {
  328. "name": "sequenceRepeat",
  329. "category": "Shape",
  330. "description": "A layer that repeats a sequence."
  331. },
  332. {
  333. "name": "slice",
  334. "description": "A layer that slices the input data along a given axis."
  335. },
  336. {
  337. "name": "split",
  338. "description": "A layer that uniformly splits across the channel dimension to produce a specified number of outputs."
  339. },
  340. {
  341. "name": "softmax",
  342. "category": "Activation",
  343. "description": "A layer that performs softmax normalization. Normalization is done along the channel axis."
  344. },
  345. {
  346. "name": "softmaxND",
  347. "category": "Activation",
  348. "description": "A layer that performs softmax normalization along a specified axis."
  349. },
  350. {
  351. "name": "squeeze",
  352. "category": "Transform"
  353. },
  354. {
  355. "name": "stringClassLabels",
  356. "category": "Data",
  357. "outputs": [
  358. { "name": "probabilities" },
  359. { "name": "feature" }
  360. ]
  361. },
  362. {
  363. "name": "textClassifier",
  364. "attributes": [
  365. { "name": "revision", "visible": false }
  366. ]
  367. },
  368. {
  369. "name": "unary",
  370. "description": "A layer that applies a unary function.",
  371. "attributes": [
  372. { "name": "type", "type": "UnaryFunctionLayerParams.Operation" },
  373. { "name": "alpha", "default": 1 },
  374. { "name": "scale", "default": 1 },
  375. { "name": "epsilon", "default": 9.999999974752427e-7 }
  376. ],
  377. "inputs": [
  378. { "name": "x" }
  379. ],
  380. "outputs": [
  381. { "name": "z" }
  382. ]
  383. },
  384. {
  385. "name": "uniDirectionalLSTM",
  386. "category": "Layer",
  387. "description": "A unidirectional long short-term memory (LSTM) layer.",
  388. "inputs": [
  389. { "name": "input" },
  390. { "name": "h" },
  391. { "name": "c" },
  392. { "name": "inputGateWeightMatrix", "visible": false },
  393. { "name": "forgetGateWeightMatrix", "visible": false },
  394. { "name": "blockInputWeightMatrix", "visible": false },
  395. { "name": "outputGateWeightMatrix", "visible": false },
  396. { "name": "inputGateRecursionMatrix", "visible": false },
  397. { "name": "forgetGateRecursionMatrix", "visible": false },
  398. { "name": "blockInputRecursionMatrix", "visible": false },
  399. { "name": "outputGateRecursionMatrix", "visible": false },
  400. { "name": "inputGateBiasVector", "visible": false },
  401. { "name": "forgetGateBiasVector", "visible": false },
  402. { "name": "blockInputBiasVector", "visible": false },
  403. { "name": "outputGateBiasVector", "visible": false }
  404. ],
  405. "outputs": [
  406. { "name": "output" },
  407. { "name": "h" },
  408. { "name": "c" }
  409. ]
  410. },
  411. {
  412. "name": "upsample",
  413. "category": "Data",
  414. "description": "A layer that scales up spatial dimensions. It supports two modes: nearest neighbour (default) and bilinear."
  415. },
  416. {
  417. "name": "transpose",
  418. "category": "Transform"
  419. },
  420. {
  421. "name": "wordTagger",
  422. "attributes": [
  423. { "name": "revision", "visible": false }
  424. ],
  425. "outputs": [
  426. { "name": "tokens" },
  427. { "name": "tags" },
  428. { "name": "locations" },
  429. { "name": "lengths" }
  430. ]
  431. },
  432. {
  433. "name": "program:conv",
  434. "category": "Layer",
  435. "inputs": [
  436. { "name": "x" },
  437. { "name": "weight" },
  438. { "name": "bias" }
  439. ]
  440. },
  441. {
  442. "name": "program:batch_norm",
  443. "category": "Normalization",
  444. "inputs": [
  445. { "name": "x" },
  446. { "name": "mean" },
  447. { "name": "variance" },
  448. { "name": "gamma" },
  449. { "name": "beta" }
  450. ]
  451. },
  452. {
  453. "name": "program:linear",
  454. "category": "Layer",
  455. "inputs": [
  456. { "name": "x" },
  457. { "name": "weight" },
  458. { "name": "bias" }
  459. ]
  460. },
  461. {
  462. "name": "program:pad",
  463. "category": "Tensor"
  464. },
  465. {
  466. "name": "program:transpose",
  467. "category": "Transform"
  468. },
  469. {
  470. "name": "program:sigmoid",
  471. "category": "Activation"
  472. },
  473. {
  474. "name": "program:softmax",
  475. "category": "Activation"
  476. },
  477. {
  478. "name": "program:relu",
  479. "category": "Activation"
  480. },
  481. {
  482. "name": "program:relu6",
  483. "category": "Activation"
  484. },
  485. {
  486. "name": "program:reshape",
  487. "category": "Shape"
  488. },
  489. {
  490. "name": "program:concat",
  491. "category": "Tensor"
  492. },
  493. {
  494. "name": "program:layer_norm",
  495. "category": "Normalization"
  496. },
  497. {
  498. "name": "espresso:convolution",
  499. "category": "Layer"
  500. },
  501. {
  502. "name": "espresso:inner_product",
  503. "category": "Layer"
  504. },
  505. {
  506. "name": "espresso:activation",
  507. "category": "Activation"
  508. },
  509. {
  510. "name": "espresso:softmax",
  511. "category": "Activation"
  512. },
  513. {
  514. "name": "espresso:transpose",
  515. "category": "Transform"
  516. },
  517. {
  518. "name": "espresso:pool",
  519. "category": "Pool"
  520. },
  521. {
  522. "name": "espresso:instancenorm_1d",
  523. "category": "Normalization"
  524. },
  525. {
  526. "name": "espresso:batch_norm",
  527. "category": "Normalization"
  528. },
  529. {
  530. "name": "espresso:reshape",
  531. "category": "Shape"
  532. },
  533. {
  534. "name": "espresso:dynamic_quantize",
  535. "category": "Quantization"
  536. },
  537. {
  538. "name": "espresso:dynamic_dequantize",
  539. "category": "Quantization"
  540. },
  541. {
  542. "name": "espresso:concat",
  543. "category": "Tensor"
  544. },
  545. {
  546. "name": "espresso:upsample",
  547. "category": "Data"
  548. },
  549. {
  550. "name": "espresso:relu",
  551. "category": "Activation"
  552. },
  553. {
  554. "name": "espresso:prelu",
  555. "category": "Activation"
  556. },
  557. {
  558. "name": "espresso:tanh",
  559. "category": "Activation"
  560. },
  561. {
  562. "name": "espresso:squeeze",
  563. "category": "Transform"
  564. }
  565. ]