[
  {
    "name": "activation",
    "category": "Activation",
    "description": "Applies specified type of activation function to input."
  },
  {
    "name": "add",
    "description": "A layer that performs elementwise addition.",
    "inputs": [
      { "name": "x" },
      { "name": "y" }
    ],
    "outputs": [
      { "name": "z" }
    ]
  },
  {
    "name": "average",
    "description": "A layer that computes the elementwise average of the inputs."
  },
  {
    "name": "batchnorm",
    "category": "Normalization",
    "description": "A layer that performs batch normalization, which is performed along the channel axis, and repeated along the other axes, if present.",
    "attributes": [
      { "name": "epsilon", "default": 0.000009999999747378752 },
      { "name": "computeMeanVar", "visible": false },
      { "name": "instanceNormalization", "visible": false }
    ]
  },
  {
    "name": "bias",
    "category": "Layer",
    "description": "A layer that performs elementwise addition of a bias, which is broadcasted to match the input shape."
  },
  {
    "name": "biDirectionalLSTM",
    "category": "Layer",
    "description": "Bidirectional long short-term memory (LSTM) layer. The first LSTM operates on the input sequence in the forward direction. The second LSTM operates on the input sequence in the reverse direction.",
    "inputs": [
      { "name": "input" },
      { "name": "h" },
      { "name": "c" },
      { "name": "h_rev" },
      { "name": "c_rev" },
      { "name": "inputGateWeightMatrix", "visible": false },
      { "name": "forgetGateWeightMatrix", "visible": false },
      { "name": "blockInputWeightMatrix", "visible": false },
      { "name": "outputGateWeightMatrix", "visible": false },
      { "name": "inputGateRecursionMatrix", "visible": false },
      { "name": "forgetGateRecursionMatrix", "visible": false },
      { "name": "blockInputRecursionMatrix", "visible": false },
      { "name": "outputGateRecursionMatrix", "visible": false },
      { "name": "inputGateBiasVector", "visible": false },
      { "name": "forgetGateBiasVector", "visible": false },
      { "name": "blockInputBiasVector", "visible": false },
      { "name": "outputGateBiasVector", "visible": false },
      { "name": "inputGateWeightMatrix_rev", "visible": false },
      { "name": "forgetGateWeightMatrix_rev", "visible": false },
      { "name": "blockInputWeightMatrix_rev", "visible": false },
      { "name": "outputGateWeightMatrix_rev", "visible": false },
      { "name": "inputGateRecursionMatrix_rev", "visible": false },
      { "name": "forgetGateRecursionMatrix_rev", "visible": false },
      { "name": "blockInputRecursionMatrix_rev", "visible": false },
      { "name": "outputGateRecursionMatrix_rev", "visible": false },
      { "name": "inputGateBiasVector_rev", "visible": false },
      { "name": "forgetGateBiasVector_rev", "visible": false },
      { "name": "blockInputBiasVector_rev", "visible": false },
      { "name": "outputGateBiasVector_rev", "visible": false }
    ],
    "outputs": [
      { "name": "output" },
      { "name": "h" },
      { "name": "c" },
      { "name": "h_rev" },
      { "name": "c_rev" }
    ]
  },
  {
    "name": "concat",
    "category": "Tensor",
    "description": "A layer that concatenates along the channel axis (default) or sequence axis.",
    "inputs": [
      { "name": "inputs", "type": "Tensor[]" }
    ]
  },
  {
    "name": "convolution",
    "category": "Layer",
    "description": "A layer that performs spatial convolution or deconvolution.",
    "attributes": [
      { "name": "outputShape", "type": "uint64[]", "description": "Either None or a 2-tuple, specifying the output shape (output_height, output_width). Used only when is_deconv == True. When is_deconv == False, this parameter is ignored. If it is None, the output shape is calculated automatically using the border_mode. Kindly refer to NeuralNetwork.proto for details.", "visible": false },
      { "name": "outputChannels", "type": "uint64", "description": "The number of kernels. Same as ``C_out`` used in the layer description.", "visible": false },
      { "name": "kernelChannels", "type": "uint64", "description": "Channel dimension of the kernels. Must be equal to ``inputChannels / nGroups``, if isDeconvolution == False. Must be equal to ``inputChannels``, if isDeconvolution == True.", "visible": false },
      { "name": "nGroups", "type": "uint64", "description": "Group convolution, i.e. weight reuse along channel axis. Input and kernels are divided into g groups and convolution / deconvolution is applied within the groups independently. If not set or 0, it is set to the default value 1.", "default": 1 },
      { "name": "isDeconvolution", "type": "boolean", "description": "Flag to specify whether it is a deconvolution layer." },
      { "name": "valid", "type": "ValidPadding", "visible": false },
      { "name": "same", "type": "SamePadding", "visible": false },
      { "name": "dilationFactor", "type": "uint64[]", "default": [ 1, 1 ] },
      { "name": "stride", "type": "uint64[]", "default": [ 1, 1 ] },
      { "name": "kernelSize", "type": "uint64[]", "default": [ 3, 3 ] },
      { "name": "hasBias", "type": "boolean", "description": "Flag to specify whether a bias is to be added or not.", "visible": false }
    ]
  },
  {
    "name": "crop",
    "category": "Data",
    "description": "A layer that crops the spatial dimensions of an input. If two inputs are provided, the shape of the second input is used as the reference shape.",
    "inputs": [
      { "name": "x1" },
      { "name": "x2" }
    ],
    "outputs": [
      { "name": "y" }
    ]
  },
  {
    "name": "dot",
    "description": "If true, inputs are normalized first, thereby computing the cosine similarity."
  },
  {
    "name": "embedding",
    "category": "Transform",
    "description": "A layer that performs a matrix lookup and optionally adds a bias."
  },
  {
    "name": "featureVectorizer",
    "inputs": [
      { "name": "inputs", "type": "Tensor[]" }
    ]
  },
  {
    "name": "flatten",
    "category": "Shape",
    "description": "A layer that flattens the input.",
    "attributes": [
      { "name": "mode", "type": "FlattenLayerParams.FlattenOrder" }
    ]
  },
  {
    "name": "gather",
    "category": "Transform",
    "description": "Gather layer that gathers elements from the first input, along a specified axis, at indices specified in the second input.",
    "inputs": [
      { "name": "input", "type": "Tensor" },
      { "name": "indices", "type": "Tensor" }
    ]
  },
  {
    "name": "gelu",
    "category": "Activation",
    "description": "Gaussian error linear unit activation.",
    "attributes": [
      { "name": "mode", "type": "GeluLayerParams.GeluMode" }
    ]
  },
  {
    "name": "gru",
    "category": "Layer",
    "description": "Gated-Recurrent Unit (GRU) Layer",
    "inputs": [
      { "name": "input" },
      { "name": "h" },
      { "name": "updateGateWeightMatrix", "visible": false },
      { "name": "resetGateWeightMatrix", "visible": false },
      { "name": "outputGateWeightMatrix", "visible": false },
      { "name": "updateGateRecursionMatrix", "visible": false },
      { "name": "resetGateRecursionMatrix", "visible": false },
      { "name": "outputGateRecursionMatrix", "visible": false },
      { "name": "updateGateBiasVector", "visible": false },
      { "name": "resetGateBiasVector", "visible": false },
      { "name": "outputGateBiasVector", "visible": false }
    ],
    "outputs": [
      { "name": "output" },
      { "name": "h" }
    ]
  },
  {
    "name": "innerProduct",
    "category": "Layer",
    "description": "A layer that performs a matrix vector product. This is equivalent to a fully-connected, or dense layer.",
    "attributes": [
      { "name": "inputChannels", "type": "uint64", "visible": false },
      { "name": "outputChannels", "type": "uint64", "visible": false },
      { "name": "hasBias", "type": "boolean", "visible": false }
    ]
  },
  {
    "name": "int64ClassLabels",
    "category": "Data",
    "outputs": [
      { "name": "probabilities" },
      { "name": "feature" }
    ]
  },
  {
    "name": "itemSimilarityRecommender",
    "inputs": [
      { "name": "item" },
      { "name": "numRecommendations" },
      { "name": "itemRestriction" },
      { "name": "itemExclusion" }
    ],
    "outputs": [
      { "name": "recommendedItemList" },
      { "name": "recommendedItemScore" }
    ]
  },
  {
    "name": "l2normalize",
    "category": "Normalization",
    "description": "A layer that performs L2 normalization, i.e. divides by the square root of the sum of squares of all elements of input."
  },
  {
    "name": "loadConstant",
    "category": "Data"
  },
  {
    "name": "lrn",
    "category": "Normalization",
    "description": "A layer that performs local response normalization (LRN).",
    "attributes": [
      { "name": "k", "default": 1 }
    ]
  },
  {
    "name": "max",
    "description": "A layer that computes the elementwise maximum over the inputs."
  },
  {
    "name": "min",
    "description": "A layer that computes the elementwise minimum over the inputs."
  },
  {
    "name": "multiply",
    "description": "A layer that performs elementwise multiplication.",
    "inputs": [
      { "name": "x" },
      { "name": "y" }
    ],
    "outputs": [
      { "name": "z" }
    ]
  },
  {
    "name": "mvn",
    "category": "Normalization",
    "description": "A layer that performs mean variance normalization, along axis = -3."
  },
  {
    "name": "nonMaximumSuppression",
    "attributes": [
      { "name": "iouThreshold" },
      { "name": "confidenceThreshold" }
    ],
    "inputs": [
      { "name": "confidence" },
      { "name": "coordinates" },
      { "name": "iouThreshold" },
      { "name": "confidenceThreshold" }
    ],
    "outputs": [
      { "name": "confidence" },
      { "name": "coordinates" }
    ]
  },
  {
    "name": "padding",
    "category": "Shape",
    "description": "Fill a constant value in the padded region.",
    "attributes": [
      { "name": "paddingAmounts", "visible": false }
    ]
  },
  {
    "name": "permute",
    "category": "Shape",
    "description": "A layer that rearranges the dimensions and data of an input."
  },
  {
    "name": "pooling",
    "category": "Pool",
    "description": "Spatial Pooling layer to reduce dimensions of input using the specified kernel size and type.",
    "attributes": [
      { "name": "includeLastPixel", "type": "ValidCompletePadding", "visible": false },
      { "name": "same", "type": "SamePadding", "visible": false },
      { "name": "valid", "type": "ValidCompletePadding", "visible": false },
      { "name": "type", "type": "PoolingLayerParams.PoolingType" },
      { "name": "globalPooling", "type": "boolean", "default": false },
      { "name": "stride", "type": "uint64[]", "default": [ 1, 1 ] },
      { "name": "kernelSize", "type": "uint64[]", "default": [ 3, 3 ] },
      { "name": "avgPoolExcludePadding", "type": "boolean", "default": false }
    ]
  },
  {
    "name": "reduce",
    "description": "A layer that reduces the input using a specified operation."
  },
  {
    "name": "reorganizeData",
    "category": "Shape",
    "description": "A layer that reorganizes data in the input in: 1. SPACE_TO_DEPTH, 2. DEPTH_TO_SPACE."
  },
  {
    "name": "reshape",
    "category": "Shape",
    "description": "A layer that recasts the input into a new shape."
  },
  {
    "name": "scale",
    "category": "Layer",
    "description": "A layer that performs elementwise multiplication by a scale factor and optionally adds a bias.",
    "attributes": [
      { "name": "hasBias", "type": "boolean", "visible": false }
    ]
  },
  {
    "name": "scaler",
    "category": "Data"
  },
  {
    "name": "sequenceRepeat",
    "category": "Shape",
    "description": "A layer that repeats a sequence."
  },
  {
    "name": "slice",
    "description": "A layer that uniformly splits across the channel dimension to produce a specified number of outputs."
  },
  {
    "name": "softmax",
    "category": "Activation",
    "description": "A layer that performs softmax normalization. Normalization is done along the channel axis."
  },
  {
    "name": "softmaxND",
    "category": "Activation",
    "description": "A layer that performs softmax normalization along a specified axis."
  },
  {
    "name": "squeeze",
    "category": "Transform"
  },
  {
    "name": "stringClassLabels",
    "category": "Data",
    "outputs": [
      { "name": "probabilities" },
      { "name": "feature" }
    ]
  },
  {
    "name": "textClassifier",
    "attributes": [
      { "name": "revision", "visible": false }
    ]
  },
  {
    "name": "unary",
    "description": "A layer that applies a unary function.",
    "attributes": [
      { "name": "type", "type": "UnaryFunctionLayerParams.Operation" },
      { "name": "alpha", "default": 1 },
      { "name": "scale", "default": 1 },
      { "name": "epsilon", "default": 9.999999974752427e-7 }
    ],
    "inputs": [
      { "name": "x" }
    ],
    "outputs": [
      { "name": "z" }
    ]
  },
  {
    "name": "uniDirectionalLSTM",
    "category": "Layer",
    "description": "A unidirectional long short-term memory (LSTM) layer.",
    "inputs": [
      { "name": "input" },
      { "name": "h" },
      { "name": "c" },
      { "name": "inputGateWeightMatrix", "visible": false },
      { "name": "forgetGateWeightMatrix", "visible": false },
      { "name": "blockInputWeightMatrix", "visible": false },
      { "name": "outputGateWeightMatrix", "visible": false },
      { "name": "inputGateRecursionMatrix", "visible": false },
      { "name": "forgetGateRecursionMatrix", "visible": false },
      { "name": "blockInputRecursionMatrix", "visible": false },
      { "name": "outputGateRecursionMatrix", "visible": false },
      { "name": "inputGateBiasVector", "visible": false },
      { "name": "forgetGateBiasVector", "visible": false },
      { "name": "blockInputBiasVector", "visible": false },
      { "name": "outputGateBiasVector", "visible": false }
    ],
    "outputs": [
      { "name": "output" },
      { "name": "h" },
      { "name": "c" }
    ]
  },
  {
    "name": "upsample",
    "category": "Data",
    "description": "A layer that scales up spatial dimensions. It supports two modes: nearest neighbour (default) and bilinear."
  },
  {
    "name": "transpose",
    "category": "Transform"
  },
  {
    "name": "wordTagger",
    "attributes": [
      { "name": "revision", "visible": false }
    ],
    "outputs": [
      { "name": "tokens" },
      { "name": "tags" },
      { "name": "locations" },
      { "name": "lengths" }
    ]
  },
  {
    "name": "program:conv",
    "category": "Layer",
    "inputs": [
      { "name": "x" },
      { "name": "weight" },
      { "name": "bias" }
    ]
  },
  {
    "name": "program:batch_norm",
    "category": "Normalization",
    "inputs": [
      { "name": "x" },
      { "name": "mean" },
      { "name": "variance" },
      { "name": "gamma" },
      { "name": "beta" }
    ]
  },
  {
    "name": "program:linear",
    "category": "Layer",
    "inputs": [
      { "name": "x" },
      { "name": "weight" },
      { "name": "bias" }
    ]
  },
  {
    "name": "program:pad",
    "category": "Tensor"
  },
  {
    "name": "program:transpose",
    "category": "Transform"
  },
  {
    "name": "program:sigmoid",
    "category": "Activation"
  },
  {
    "name": "program:softmax",
    "category": "Activation"
  },
  {
    "name": "program:relu",
    "category": "Activation"
  },
  {
    "name": "program:relu6",
    "category": "Activation"
  },
  {
    "name": "program:reshape",
    "category": "Shape"
  },
  {
    "name": "program:concat",
    "category": "Tensor"
  },
  {
    "name": "program:layer_norm",
    "category": "Normalization"
  },
  {
    "name": "program:max_pool",
    "category": "Pool"
  },
  {
    "name": "program:gather",
    "category": "Transform"
  }
]