[
  {
    "name": "activation",
    "category": "Activation",
    "description": "Applies specified type of activation function to input."
  },
  {
    "name": "add",
    "description": "A layer that performs elementwise addition.",
    "inputs": [
      { "name": "x" },
      { "name": "y" }
    ],
    "outputs": [
      { "name": "z" }
    ]
  },
  {
    "name": "average",
    "description": "A layer that computes the elementwise average of the inputs."
  },
  {
    "name": "batchnorm",
    "category": "Normalization",
    "description": "A layer that performs batch normalization, which is performed along the channel axis, and repeated along the other axes, if present.",
    "attributes": [
      { "name": "epsilon", "default": 0.000009999999747378752 },
      { "name": "computeMeanVar", "visible": false },
      { "name": "instanceNormalization", "visible": false }
    ]
  },
  {
    "name": "bias",
    "category": "Layer",
    "description": "A layer that performs elementwise addition of a bias, which is broadcasted to match the input shape."
  },
  {
    "name": "biDirectionalLSTM",
    "category": "Layer",
    "description": "Bidirectional long short-term memory (LSTM) layer. The first LSTM operates on the input sequence in the forward direction. The second LSTM operates on the input sequence in the reverse direction.",
    "inputs": [
      { "name": "input" },
      { "name": "h" },
      { "name": "c" },
      { "name": "h_rev" },
      { "name": "c_rev" },
      { "name": "inputGateWeightMatrix", "visible": false },
      { "name": "forgetGateWeightMatrix", "visible": false },
      { "name": "blockInputWeightMatrix", "visible": false },
      { "name": "outputGateWeightMatrix", "visible": false },
      { "name": "inputGateRecursionMatrix", "visible": false },
      { "name": "forgetGateRecursionMatrix", "visible": false },
      { "name": "blockInputRecursionMatrix", "visible": false },
      { "name": "outputGateRecursionMatrix", "visible": false },
      { "name": "inputGateBiasVector", "visible": false },
      { "name": "forgetGateBiasVector", "visible": false },
      { "name": "blockInputBiasVector", "visible": false },
      { "name": "outputGateBiasVector", "visible": false },
      { "name": "inputGateWeightMatrix_rev", "visible": false },
      { "name": "forgetGateWeightMatrix_rev", "visible": false },
      { "name": "blockInputWeightMatrix_rev", "visible": false },
      { "name": "outputGateWeightMatrix_rev", "visible": false },
      { "name": "inputGateRecursionMatrix_rev", "visible": false },
      { "name": "forgetGateRecursionMatrix_rev", "visible": false },
      { "name": "blockInputRecursionMatrix_rev", "visible": false },
      { "name": "outputGateRecursionMatrix_rev", "visible": false },
      { "name": "inputGateBiasVector_rev", "visible": false },
      { "name": "forgetGateBiasVector_rev", "visible": false },
      { "name": "blockInputBiasVector_rev", "visible": false },
      { "name": "outputGateBiasVector_rev", "visible": false }
    ],
    "outputs": [
      { "name": "output" },
      { "name": "h" },
      { "name": "c" },
      { "name": "h_rev" },
      { "name": "c_rev" }
    ]
  },
  {
    "name": "concat",
    "category": "Tensor",
    "description": "A layer that concatenates along the channel axis (default) or sequence axis.",
    "inputs": [
      { "name": "inputs", "option": "variadic" }
    ]
  },
  {
    "name": "convolution",
    "category": "Layer",
    "description": "A layer that performs spatial convolution or deconvolution.",
    "attributes": [
      { "name": "outputShape", "type": "uint64[]", "description": "Either None or a 2-tuple, specifying the output shape (output_height, output_width). Used only when is_deconv == True. When is_deconv == False, this parameter is ignored. If it is None, the output shape is calculated automatically using the border_mode. Kindly refer to NeuralNetwork.proto for details.", "visible": false },
      { "name": "outputChannels", "type": "uint64", "description": "The number of kernels. Same as ``C_out`` used in the layer description.", "visible": false },
      { "name": "kernelChannels", "type": "uint64", "description": "Channel dimension of the kernels. Must be equal to ``inputChannels / nGroups``, if isDeconvolution == False. Must be equal to ``inputChannels``, if isDeconvolution == True.", "visible": false },
      { "name": "nGroups", "type": "uint64", "description": "Group convolution, i.e. weight reuse along channel axis. Input and kernels are divided into g groups and convolution / deconvolution is applied within the groups independently. If not set or 0, it is set to the default value 1.", "default": 1 },
      { "name": "isDeconvolution", "type": "boolean", "description": "Flag to specify whether it is a deconvolution layer." },
      { "name": "valid", "type": "ValidPadding", "visible": false },
      { "name": "same", "type": "SamePadding", "visible": false },
      { "name": "dilationFactor", "type": "uint64[]", "default": [ 1, 1 ] },
      { "name": "stride", "type": "uint64[]", "default": [ 1, 1 ] },
      { "name": "kernelSize", "type": "uint64[]", "default": [ 3, 3 ] },
      { "name": "hasBias", "type": "boolean", "description": "Flag to specify whether a bias is to be added or not.", "visible": false }
    ]
  },
  {
    "name": "crop",
    "category": "Data",
    "description": "A layer that crops the spatial dimensions of an input. If two inputs are provided, the shape of the second input is used as the reference shape.",
    "inputs": [
      { "name": "x1" },
      { "name": "x2" }
    ],
    "outputs": [
      { "name": "y" }
    ]
  },
  {
    "name": "dot",
    "description": "If true, inputs are normalized first, thereby computing the cosine similarity."
  },
  {
    "name": "embedding",
    "category": "Transform",
    "description": "A layer that performs a matrix lookup and optionally adds a bias."
  },
  {
    "name": "featureVectorizer",
    "inputs": [
      { "name": "inputs", "option": "variadic" }
    ]
  },
  {
    "name": "flatten",
    "category": "Shape",
    "description": "A layer that flattens the input.",
    "attributes": [
      { "name": "mode", "type": "FlattenLayerParams.FlattenOrder" }
    ]
  },
  {
    "name": "gelu",
    "category": "Activation",
    "description": "Gaussian error linear unit activation.",
    "attributes": [
      { "name": "mode", "type": "GeluLayerParams.GeluMode" }
    ]
  },
  {
    "name": "gru",
    "category": "Layer",
    "description": "Gated-Recurrent Unit (GRU) Layer",
    "inputs": [
      { "name": "input" },
      { "name": "h" },
      { "name": "updateGateWeightMatrix", "visible": false },
      { "name": "resetGateWeightMatrix", "visible": false },
      { "name": "outputGateWeightMatrix", "visible": false },
      { "name": "updateGateRecursionMatrix", "visible": false },
      { "name": "resetGateRecursionMatrix", "visible": false },
      { "name": "outputGateRecursionMatrix", "visible": false },
      { "name": "updateGateBiasVector", "visible": false },
      { "name": "resetGateBiasVector", "visible": false },
      { "name": "outputGateBiasVector", "visible": false }
    ],
    "outputs": [
      { "name": "output" },
      { "name": "h" }
    ]
  },
  {
    "name": "innerProduct",
    "category": "Layer",
    "description": "A layer that performs a matrix vector product. This is equivalent to a fully-connected, or dense layer.",
    "attributes": [
      { "name": "inputChannels", "type": "uint64", "visible": false },
      { "name": "outputChannels", "type": "uint64", "visible": false },
      { "name": "hasBias", "type": "boolean", "visible": false }
    ]
  },
  {
    "name": "int64ClassLabels",
    "category": "Data",
    "outputs": [
      { "name": "probabilities" },
      { "name": "feature" }
    ]
  },
  {
    "name": "itemSimilarityRecommender",
    "inputs": [
      { "name": "item" },
      { "name": "numRecommendations" },
      { "name": "itemRestriction" },
      { "name": "itemExclusion" }
    ],
    "outputs": [
      { "name": "recommendedItemList" },
      { "name": "recommendedItemScore" }
    ]
  },
  {
    "name": "l2normalize",
    "category": "Normalization",
    "description": "A layer that performs L2 normalization, i.e. divides by the square root of the sum of squares of all elements of input."
  },
  {
    "name": "loadConstant",
    "category": "Data"
  },
  {
    "name": "lrn",
    "category": "Normalization",
    "description": "A layer that performs local response normalization (LRN).",
    "attributes": [
      { "name": "k", "default": 1 }
    ]
  },
  {
    "name": "max",
    "description": "A layer that computes the elementwise maximum over the inputs."
  },
  {
    "name": "min",
    "description": "A layer that computes the elementwise minimum over the inputs."
  },
  {
    "name": "multiply",
    "description": "A layer that performs elementwise multiplication.",
    "inputs": [
      { "name": "x" },
      { "name": "y" }
    ],
    "outputs": [
      { "name": "z" }
    ]
  },
  {
    "name": "mvn",
    "category": "Normalization",
    "description": "A layer that performs mean variance normalization, along axis = -3."
  },
  {
    "name": "nonMaximumSuppression",
    "attributes": [
      { "name": "iouThreshold" },
      { "name": "confidenceThreshold" }
    ],
    "inputs": [
      { "name": "confidence" },
      { "name": "coordinates" },
      { "name": "iouThreshold" },
      { "name": "confidenceThreshold" }
    ],
    "outputs": [
      { "name": "confidence" },
      { "name": "coordinates" }
    ]
  },
  {
    "name": "padding",
    "category": "Shape",
    "description": "Fill a constant value in the padded region.",
    "attributes": [
      { "name": "paddingAmounts", "visible": false }
    ]
  },
  {
    "name": "permute",
    "category": "Shape",
    "description": "A layer that rearranges the dimensions and data of an input."
  },
  {
    "name": "pooling",
    "category": "Pool",
    "description": "Spatial Pooling layer to reduce dimensions of input using the specified kernel size and type.",
    "attributes": [
      { "name": "includeLastPixel", "type": "ValidCompletePadding", "visible": false },
      { "name": "same", "type": "SamePadding", "visible": false },
      { "name": "valid", "type": "ValidCompletePadding", "visible": false },
      { "name": "type", "type": "PoolingLayerParams.PoolingType" },
      { "name": "globalPooling", "type": "boolean", "default": false },
      { "name": "stride", "type": "uint64[]", "default": [ 1, 1 ] },
      { "name": "kernelSize", "type": "uint64[]", "default": [ 3, 3 ] },
      { "name": "avgPoolExcludePadding", "type": "boolean", "default": false }
    ]
  },
  {
    "name": "reduce",
    "description": "A layer that reduces the input using a specified operation."
  },
  {
    "name": "reorganizeData",
    "category": "Shape",
    "description": "A layer that reorganizes data in the input in: 1. SPACE_TO_DEPTH, 2. DEPTH_TO_SPACE."
  },
  {
    "name": "reshape",
    "category": "Shape",
    "description": "A layer that recasts the input into a new shape."
  },
  {
    "name": "scale",
    "category": "Layer",
    "description": "A layer that performs elementwise multiplication by a scale factor and optionally adds a bias.",
    "attributes": [
      { "name": "hasBias", "type": "boolean", "visible": false }
    ]
  },
  {
    "name": "scaler",
    "category": "Data"
  },
  {
    "name": "sequenceRepeat",
    "category": "Shape",
    "description": "A layer that repeats a sequence."
  },
  {
    "name": "slice",
    "description": "A layer that slices the input data along a given axis."
  },
  {
    "name": "split",
    "description": "A layer that uniformly splits across the channel dimension to produce a specified number of outputs."
  },
  {
    "name": "softmax",
    "category": "Activation",
    "description": "A layer that performs softmax normalization. Normalization is done along the channel axis."
  },
  {
    "name": "softmaxND",
    "category": "Activation",
    "description": "A layer that performs softmax normalization along a specified axis."
  },
  {
    "name": "squeeze",
    "category": "Transform"
  },
  {
    "name": "stringClassLabels",
    "category": "Data",
    "outputs": [
      { "name": "probabilities" },
      { "name": "feature" }
    ]
  },
  {
    "name": "textClassifier",
    "attributes": [
      { "name": "revision", "visible": false }
    ]
  },
  {
    "name": "unary",
    "description": "A layer that applies a unary function.",
    "attributes": [
      { "name": "type", "type": "UnaryFunctionLayerParams.Operation" },
      { "name": "alpha", "default": 1 },
      { "name": "scale", "default": 1 },
      { "name": "epsilon", "default": 9.999999974752427e-7 }
    ],
    "inputs": [
      { "name": "x" }
    ],
    "outputs": [
      { "name": "z" }
    ]
  },
  {
    "name": "uniDirectionalLSTM",
    "category": "Layer",
    "description": "A unidirectional long short-term memory (LSTM) layer.",
    "inputs": [
      { "name": "input" },
      { "name": "h" },
      { "name": "c" },
      { "name": "inputGateWeightMatrix", "visible": false },
      { "name": "forgetGateWeightMatrix", "visible": false },
      { "name": "blockInputWeightMatrix", "visible": false },
      { "name": "outputGateWeightMatrix", "visible": false },
      { "name": "inputGateRecursionMatrix", "visible": false },
      { "name": "forgetGateRecursionMatrix", "visible": false },
      { "name": "blockInputRecursionMatrix", "visible": false },
      { "name": "outputGateRecursionMatrix", "visible": false },
      { "name": "inputGateBiasVector", "visible": false },
      { "name": "forgetGateBiasVector", "visible": false },
      { "name": "blockInputBiasVector", "visible": false },
      { "name": "outputGateBiasVector", "visible": false }
    ],
    "outputs": [
      { "name": "output" },
      { "name": "h" },
      { "name": "c" }
    ]
  },
  {
    "name": "upsample",
    "category": "Data",
    "description": "A layer that scales up spatial dimensions. It supports two modes: nearest neighbour (default) and bilinear."
  },
  {
    "name": "transpose",
    "category": "Transform"
  },
  {
    "name": "wordTagger",
    "attributes": [
      { "name": "revision", "visible": false }
    ],
    "outputs": [
      { "name": "tokens" },
      { "name": "tags" },
      { "name": "locations" },
      { "name": "lengths" }
    ]
  },
  {
    "name": "program:conv",
    "category": "Layer",
    "inputs": [
      { "name": "x" },
      { "name": "weight" },
      { "name": "bias" }
    ]
  },
  {
    "name": "program:linear",
    "category": "Layer",
    "inputs": [
      { "name": "x" },
      { "name": "weight" },
      { "name": "bias" }
    ]
  },
  {
    "name": "program:pad",
    "category": "Tensor"
  },
  {
    "name": "program:transpose",
    "category": "Transform"
  },
  {
    "name": "program:sigmoid",
    "category": "Activation"
  },
  {
    "name": "program:softmax",
    "category": "Activation"
  },
  {
    "name": "program:relu",
    "category": "Activation"
  },
  {
    "name": "program:relu6",
    "category": "Activation"
  },
  {
    "name": "program:reshape",
    "category": "Shape"
  }
]