浏览代码

Update -operator.json files

Lutz Roeder 8 年之前
父节点
当前提交
5512c12709
共有 4 个文件被更改,包括 167 次插入和 19 次删除
  1. 5 1
      README.md
  2. 11 11
      src/keras-operator.json
  3. 43 5
      src/onnx-operator.json
  4. 108 2
      src/tflite.js

+ 5 - 1
README.md

@@ -1,7 +1,7 @@
 
 <p align='center'><img width='400' src='media/logo.png'/></p>
 
-Netron is a viewer for neural network and machine learning models. 
+Netron is a viewer for neural network, deep learning and machine learning models. 
 
 Netron supports **[ONNX](http://onnx.ai)** (`.onnx`, `.pb`), **Keras** (`.h5`, `.keras`), **CoreML** (`.mlmodel`) and **TensorFlow Lite** (`.tflite`). 
 
@@ -67,6 +67,10 @@ Sample model files you can download and open:
 * [BVLC CaffeNet](http://dl.caffe.berkeleyvision.org/bvlc_reference_caffenet.caffemodel)
 * [BVLC GoogleNet](http://dl.caffe.berkeleyvision.org/bvlc_googlenet.caffemodel)
 
+**MXNet models**
+* [CaffeNet](https://mxnet.incubator.apache.org/model_zoo/index.html)
+* [SqueezeNet v1.1](http://data.dmlc.ml/models/imagenet/squeezenet/squeezenet_v1.1-symbol.json)
+
 **TensorFlow models**
 
 * [Inception v3](https://storage.googleapis.com/download.tensorflow.org/models/inception_v3_2016_08_28_frozen.pb.tar.gz)

+ 11 - 11
src/keras-operator.json

@@ -672,7 +672,7 @@
         },
         {
           "default": 0.99,
-          "description": "Momentum for the moving mean and the moving variance.",
+          "description": "Momentum for the moving average.",
           "name": "momentum"
         },
         {
@@ -1262,7 +1262,7 @@
               "seed": null
             }
           },
-          "description": "Initializer for the `kernel` weights matrix,\nused for the linear transformation of the inputs\n(see [initializers](https://keras.io/initializers)).",
+          "description": "Initializer for the `kernel` weights matrix,\nused for the linear transformation of the inputs.\n(see [initializers](https://keras.io/initializers)).",
           "name": "kernel_initializer"
         },
         {
@@ -1273,7 +1273,7 @@
               "seed": null
             }
           },
-          "description": "Initializer for the `recurrent_kernel`\nweights matrix,\nused for the linear transformation of the recurrent state\n(see [initializers](https://keras.io/initializers)).",
+          "description": "Initializer for the `recurrent_kernel`\nweights matrix,\nused for the linear transformation of the recurrent state.\n(see [initializers](https://keras.io/initializers)).",
           "name": "recurrent_initializer"
         },
         {
@@ -1709,7 +1709,7 @@
               "seed": null
             }
           },
-          "description": "Initializer for the `kernel` weights matrix,\nused for the linear transformation of the inputs\n(see [initializers](https://keras.io/initializers)).",
+          "description": "Initializer for the `kernel` weights matrix,\nused for the linear transformation of the inputs.\n(see [initializers](https://keras.io/initializers)).",
           "name": "kernel_initializer"
         },
         {
@@ -1720,7 +1720,7 @@
               "seed": null
             }
           },
-          "description": "Initializer for the `recurrent_kernel`\nweights matrix,\nused for the linear transformation of the recurrent state\n(see [initializers](https://keras.io/initializers)).",
+          "description": "Initializer for the `recurrent_kernel`\nweights matrix,\nused for the linear transformation of the recurrent state.\n(see [initializers](https://keras.io/initializers)).",
           "name": "recurrent_initializer"
         },
         {
@@ -1879,11 +1879,11 @@
           "name": "use_bias"
         },
         {
-          "description": "Initializer for the `kernel` weights matrix,\nused for the linear transformation of the inputs\n(see [initializers](https://keras.io/initializers)).",
+          "description": "Initializer for the `kernel` weights matrix,\nused for the linear transformation of the inputs.\n(see [initializers](https://keras.io/initializers)).",
           "name": "kernel_initializer"
         },
         {
-          "description": "Initializer for the `recurrent_kernel`\nweights matrix,\nused for the linear transformation of the recurrent state\n(see [initializers](https://keras.io/initializers)).",
+          "description": "Initializer for the `recurrent_kernel`\nweights matrix,\nused for the linear transformation of the recurrent state.\n(see [initializers](https://keras.io/initializers)).",
           "name": "recurrent_initializer"
         },
         {
@@ -1948,11 +1948,11 @@
           "name": "use_bias"
         },
         {
-          "description": "Initializer for the `kernel` weights matrix,\nused for the linear transformation of the inputs\n(see [initializers](https://keras.io/initializers)).",
+          "description": "Initializer for the `kernel` weights matrix,\nused for the linear transformation of the inputs.\n(see [initializers](https://keras.io/initializers)).",
           "name": "kernel_initializer"
         },
         {
-          "description": "Initializer for the `recurrent_kernel`\nweights matrix,\nused for the linear transformation of the recurrent state\n(see [initializers](https://keras.io/initializers)).",
+          "description": "Initializer for the `recurrent_kernel`\nweights matrix,\nused for the linear transformation of the recurrent state.\n(see [initializers](https://keras.io/initializers)).",
           "name": "recurrent_initializer"
         },
         {
@@ -2021,11 +2021,11 @@
           "name": "use_bias"
         },
         {
-          "description": "Initializer for the `kernel` weights matrix,\nused for the linear transformation of the inputs\n(see [initializers](https://keras.io/initializers)).",
+          "description": "Initializer for the `kernel` weights matrix,\nused for the linear transformation of the inputs.\n(see [initializers](https://keras.io/initializers)).",
           "name": "kernel_initializer"
         },
         {
-          "description": "Initializer for the `recurrent_kernel`\nweights matrix,\nused for the linear transformation of the recurrent state\n(see [initializers](https://keras.io/initializers)).",
+          "description": "Initializer for the `recurrent_kernel`\nweights matrix,\nused for the linear transformation of the recurrent state.\n(see [initializers](https://keras.io/initializers)).",
           "name": "recurrent_initializer"
         },
         {

+ 43 - 5
src/onnx-operator.json

@@ -703,18 +703,36 @@
           "allowed_type_strs": [
             "tensor(float16)",
             "tensor(float)",
-            "tensor(double)"
+            "tensor(double)",
+            "tensor(int8)",
+            "tensor(int16)",
+            "tensor(int32)",
+            "tensor(int64)",
+            "tensor(uint8)",
+            "tensor(uint16)",
+            "tensor(uint32)",
+            "tensor(uint64)",
+            "tensor(bool)"
           ],
-          "description": "Constrain input types to float tensors.",
+          "description": "Constrain input types. Casting from strings and complex are not supported.",
           "type_param_str": "T1"
         },
         {
           "allowed_type_strs": [
             "tensor(float16)",
             "tensor(float)",
-            "tensor(double)"
+            "tensor(double)",
+            "tensor(int8)",
+            "tensor(int16)",
+            "tensor(int32)",
+            "tensor(int64)",
+            "tensor(uint8)",
+            "tensor(uint16)",
+            "tensor(uint32)",
+            "tensor(uint64)",
+            "tensor(bool)"
           ],
-          "description": "Constrain output types to float tensors.",
+          "description": "Constrain output types. Casting to strings and complex are not supported.",
           "type_param_str": "T2"
         }
       ]
@@ -2623,6 +2641,16 @@
       "category": "Pool",
       "description": "GlobalAveragePool consumes an input tensor X and applies average pooling across the\n the values in the same channel. This is equivalent to AveragePool with kernel size\n equal to the spatial dimension of input tensor.",
       "domain": "ai.onnx",
+      "examples": [
+        {
+          "code": "node = onnx.helper.make_node(\n    'GlobalAveragePool',\n    inputs=['x'],\n    outputs=['y'],\n)\nx = np.random.randn(1, 3, 5, 5).astype(np.float32)\nspatial_shape = np.ndim(x) - 2\ny = np.average(x, axis=tuple(range(spatial_shape, spatial_shape + 2)))\nfor _ in range(spatial_shape):\n    y = np.expand_dims(y, -1)\nexpect(node, inputs=[x], outputs=[y], name='test_globalaveragepool')",
+          "summary": "globalaveragepool"
+        },
+        {
+          "code": "\nnode = onnx.helper.make_node(\n    'GlobalAveragePool',\n    inputs=['x'],\n    outputs=['y'],\n)\nx = np.array([[[\n  [1, 2, 3],\n  [4, 5, 6],\n  [7, 8, 9],\n]]]).astype(np.float32)\ny = np.array([[[[5]]]]).astype(np.float32)\nexpect(node, inputs=[x], outputs=[y], name='test_globalaveragepool_precomputed')",
+          "summary": "globalaveragepool_precomputed"
+        }
+      ],
       "inputs": [
         {
           "description": "Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimension are in the form of (N x C x D1 x D2 ... Dn), where N is the batch size.",
@@ -2756,6 +2784,16 @@
       "category": "Pool",
       "description": "GlobalMaxPool consumes an input tensor X and applies max pooling across the\n the values in the same channel. This is equivalent to MaxPool with kernel size\n equal to the spatial dimension of input tensor.",
       "domain": "ai.onnx",
+      "examples": [
+        {
+          "code": "\nnode = onnx.helper.make_node(\n    'GlobalMaxPool',\n    inputs=['x'],\n    outputs=['y'],\n)\nx = np.random.randn(1, 3, 5, 5).astype(np.float32)\nspatial_shape = np.ndim(x) - 2\ny = np.max(x, axis=tuple(range(spatial_shape, spatial_shape + 2)))\nfor _ in range(spatial_shape):\n    y = np.expand_dims(y, -1)\nexpect(node, inputs=[x], outputs=[y], name='test_globalmaxpool')",
+          "summary": "globalmaxpool"
+        },
+        {
+          "code": "\nnode = onnx.helper.make_node(\n    'GlobalMaxPool',\n    inputs=['x'],\n    outputs=['y'],\n)\nx = np.array([[[\n    [1, 2, 3],\n    [4, 5, 6],\n    [7, 8, 9],\n]]]).astype(np.float32)\ny = np.array([[[[9]]]]).astype(np.float32)\nexpect(node, inputs=[x], outputs=[y], name='test_globalmaxpool_precomputed')",
+          "summary": "globalmaxpool_precomputed"
+        }
+      ],
       "inputs": [
         {
           "description": "Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimension are in the form of (N x C x D1 x D2 ... Dn), where N is the batch size.",
@@ -6927,7 +6965,7 @@
           "summary": "slice"
         },
         {
-          "code": "node = onnx.helper.make_node(\n    'Slice',\n    inputs=['x'],\n    outputs=['y'],\n    starts=[0, 0, 3],\n    ends=[20, 10, 4],\n)\n\nx = np.random.randn(20, 10, 5).astype(np.float32)\ny = x[:, :, 3:4]\n\nexpect(node, inputs=[x], outputs=[y],\n       name='test_default_axes')",
+          "code": "node = onnx.helper.make_node(\n    'Slice',\n    inputs=['x'],\n    outputs=['y'],\n    starts=[0, 0, 3],\n    ends=[20, 10, 4],\n)\n\nx = np.random.randn(20, 10, 5).astype(np.float32)\ny = x[:, :, 3:4]\n\nexpect(node, inputs=[x], outputs=[y],\n       name='test_slice_default_axes')",
           "summary": "slice_default_axes"
         },
         {

+ 108 - 2
src/tflite.js

@@ -27,6 +27,7 @@ tflite.BuiltinOperator = {
   CONCATENATION: 2,
   CONV_2D: 3,
   DEPTHWISE_CONV_2D: 4,
+  DEQUANTIZE: 6,
   EMBEDDING_LOOKUP: 7,
   FULLY_CONNECTED: 9,
   HASHTABLE_LOOKUP: 10,
@@ -71,7 +72,8 @@ tflite.BuiltinOperator = {
   SPLIT: 49,
   LOG_SOFTMAX: 50,
   DELEGATE: 51,
-  BIDIRECTIONAL_SEQUENCE_LSTM: 52
+  BIDIRECTIONAL_SEQUENCE_LSTM: 52,
+  CAST: 53
 };
 
 /**
@@ -114,7 +116,9 @@ tflite.BuiltinOptions = {
   ExpOptions: 33,
   TopKV2Options: 34,
   SplitOptions: 35,
-  LogSoftmaxOptions: 36
+  LogSoftmaxOptions: 36,
+  CastOptions: 37,
+  DequantizeOptions: 38
 };
 
 /**
@@ -3575,6 +3579,108 @@ tflite.LogSoftmaxOptions.endLogSoftmaxOptions = function(builder) {
   return offset;
 };
 
+/**
+ * @constructor
+ */
+tflite.CastOptions = function() {
+  /**
+   * @type {flatbuffers.ByteBuffer}
+   */
+  this.bb = null;
+
+  /**
+   * @type {number}
+   */
+  this.bb_pos = 0;
+};
+
+/**
+ * @param {number} i
+ * @param {flatbuffers.ByteBuffer} bb
+ * @returns {tflite.CastOptions}
+ */
+tflite.CastOptions.prototype.__init = function(i, bb) {
+  this.bb_pos = i;
+  this.bb = bb;
+  return this;
+};
+
+/**
+ * @param {flatbuffers.ByteBuffer} bb
+ * @param {tflite.CastOptions=} obj
+ * @returns {tflite.CastOptions}
+ */
+tflite.CastOptions.getRootAsCastOptions = function(bb, obj) {
+  return (obj || new tflite.CastOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb);
+};
+
+/**
+ * @param {flatbuffers.Builder} builder
+ */
+tflite.CastOptions.startCastOptions = function(builder) {
+  builder.startObject(0);
+};
+
+/**
+ * @param {flatbuffers.Builder} builder
+ * @returns {flatbuffers.Offset}
+ */
+tflite.CastOptions.endCastOptions = function(builder) {
+  var offset = builder.endObject();
+  return offset;
+};
+
+/**
+ * @constructor
+ */
+tflite.DequantizeOptions = function() {
+  /**
+   * @type {flatbuffers.ByteBuffer}
+   */
+  this.bb = null;
+
+  /**
+   * @type {number}
+   */
+  this.bb_pos = 0;
+};
+
+/**
+ * @param {number} i
+ * @param {flatbuffers.ByteBuffer} bb
+ * @returns {tflite.DequantizeOptions}
+ */
+tflite.DequantizeOptions.prototype.__init = function(i, bb) {
+  this.bb_pos = i;
+  this.bb = bb;
+  return this;
+};
+
+/**
+ * @param {flatbuffers.ByteBuffer} bb
+ * @param {tflite.DequantizeOptions=} obj
+ * @returns {tflite.DequantizeOptions}
+ */
+tflite.DequantizeOptions.getRootAsDequantizeOptions = function(bb, obj) {
+  return (obj || new tflite.DequantizeOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb);
+};
+
+/**
+ * @param {flatbuffers.Builder} builder
+ */
+tflite.DequantizeOptions.startDequantizeOptions = function(builder) {
+  builder.startObject(0);
+};
+
+/**
+ * @param {flatbuffers.Builder} builder
+ * @returns {flatbuffers.Offset}
+ */
+tflite.DequantizeOptions.endDequantizeOptions = function(builder) {
+  var offset = builder.endObject();
+  return offset;
+};
+
 /**
  * @constructor
  */