Просмотр исходного кода

Rename protobuf files to -proto.js

Lutz Roeder 7 лет назад
Родитель
Commit
6c621a47da

+ 6 - 6
setup.py

@@ -86,14 +86,14 @@ setuptools.setup(
         'netron': [ 
             'favicon.ico', 'icon.png',
             'onnx-model.js', 'onnx.js', 'onnx-metadata.json',
-            'tf-model.js', 'tf.js', 'tf-metadata.json',
-            'tflite-model.js', 'tflite.js', 'tflite-metadata.json',
+            'tf-model.js', 'tf-metadata.json', 'tf-proto.js', 
+            'tflite-model.js', 'tflite-metadata.json', 'tflite-schema.js', 
             'keras-model.js', 'keras-metadata.json', 'hdf5.js',
-            'coreml-model.js', 'coreml-metadata.json', 'coreml.js',
-            'caffe-model.js', 'caffe-metadata.json', 'caffe.js',
-            'caffe2-model.js', 'caffe2-metadata.json', 'caffe2.js',
+            'coreml-model.js', 'coreml-metadata.json', 'coreml-proto.js',
+            'caffe-model.js', 'caffe-metadata.json', 'caffe-proto.js',
+            'caffe2-model.js', 'caffe2-metadata.json', 'caffe2-proto.js',
             'mxnet-model.js', 'mxnet-metadata.json',
-            'cntk-model.js', 'cntk.js', 'cntk-metadata.json',
+            'cntk-model.js', 'cntk-metadata.json', 'cntk-proto.js',
             'pytorch-model.js', 'pytorch-metadata.json', 'pickle.js',
             'sklearn-model.js', 'sklearn-metadata.json',
             'numpy.js', 'zip.js', 'tar.js', 'gzip.js',

+ 1 - 1
src/caffe-model.js

@@ -24,7 +24,7 @@ class CaffeModelFactory {
     }
 
     open(context, host, callback) { 
-        host.require('caffe', (err) => {
+        host.require('caffe-proto', (err) => {
             if (err) {
                 callback(err, null);
                 return;

+ 0 - 0
src/caffe.js → src/caffe-proto.js


+ 1 - 1
src/caffe2-model.js

@@ -19,7 +19,7 @@ class Caffe2ModelFactory {
     }    
 
     open(context, host, callback) {
-        host.require('caffe2', (err) => {
+        host.require('caffe2-proto', (err) => {
             if (err) {
                 callback(err, null);
                 return;

+ 0 - 0
src/caffe2.js → src/caffe2-proto.js


+ 1 - 1
src/cntk-model.js

@@ -18,7 +18,7 @@ class CntkModelFactory {
     }
 
     open(context, host, callback) { 
-        host.require('cntk', (err) => {
+        host.require('cntk-proto', (err) => {
             if (err) {
                 callback(err, null);
                 return;

+ 0 - 0
src/cntk.js → src/cntk-proto.js


+ 1 - 1
src/coreml-model.js

@@ -10,7 +10,7 @@ class CoreMLModelFactory {
     }
 
     open(context, host, callback) { 
-        host.require('coreml', (err) => {
+        host.require('coreml-proto', (err) => {
             if (err) {
                 callback(err, null);
                 return;

+ 0 - 0
src/coreml.js → src/coreml-proto.js


+ 1 - 1
src/onnx-model.js

@@ -32,7 +32,7 @@ class OnnxModelFactory {
     }
 
     open(context, host, callback) { 
-        host.require('onnx', (err) => {
+        host.require('onnx-proto', (err) => {
             if (err) {
                 callback(err, null);
                 return;

+ 0 - 0
src/onnx.js → src/onnx-proto.js


+ 398 - 17
src/tf-metadata.json

@@ -3419,7 +3419,7 @@
           "type": "type"
         },
         {
-          "default": 1e-05,
+          "default": 9.999999747378752e-06,
           "name": "tolerance",
           "type": "float"
         }
@@ -15795,6 +15795,35 @@
       ]
     }
   },
+  {
+    "name": "ExperimentalNonSerializableDataset",
+    "schema": {
+      "attributes": [
+        {
+          "minimum": 1,
+          "name": "output_types",
+          "type": "list(type)"
+        },
+        {
+          "minimum": 1,
+          "name": "output_shapes",
+          "type": "list(shape)"
+        }
+      ],
+      "inputs": [
+        {
+          "name": "input_dataset",
+          "type": 21
+        }
+      ],
+      "outputs": [
+        {
+          "name": "handle",
+          "type": 21
+        }
+      ]
+    }
+  },
   {
     "name": "ExperimentalNumaMapAndBatchDataset",
     "schema": {
@@ -17883,7 +17912,7 @@
           "type": "type"
         },
         {
-          "default": 0.0001,
+          "default": 9.999999747378752e-05,
           "description": "A small float number added to the variance of x.",
           "name": "epsilon",
           "type": "float"
@@ -17980,7 +18009,7 @@
           "type": "type"
         },
         {
-          "default": 0.0001,
+          "default": 9.999999747378752e-05,
           "description": "A small float number added to the variance of x.",
           "name": "epsilon",
           "type": "float"
@@ -18095,7 +18124,7 @@
           "type": "type"
         },
         {
-          "default": 0.0001,
+          "default": 9.999999747378752e-05,
           "description": "A small float number added to the variance of x.",
           "name": "epsilon",
           "type": "float"
@@ -18210,7 +18239,7 @@
           "type": "type"
         },
         {
-          "default": 0.0001,
+          "default": 9.999999747378752e-05,
           "description": "A small float number added to the variance of x.",
           "name": "epsilon",
           "type": "float"
@@ -21457,7 +21486,7 @@
     "schema": {
       "attributes": [
         {
-          "default": 0.2,
+          "default": 0.20000000298023224,
           "name": "alpha",
           "type": "float"
         },
@@ -21467,6 +21496,10 @@
               "type": "type",
               "value": 19
             },
+            {
+              "type": "type",
+              "value": 14
+            },
             {
               "type": "type",
               "value": 1
@@ -21504,7 +21537,7 @@
     "schema": {
       "attributes": [
         {
-          "default": 0.2,
+          "default": 0.20000000298023224,
           "name": "alpha",
           "type": "float"
         },
@@ -21514,6 +21547,10 @@
               "type": "type",
               "value": 19
             },
+            {
+              "type": "type",
+              "value": 14
+            },
             {
               "type": "type",
               "value": 1
@@ -23942,6 +23979,51 @@
       "summary": "Solves one or more linear least-squares problems."
     }
   },
+  {
+    "name": "MatrixSquareRoot",
+    "schema": {
+      "attributes": [
+        {
+          "allowedValues": [
+            {
+              "type": "type",
+              "value": 2
+            },
+            {
+              "type": "type",
+              "value": 1
+            },
+            {
+              "type": "type",
+              "value": 8
+            },
+            {
+              "type": "type",
+              "value": 18
+            }
+          ],
+          "name": "T",
+          "type": "type"
+        }
+      ],
+      "description": "matmul(sqrtm(A), sqrtm(A)) = A\n\nThe input matrix should be invertible. If the input matrix is real, it should\nhave no eigenvalues which are real and negative (pairs of complex conjugate\neigenvalues are allowed).\n\nThe matrix square root is computed by first reducing the matrix to \nquasi-triangular form with the real Schur decomposition. The square root \nof the quasi-triangular matrix is then computed directly. Details of \nthe algorithm can be found in: Nicholas J. Higham, \"Computing real \nsquare roots of a real matrix\", Linear Algebra Appl., 1987.\n\nThe input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions\nform square matrices. The output is a tensor of the same shape as the input\ncontaining the matrix square root for all input submatrices `[..., :, :]`.",
+      "inputs": [
+        {
+          "description": "Shape is `[..., M, M]`.",
+          "name": "input",
+          "typeAttr": "T"
+        }
+      ],
+      "outputs": [
+        {
+          "description": "Shape is `[..., M, M]`.\n\n@compatibility(scipy)\nEquivalent to scipy.linalg.sqrtm\n@end_compatibility",
+          "name": "output",
+          "typeAttr": "T"
+        }
+      ],
+      "summary": "Computes the matrix square root of one or more square matrices:"
+    }
+  },
   {
     "name": "MatrixTriangularSolve",
     "schema": {
@@ -26601,7 +26683,7 @@
           "type": "int"
         },
         {
-          "default": 0.8,
+          "default": 0.800000011920929,
           "description": "The maximum ratio between number of entries and number of\nbuckets before growing the table. Must be between 0 and 1.",
           "name": "max_load_factor",
           "type": "float"
@@ -26673,7 +26755,7 @@
           "type": "int"
         },
         {
-          "default": 0.8,
+          "default": 0.800000011920929,
           "description": "The maximum ratio between number of entries and number of\nbuckets before growing the table. Must be between 0 and 1.",
           "name": "max_load_factor",
           "type": "float"
@@ -26941,6 +27023,185 @@
       "summary": "Creates a Mutex resource that can be locked by `MutexLock`."
     }
   },
+  {
+    "name": "NcclAllReduce",
+    "schema": {
+      "attributes": [
+        {
+          "allowedValues": [
+            "min",
+            "max",
+            "prod",
+            "sum"
+          ],
+          "name": "reduction",
+          "type": "string"
+        },
+        {
+          "allowedValues": [
+            {
+              "type": "type",
+              "value": 19
+            },
+            {
+              "type": "type",
+              "value": 1
+            },
+            {
+              "type": "type",
+              "value": 2
+            },
+            {
+              "type": "type",
+              "value": 3
+            },
+            {
+              "type": "type",
+              "value": 9
+            }
+          ],
+          "name": "T",
+          "type": "type"
+        },
+        {
+          "name": "num_devices",
+          "type": "int"
+        },
+        {
+          "name": "shared_name",
+          "type": "string"
+        }
+      ],
+      "description": "Outputs a tensor containing the reduction across all input tensors passed to ops\nwithin the same `shared_name.\n\nThe graph should be constructed so if one op runs with shared_name value `c`,\nthen `num_devices` ops will run with shared_name value `c`.  Failure to do so\nwill cause the graph execution to fail to complete.\n\ninput: the input to the reduction\ndata: the value of the reduction across all `num_devices` devices.\nreduction: the reduction operation to perform.\nnum_devices: The number of devices participating in this reduction.\nshared_name: Identifier that shared between ops of the same reduction.",
+      "inputs": [
+        {
+          "name": "input",
+          "typeAttr": "T"
+        }
+      ],
+      "outputs": [
+        {
+          "name": "data",
+          "typeAttr": "T"
+        }
+      ],
+      "summary": "Outputs a tensor containing the reduction across all input tensors."
+    }
+  },
+  {
+    "name": "NcclBroadcast",
+    "schema": {
+      "attributes": [
+        {
+          "allowedValues": [
+            {
+              "type": "type",
+              "value": 19
+            },
+            {
+              "type": "type",
+              "value": 1
+            },
+            {
+              "type": "type",
+              "value": 2
+            },
+            {
+              "type": "type",
+              "value": 3
+            },
+            {
+              "type": "type",
+              "value": 9
+            }
+          ],
+          "name": "T",
+          "type": "type"
+        },
+        {
+          "name": "shape",
+          "type": "shape"
+        }
+      ],
+      "description": "Sends `input` to all devices that are connected to the output.\n\nThe graph should be constructed so that all ops connected to the output have a\nvalid device assignment, and the op itself is assigned one of these devices.\n\ninput: The input to the broadcast.\noutput: The same as input.\nshape: The shape of the input tensor.\n",
+      "inputs": [
+        {
+          "name": "input",
+          "typeAttr": "T"
+        }
+      ],
+      "outputs": [
+        {
+          "name": "output",
+          "typeAttr": "T"
+        }
+      ],
+      "summary": "Sends `input` to all devices that are connected to the output."
+    }
+  },
+  {
+    "name": "NcclReduce",
+    "schema": {
+      "attributes": [
+        {
+          "allowedValues": [
+            "min",
+            "max",
+            "prod",
+            "sum"
+          ],
+          "name": "reduction",
+          "type": "string"
+        },
+        {
+          "allowedValues": [
+            {
+              "type": "type",
+              "value": 19
+            },
+            {
+              "type": "type",
+              "value": 1
+            },
+            {
+              "type": "type",
+              "value": 2
+            },
+            {
+              "type": "type",
+              "value": 3
+            },
+            {
+              "type": "type",
+              "value": 9
+            }
+          ],
+          "name": "T",
+          "type": "type"
+        },
+        {
+          "minimum": 1,
+          "name": "num_devices",
+          "type": "int"
+        }
+      ],
+      "description": "Reduces `input` from `num_devices` using `reduction` to a single device.\n\nThe graph should be constructed so that all inputs have a valid device\nassignment, and the op itself is assigned one of these devices.\n\ninput: The input to the reduction.\ndata: the value of the reduction across all `num_devices` devices.\nreduction: the reduction operation to perform.",
+      "inputs": [
+        {
+          "name": "input",
+          "numberAttr": "num_devices",
+          "typeAttr": "T"
+        }
+      ],
+      "outputs": [
+        {
+          "name": "data",
+          "typeAttr": "T"
+        }
+      ],
+      "summary": "Reduces `input` from `num_devices` using `reduction` to a single device."
+    }
+  },
   {
     "name": "Neg",
     "schema": {
@@ -29651,6 +29912,11 @@
           "default": "",
           "name": "config",
           "type": "string"
+        },
+        {
+          "default": "",
+          "name": "executor_type",
+          "type": "string"
         }
       ],
       "inputs": [
@@ -31637,13 +31903,13 @@
           "type": "float"
         },
         {
-          "default": 1e-05,
+          "default": 9.999999747378752e-06,
           "description": "A small float number to avoid dividing by 0.",
           "name": "variance_epsilon",
           "type": "float"
         },
         {
-          "default": 0.001,
+          "default": 0.0010000000474974513,
           "description": "Minimum value of `y_max - y_min`",
           "name": "min_separation",
           "type": "float"
@@ -33275,6 +33541,55 @@
       "summary": "Returns a `RaggedTensor` containing the specified sequences of numbers."
     }
   },
+  {
+    "name": "RaggedTensorToSparse",
+    "schema": {
+      "attributes": [
+        {
+          "description": "The ragged rank of the input RaggedTensor.  `rt_nested_splits` should contain\nthis number of ragged-splits tensors.  This value should equal\n`input.ragged_rank`.",
+          "minimum": 1,
+          "name": "RAGGED_RANK",
+          "type": "int"
+        },
+        {
+          "name": "T",
+          "type": "type"
+        }
+      ],
+      "description": "input=ragged.from_nested_row_splits(rt_dense_values, rt_nested_splits)\noutput=SparseTensor(indices=sparse_indices, values=sparse_values,\n                    dense_shape=sparse_dense_shape)",
+      "inputs": [
+        {
+          "description": "The `row_splits` for the `RaggedTensor`.",
+          "name": "rt_nested_splits",
+          "numberAttr": "RAGGED_RANK",
+          "type": 9
+        },
+        {
+          "description": "The `inner_values` for the `RaggedTensor`.",
+          "name": "rt_dense_values",
+          "typeAttr": "T"
+        }
+      ],
+      "outputs": [
+        {
+          "description": "The indices for the `SparseTensor`.",
+          "name": "sparse_indices",
+          "type": 9
+        },
+        {
+          "description": "The values of the `SparseTensor`.",
+          "name": "sparse_values",
+          "typeAttr": "T"
+        },
+        {
+          "description": "`sparse_dense_shape` is a tight bounding box of the input `RaggedTensor`.",
+          "name": "sparse_dense_shape",
+          "type": 9
+        }
+      ],
+      "summary": "Converts a `RaggedTensor` into a `SparseTensor` with the same values."
+    }
+  },
   {
     "name": "RandomCrop",
     "schema": {
@@ -41550,7 +41865,7 @@
           "type": "int"
         },
         {
-          "default": 0.1,
+          "default": 0.10000000149011612,
           "description": "The cropped area of the image must contain at least this\nfraction of any bounding box supplied. The value of this parameter should be\nnon-negative. In the case of 0, the cropped area does not need to overlap\nany of the bounding boxes supplied.",
           "name": "min_object_covered",
           "type": "float"
@@ -41558,7 +41873,7 @@
         {
           "default": [
             0.75,
-            1.33
+            1.3300000429153442
           ],
           "description": "The cropped area of the image must have an aspect ratio =\nwidth / height within this range.",
           "name": "aspect_ratio_range",
@@ -41566,7 +41881,7 @@
         },
         {
           "default": [
-            0.05,
+            0.05000000074505806,
             1.0
           ],
           "description": "The cropped area of the image must contain a fraction of the\nsupplied image within this range.",
@@ -41664,7 +41979,7 @@
         {
           "default": [
             0.75,
-            1.33
+            1.3300000429153442
           ],
           "description": "The cropped area of the image must have an aspect ratio =\nwidth / height within this range.",
           "name": "aspect_ratio_range",
@@ -41672,7 +41987,7 @@
         },
         {
           "default": [
-            0.05,
+            0.05000000074505806,
             1.0
           ],
           "description": "The cropped area of the image must contain a fraction of the\nsupplied image within this range.",
@@ -45203,7 +45518,7 @@
           "type": "int"
         },
         {
-          "default": 0.001,
+          "default": 0.0010000000474974513,
           "description": "Threshold for word occurrence. Words that appear with higher\nfrequency will be randomly down-sampled. Set to 0 to disable.",
           "name": "subsample",
           "type": "float"
@@ -51692,6 +52007,11 @@
           "default": "",
           "name": "config",
           "type": "string"
+        },
+        {
+          "default": "",
+          "name": "executor_type",
+          "type": "string"
         }
       ],
       "inputs": [
@@ -56444,6 +56764,67 @@
       "summary": "Determine the script codes of a given tensor of Unicode integer code points."
     }
   },
+  {
+    "name": "UnicodeTranscode",
+    "schema": {
+      "attributes": [
+        {
+          "description": "Text encoding of the input strings. This is any of the encodings supported\nby ICU ucnv algorithmic converters. Examples: `\"UTF-16\", \"US ASCII\", \"UTF-8\"`.",
+          "name": "input_encoding",
+          "type": "string"
+        },
+        {
+          "allowedValues": [
+            "UTF-8",
+            "UTF-16-BE",
+            "UTF-32-BE"
+          ],
+          "description": "The unicode encoding to use in the output. Must be one of\n`\"UTF-8\", \"UTF-16-BE\", \"UTF-32-BE\"`. Multi-byte encodings will be big-endian.",
+          "name": "output_encoding",
+          "type": "string"
+        },
+        {
+          "allowedValues": [
+            "strict",
+            "replace",
+            "ignore"
+          ],
+          "default": "replace",
+          "description": "Error handling policy when there is invalid formatting found in the input.\nThe value of 'strict' will cause the operation to produce a InvalidArgument\nerror on any invalid input formatting. A value of 'replace' (the default) will\ncause the operation to replace any invalid formatting in the input with the\n`replacement_char` codepoint. A value of 'ignore' will cause the operation to\nskip any invalid formatting in the input and produce no corresponding output\ncharacter.",
+          "name": "errors",
+          "type": "string"
+        },
+        {
+          "default": 65533,
+          "description": "The replacement character codepoint to be used in place of any invalid\nformatting in the input when `errors='replace'`. Any valid unicode codepoint may\nbe used. The default value is the default unicode replacement character is\n0xFFFD or U+65533.)\n\nNote that for UTF-8, passing a replacement character expressible in 1 byte, such\nas ' ', will preserve string alignment to the source since invalid bytes will be\nreplaced with a 1-byte replacement. For UTF-16-BE and UTF-16-LE, any 1 or 2 byte\nreplacement character will preserve byte alignment to the source.",
+          "name": "replacement_char",
+          "type": "int"
+        },
+        {
+          "default": false,
+          "description": "Whether to replace the C0 control characters (00-1F) with the\n`replacement_char`. Default is false.",
+          "name": "replace_control_characters",
+          "type": "bool"
+        }
+      ],
+      "description": "The input is a string tensor of any shape. The output is a string tensor of\nthe same shape containing the transcoded strings. Output strings are always\nvalid unicode. If the input contains invalid encoding positions, the\n`errors` attribute sets the policy for how to deal with them. If the default\nerror-handling policy is used, invalid formatting will be substituted in the\noutput by the `replacement_char`. If the errors policy is to `ignore`, any\ninvalid encoding positions in the input are skipped and not included in the\noutput. If it set to `strict` then any invalid formatting will result in an\nInvalidArgument error.\n\nThis operation can be used with `output_encoding = input_encoding` to enforce\ncorrect formatting for inputs even if they are already in the desired encoding.\n\nIf the input is prefixed by a Byte Order Mark needed to determine encoding\n(e.g. if the encoding is UTF-16 and the BOM indicates big-endian), then that\nBOM will be consumed and not emitted into the output. If the input encoding\nis marked with an explicit endianness (e.g. UTF-16-BE), then the BOM is\ninterpreted as a non-breaking-space and is preserved in the output (including\nalways for UTF-8).\n\nThe end result is that if the input is marked as an explicit endianness the\ntranscoding is faithful to all codepoints in the source. If it is not marked\nwith an explicit endianness, the BOM is not considered part of the string itself\nbut as metadata, and so is not preserved in the output.",
+      "inputs": [
+        {
+          "description": "The text to be processed. Can have any shape.",
+          "name": "input",
+          "type": 7
+        }
+      ],
+      "outputs": [
+        {
+          "description": "A string tensor containing unicode text encoded using `output_encoding`.",
+          "name": "output",
+          "type": 7
+        }
+      ],
+      "summary": "Transcode the input text from a source encoding to a destination encoding."
+    }
+  },
   {
     "name": "UniformCandidateSampler",
     "schema": {

+ 1 - 1
src/tf-model.js

@@ -30,7 +30,7 @@ class TensorFlowModelFactory {
     }
 
     open(context, host, callback) { 
-        host.require('tf', (err) => {
+        host.require('tf-proto', (err) => {
             if (err) {
                 callback(err, null);
                 return;

+ 0 - 0
src/tf.js → src/tf-proto.js


+ 1 - 1
src/tflite-model.js

@@ -9,7 +9,7 @@ class TensorFlowLiteModelFactory {
     }
 
     open(context, host, callback) {
-        host.require('tflite', (err) => {
+        host.require('tflite-schema', (err) => {
             if (err) {
                 callback(err, null);
                 return;

+ 0 - 0
src/tflite.js → src/tflite-schema.js


+ 2 - 2
tools/metadata/caffe-update

@@ -24,6 +24,6 @@ echo "Generate 'caffe.js'"
 mkdir -p ${tools}/metadata/caffe
 cp ${third_party}/${identifier}/src/caffe/proto/caffe.proto ${tools}/metadata/caffe/caffe.proto
 node ${tools}/metadata/caffe-update.js ${tools}/metadata/caffe/caffe.proto
-${node_modules}/protobufjs/bin/pbjs -t static-module -w closure --no-encode --no-delimited --no-comments --keep-case --decode-text -r caffe -o ${src}/caffe.js ${tools}/metadata/caffe/caffe.proto
+${node_modules}/protobufjs/bin/pbjs -t static-module -w closure --no-encode --no-delimited --no-comments --keep-case --decode-text -r caffe -o ${src}/caffe-proto.js ${tools}/metadata/caffe/caffe.proto
+node ${tools}/metadata/update_pbjs.js array ${src}/caffe-proto.js data float 1
 rm -rf ${tools}/metadata/caffe
-node ${tools}/metadata/update_pbjs.js array ${src}/caffe.js data float 1

+ 2 - 2
tools/metadata/cntk-update

@@ -22,5 +22,5 @@ else
 fi
 
 echo "Generate 'cntk.js'"
-${node_modules}/protobufjs/bin/pbjs -t static-module -w closure --no-encode --no-delimited --no-comments --keep-case -r cntk -o ${src}/cntk.js ${third_party}/${identifier}/Source/CNTKv2LibraryDll/proto/CNTK.proto
-node ${tools}/metadata/update_pbjs.js array ${src}/cntk.js value float 1
+${node_modules}/protobufjs/bin/pbjs -t static-module -w closure --no-encode --no-delimited --no-comments --keep-case -r cntk -o ${src}/cntk-proto.js ${third_party}/${identifier}/Source/CNTKv2LibraryDll/proto/CNTK.proto
+node ${tools}/metadata/update_pbjs.js array ${src}/cntk-proto.js value float 1

+ 2 - 2
tools/metadata/coreml-update

@@ -22,5 +22,5 @@ else
 fi
 
 echo "Generate 'coreml.js'"
-${node_modules}/protobufjs/bin/pbjs -t static-module -w closure --no-encode --no-delimited --no-comments --keep-case -r coreml -o ${src}/coreml.js ${third_party}/${identifier}/mlmodel/format/Model.proto
-node ${tools}/metadata/update_pbjs.js array ${src}/coreml.js floatValue float 2
+${node_modules}/protobufjs/bin/pbjs -t static-module -w closure --no-encode --no-delimited --no-comments --keep-case -r coreml -o ${src}/coreml-proto.js ${third_party}/${identifier}/mlmodel/format/Model.proto
+node ${tools}/metadata/update_pbjs.js array ${src}/coreml-proto.js floatValue float 2

+ 3 - 3
tools/metadata/onnx-update

@@ -69,7 +69,7 @@ fi
 
 if [ ${__update} ]; then
     echo "Generate 'onnx.js'"
-    ${node_modules}/protobufjs/bin/pbjs -t static-module -w closure --no-encode --no-delimited --no-comments --keep-case --decode-text -r onnx -o ${src}/onnx.js ${third_party}/${identifier}/onnx/onnx-ml.proto ${third_party}/${identifier}/onnx/onnx-operators-ml.proto
-    node ${tools}/metadata/update_pbjs.js array ${src}/onnx.js float_data float 1
-    node ${tools}/metadata/update_pbjs.js array ${src}/onnx.js double_data double 1
+    ${node_modules}/protobufjs/bin/pbjs -t static-module -w closure --no-encode --no-delimited --no-comments --keep-case --decode-text -r onnx -o ${src}/onnx-proto.js ${third_party}/${identifier}/onnx/onnx-ml.proto ${third_party}/${identifier}/onnx/onnx-operators-ml.proto
+    node ${tools}/metadata/update_pbjs.js array ${src}/onnx-proto.js float_data float 1
+    node ${tools}/metadata/update_pbjs.js array ${src}/onnx-proto.js double_data double 1
 fi

+ 2 - 2
tools/metadata/pytorch-update

@@ -45,8 +45,8 @@ fi
 
 if [ ${__update} ]; then
     echo "Generate 'caffe2.js'"
-    ${node_modules}/protobufjs/bin/pbjs -t static-module -w closure --no-encode --no-delimited --no-comments --keep-case --decode-text -r caffe2 -o ${src}/caffe2.js ${third_party}/pytorch/caffe2/proto/caffe2.proto
-    node ${tools}/metadata/update_pbjs.js enumeration ${src}/caffe2.js floats float 1
+    ${node_modules}/protobufjs/bin/pbjs -t static-module -w closure --no-encode --no-delimited --no-comments --keep-case --decode-text -r caffe2 -o ${src}/caffe2-proto.js ${third_party}/pytorch/caffe2/proto/caffe2.proto
+    node ${tools}/metadata/update_pbjs.js enumeration ${src}/caffe2-proto.js floats float 1
 fi
 
 if [ ${__build} ]; then

+ 1 - 1
tools/metadata/tf-update

@@ -44,7 +44,7 @@ fi
 
 if [ ${__update} ]; then
     echo "Generate 'tf.js'"
-    ${node_modules}/protobufjs/bin/pbjs -t static-module -w closure --no-encode --no-delimited --no-comments --keep-case --decode-text -r tf -o ${src}/tf.js \
+    ${node_modules}/protobufjs/bin/pbjs -t static-module -w closure --no-encode --no-delimited --no-comments --keep-case --decode-text -r tf -o ${src}/tf-proto.js \
         ${third_party}/${identifier}/tensorflow/core/protobuf/saved_model.proto \
         ${third_party}/${identifier}/tensorflow/core/protobuf/meta_graph.proto \
         ${third_party}/${identifier}/tensorflow/core/protobuf/saver.proto \

+ 1 - 1
tools/metadata/tflite-update

@@ -38,4 +38,4 @@ fi
 
 echo "Generate '../src/tflite.js'"
 ${third_party}/flatbuffers/flatc --js ${third_party}/tensorflow/tensorflow/lite/schema/schema.fbs 
-mv ./schema_generated.js ${src}/tflite.js
+mv ./schema_generated.js ${src}/tflite-schema.js