Lutz Roeder 1 год назад
Родитель
Commit
1115faface

+ 0 - 8
source/coreml-metadata.json

@@ -244,10 +244,6 @@
       { "name": "z" }
     ]
   },
-  {
-    "name": "mvn",
-    "description": "Fill a constant value in the padded region."
-  },
   {
     "name": "mvn",
     "category": "Normalization",
@@ -329,10 +325,6 @@
     "category": "Shape",
     "description": "A layer that repeats a sequence."
   },
-  {
-    "name": "slice",
-    "description": "A layer that slices the input data along a given axis."
-  },
   {
     "name": "slice",
     "description": "A layer that uniformly splits across the channel dimension to produce a specified number of outputs."

+ 15 - 63
source/keras-metadata.json

@@ -130,6 +130,21 @@
         "name": "seed",
         "description": "A Python integer to use as random seed incase of `dropout`."
       }
+    ],
+    "inputs": [
+      {
+        "name": "query",
+        "type": "Tensor[]"
+      },
+      {
+        "name": "value",
+        "type": "Tensor[]"
+      }
+    ],
+    "outputs": [
+      {
+        "name": "output"
+      }
     ]
   },
   {
@@ -4570,69 +4585,6 @@
       }
     ]
   },
-  {
-    "name": "Attention",
-    "category": "Attention",
-    "module": "keras.layers",
-    "inputs": [
-      {
-        "name": "query",
-        "type": "Tensor[]"
-      },
-      {
-        "name": "value",
-        "type": "Tensor[]"
-      }
-    ],
-    "outputs": [
-      {
-        "name": "output"
-      }
-    ],
-    "description": "Dot-product attention layer, a.k.a. Luong-style attention.\n\nInputs are a list with 2 or 3 elements:\n1. A `query` tensor of shape `(batch_size, Tq, dim)`.\n2. A `value` tensor of shape `(batch_size, Tv, dim)`.\n3. A optional `key` tensor of shape `(batch_size, Tv, dim)`. If none\n    supplied, `value` will be used as a `key`.\n\nThe calculation follows the steps:\n1. Calculate attention scores using `query` and `key` with shape\n    `(batch_size, Tq, Tv)`.\n2. Use scores to calculate a softmax distribution with shape\n    `(batch_size, Tq, Tv)`.\n3. Use the softmax distribution to create a linear combination of `value`\n    with shape `(batch_size, Tq, dim)`.",
-    "attributes": [
-      {
-        "name": "use_scale",
-        "description": "If `True`, will create a scalar variable to scale the\n        attention scores."
-      },
-      {
-        "name": "causal",
-        "description": "Boolean. Set to `True` for decoder self-attention. Adds a mask\n    such that position `i` cannot attend to positions `j > i`. This prevents\n    the flow of information from the future towards the past.  Defaults to\n    `False`."
-      },
-      {
-        "name": "dropout",
-        "description": "Float between 0 and 1. Fraction of the units to drop for the\n        attention scores. Defaults to `0.0`."
-      },
-      {
-        "name": "inputs",
-        "description": "List of the following tensors:\n        - `query`: Query tensor of shape `(batch_size, Tq, dim)`.\n        - `value`: Value tensor of shape `(batch_size, Tv, dim)`.\n        - `key`: Optional key tensor of shape `(batch_size, Tv, dim)`. If\n            not given, will use `value` for both `key` and `value`, which is\n            the most common case."
-      },
-      {
-        "name": "mask",
-        "description": "List of the following tensors:\n        - `query_mask`: A boolean mask tensor of shape `(batch_size, Tq)`.\n            If given, the output will be zero at the positions where\n            `mask==False`.\n        - `value_mask`: A boolean mask tensor of shape `(batch_size, Tv)`.\n            If given, will apply the mask such that values at positions\n             where `mask==False` do not contribute to the result."
-      },
-      {
-        "name": "return_attention_scores",
-        "description": "bool, it `True`, returns the attention scores\n        (after masking and softmax) as an additional output argument."
-      },
-      {
-        "name": "training",
-        "description": "Python boolean indicating whether the layer should behave in\n        training mode (adding dropout) or in inference mode (no dropout)."
-      },
-      {
-        "name": "score_mode",
-        "description": "Function to use to compute attention scores, one of\n        `{\"dot\", \"concat\"}`. `\"dot\"` refers to the dot product between the\n        query and key vectors. `\"concat\"` refers to the hyperbolic tangent\n        of the concatenation of the `query` and `key` vectors.\n\nCall Args:"
-      },
-      {
-        "name": "use_causal_mask",
-        "description": "Boolean. Set to `True` for decoder self-attention. Adds\n        a mask such that position `i` cannot attend to positions `j > i`.\n        This prevents the flow of information from the future towards the\n        past. Defaults to `False`.\n\nOutput:\n    Attention outputs of shape `(batch_size, Tq, dim)`.\n    (Optional) Attention scores after masking and softmax with shape\n        `(batch_size, Tq, Tv)`."
-      },
-      {
-        "name": "seed",
-        "description": "A Python integer to use as random seed incase of `dropout`."
-      }
-    ]
-  },
   {
     "name": "nn.relu",
     "category": "Activation",

+ 0 - 18
source/mxnet-metadata.json

@@ -32,15 +32,6 @@
       { "name": "output" }
     ]
   },
-  {
-    "name": "_minus_scalar",
-    "inputs": [
-      { "name": "input" }
-    ],
-    "outputs": [
-      { "name": "output" }
-    ]
-  },
   {
     "name": "_mul",
     "inputs": [
@@ -63,15 +54,6 @@
       { "name": "output" }
     ]
   },
-  {
-    "name": "_mul_scalar",
-    "inputs": [
-      { "name": "input" }
-    ],
-    "outputs": [
-      { "name": "output" }
-    ]
-  },
   {
     "name": "_Plus",
     "inputs": [

+ 8 - 7
source/openvino-metadata.json

@@ -761,6 +761,10 @@
         "type": "float32"
       }
     ],
+    "inputs": [
+      { "name": "input" },
+      { "name": "data" }
+    ],
     "status": "default"
   },
   {
@@ -1116,6 +1120,10 @@
         "type": "int32"
       }
     ],
+    "inputs": [
+      { "name": "output_size" },
+      { "name": "image_size" }
+    ],
     "status": "default"
   },
   {
@@ -1247,13 +1255,6 @@
     ],
     "status": "default"
   },
-  {
-    "name": "PriorBox",
-    "inputs": [
-      { "name": "output_size" },
-      { "name": "image_size" }
-    ]
-  },
   {
     "name": "PSROIPooling",
     "category": "Pool",

+ 4 - 3
source/pytorch.js

@@ -3955,12 +3955,13 @@ pytorch.Metadata = class {
 
     static async open(context) {
         if (!pytorch.Metadata._metadata) {
+            let data = null;
             try {
-                const data = await context.request('pytorch-metadata.json');
-                pytorch.Metadata._metadata = new pytorch.Metadata(data);
+                data = await context.request('pytorch-metadata.json');
             } catch {
-                pytorch.Metadata._metadata = new pytorch.Metadata(null);
+                // continue regardless of error
             }
+            pytorch.Metadata._metadata = new pytorch.Metadata(data);
         }
         return pytorch.Metadata._metadata;
     }

+ 0 - 106
source/sklearn-metadata.json

@@ -2135,112 +2135,6 @@
       }
     ]
   },
-  {
-    "name": "sklearn.svm.SVC",
-    "description": "C-Support Vector Classification.\n\nThe implementation is based on libsvm. The fit time scales at least\nquadratically with the number of samples and may be impractical\nbeyond tens of thousands of samples. For large datasets\nconsider using :class:`~sklearn.svm.LinearSVC` or\n:class:`~sklearn.linear_model.SGDClassifier` instead, possibly after a\n:class:`~sklearn.kernel_approximation.Nystroem` transformer or\nother :ref:`kernel_approximation`.\n\nThe multiclass support is handled according to a one-vs-one scheme.\n\nFor details on the precise mathematical formulation of the provided\nkernel functions and how `gamma`, `coef0` and `degree` affect each\nother, see the corresponding section in the narrative documentation:\n:ref:`svm_kernels`.\n\nTo learn how to tune SVC's hyperparameters, see the following example:\n:ref:`sphx_glr_auto_examples_model_selection_plot_nested_cross_validation_iris.py`\n\nRead more in the :ref:`User Guide <svm_classification>`.\n",
-    "attributes": [
-      {
-        "default": 1.0,
-        "description": "Regularization parameter. The strength of the regularization is\ninversely proportional to C. Must be strictly positive. The penalty\nis a squared l2 penalty. For an intuitive visualization of the effects\nof scaling the regularization parameter C, see\n:ref:`sphx_glr_auto_examples_svm_plot_svm_scale_c.py`.\n",
-        "name": "C",
-        "optional": true,
-        "type": "float32"
-      },
-      {
-        "default": "rbf",
-        "description": "Specifies the kernel type to be used in the algorithm. If\nnone is given, 'rbf' will be used. If a callable is given it is used to\npre-compute the kernel matrix from data matrices; that matrix should be\nan array of shape ``(n_samples, n_samples)``. For an intuitive\nvisualization of different kernel types see\n:ref:`sphx_glr_auto_examples_svm_plot_svm_kernels.py`.\n",
-        "name": "kernel",
-        "optional": true,
-        "type": "string"
-      },
-      {
-        "default": 3,
-        "description": "Degree of the polynomial kernel function ('poly').\nMust be non-negative. Ignored by all other kernels.\n",
-        "name": "degree",
-        "optional": true,
-        "type": "int32"
-      },
-      {
-        "default": "scale",
-        "description": "Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.\n\n- if ``gamma='scale'`` (default) is passed then it uses\n1 / (n_features * X.var()) as value of gamma,\n- if 'auto', uses 1 / n_features\n- if float, must be non-negative.\n\n.. versionchanged:: 0.22\nThe default value of ``gamma`` changed from 'auto' to 'scale'.\n",
-        "name": "gamma",
-        "optional": true,
-        "type": "float32"
-      },
-      {
-        "default": 0.0,
-        "description": "Independent term in kernel function.\nIt is only significant in 'poly' and 'sigmoid'.\n",
-        "name": "coef0",
-        "optional": true,
-        "type": "float32"
-      },
-      {
-        "default": true,
-        "description": "Whether to use the shrinking heuristic.\nSee the :ref:`User Guide <shrinking_svm>`.\n",
-        "name": "shrinking",
-        "optional": true,
-        "type": "boolean"
-      },
-      {
-        "default": false,
-        "description": "Whether to enable probability estimates. This must be enabled prior\nto calling `fit`, will slow down that method as it internally uses\n5-fold cross-validation, and `predict_proba` may be inconsistent with\n`predict`. Read more in the :ref:`User Guide <scores_probabilities>`.\n",
-        "name": "probability",
-        "optional": true,
-        "type": "boolean"
-      },
-      {
-        "default": 0.001,
-        "description": "Tolerance for stopping criterion.\n",
-        "name": "tol",
-        "optional": true,
-        "type": "float32"
-      },
-      {
-        "default": 200.0,
-        "description": "Specify the size of the kernel cache (in MB).\n",
-        "name": "cache_size",
-        "optional": true,
-        "type": "float32"
-      },
-      {
-        "default": null,
-        "description": "Set the parameter C of class i to class_weight[i]*C for\nSVC. If not given, all classes are supposed to have\nweight one.\nThe \"balanced\" mode uses the values of y to automatically adjust\nweights inversely proportional to class frequencies in the input data\nas ``n_samples / (n_classes * np.bincount(y))``.\n",
-        "name": "class_weight",
-        "optional": true
-      },
-      {
-        "default": false,
-        "description": "Enable verbose output. Note that this setting takes advantage of a\nper-process runtime setting in libsvm that, if enabled, may not work\nproperly in a multithreaded context.\n",
-        "name": "verbose",
-        "type": "boolean"
-      },
-      {
-        "default": -1,
-        "description": "Hard limit on iterations within solver, or -1 for no limit.\n",
-        "name": "max_iter",
-        "optional": true,
-        "type": "int32"
-      },
-      {
-        "default": "ovr",
-        "description": "Whether to return a one-vs-rest ('ovr') decision function of shape\n(n_samples, n_classes) as all other classifiers, or the original\none-vs-one ('ovo') decision function of libsvm which has shape\n(n_samples, n_classes * (n_classes - 1) / 2). However, note that\ninternally, one-vs-one ('ovo') is always used as a multi-class strategy\nto train models; an ovr matrix is only constructed from the ovo matrix.\nThe parameter is ignored for binary classification.\n\n.. versionchanged:: 0.19\ndecision_function_shape is 'ovr' by default.\n\n.. versionadded:: 0.17\n*decision_function_shape='ovr'* is recommended.\n\n.. versionchanged:: 0.17\nDeprecated *decision_function_shape='ovo' and None*.\n",
-        "name": "decision_function_shape"
-      },
-      {
-        "default": null,
-        "description": "Controls the pseudo random number generation for shuffling the data for\nprobability estimates. Ignored when `probability` is False.\nPass an int for reproducible output across multiple function calls.\nSee :term:`Glossary <random_state>`.\n",
-        "name": "random_state",
-        "optional": true
-      },
-      {
-        "default": false,
-        "description": "If true, ``decision_function_shape='ovr'``, and number of classes > 2,\n:term:`predict` will break ties according to the confidence values of\n:term:`decision_function`; otherwise the first class among the tied\nclasses is returned. Please note that breaking ties comes at a\nrelatively high computational cost compared to a simple predict.\n\n.. versionadded:: 0.22\n",
-        "name": "break_ties",
-        "optional": true,
-        "type": "boolean"
-      }
-    ]
-  },
   {
     "name": "sklearn.tree.tree.DecisionTreeClassifier",
     "description": "A decision tree classifier.\n\nRead more in the :ref:`User Guide <tree>`.\n",

+ 2 - 2
source/tf.js

@@ -391,10 +391,10 @@ tf.ModelFactory = class {
                                 node.__metadata__ = Array.from(metadata.get(node.op) || []);
                             }
                         }
-                        return saved_model;
                     } catch {
-                        return saved_model;
+                        // continue regardless of error
                     }
+                    return saved_model;
                 };
                 const updated_saved_model = await openPyTorchMetadata(context, saved_model);
                 return openModel(updated_saved_model, format, producer, null);

+ 8 - 4
source/view.js

@@ -6237,13 +6237,14 @@ view.Metadata = class {
         view.Metadata._metadata = view.Metadata._metadata || new Map();
         const metadata = view.Metadata._metadata;
         if (!metadata.has(name)) {
+            let data = null;
             try {
-                const content = await context.request(name);
-                const types = JSON.parse(content);
-                metadata.set(name, new view.Metadata(types));
+                data = await context.request(name);
             } catch {
-                metadata.set(name, new view.Metadata(null));
+                // continue regardless of error
             }
+            const types = JSON.parse(data);
+            metadata.set(name, new view.Metadata(types));
         }
         return metadata.get(name);
     }
@@ -6254,6 +6255,9 @@ view.Metadata = class {
         this._inputs = new Map();
         if (Array.isArray(types)) {
             for (const type of types) {
+                if (this._types.has(type.name)) {
+                    // throw new view.Error(`Duplicate type metadata '${type.name}'.`);
+                }
                 this._types.set(type.name, type);
                 if (type.identifier !== undefined) {
                     this._types.set(type.identifier, type);