Browse Source

[ConvNets/PyT] Triton numpy dependency fixed

Jakub Kosek 3 years ago
parent
commit
a5feffa7ee

+ 3 - 1
PyTorch/Classification/ConvNets/.dockerignore

@@ -1,2 +1,4 @@
 *.pth.tar
-*.log
+*.log
+workspace/
+navigator_workspace/

+ 13 - 0
PyTorch/Classification/ConvNets/triton/config_model_on_triton.py

@@ -1,3 +1,16 @@
+# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
 import logging
 from pathlib import Path
 

+ 1 - 1
PyTorch/Classification/ConvNets/triton/deployment_toolkit/core.py

@@ -1,4 +1,4 @@
-# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
+# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.

+ 1 - 1
PyTorch/Classification/ConvNets/triton/deployment_toolkit/dump.py

@@ -1,4 +1,4 @@
-# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
+# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.

+ 1 - 1
PyTorch/Classification/ConvNets/triton/deployment_toolkit/bermuda/__init__.py → PyTorch/Classification/ConvNets/triton/deployment_toolkit/library/__init__.py

@@ -1,4 +1,4 @@
-# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
+# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.

+ 0 - 0
PyTorch/Classification/ConvNets/triton/deployment_toolkit/bermuda/onnx.py → PyTorch/Classification/ConvNets/triton/deployment_toolkit/library/onnx.py


+ 1 - 1
PyTorch/Classification/ConvNets/triton/deployment_toolkit/bermuda/onnx2trt_conv.py → PyTorch/Classification/ConvNets/triton/deployment_toolkit/library/onnx2trt_conv.py

@@ -1,4 +1,4 @@
-# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
+# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.

+ 1 - 1
PyTorch/Classification/ConvNets/triton/deployment_toolkit/bermuda/pyt.py → PyTorch/Classification/ConvNets/triton/deployment_toolkit/library/pyt.py

@@ -1,4 +1,4 @@
-# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
+# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.

+ 1 - 1
PyTorch/Classification/ConvNets/triton/deployment_toolkit/bermuda/tensorrt.py → PyTorch/Classification/ConvNets/triton/deployment_toolkit/library/tensorrt.py

@@ -1,4 +1,4 @@
-# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
+# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.

+ 1 - 1
PyTorch/Classification/ConvNets/triton/deployment_toolkit/bermuda/utils.py → PyTorch/Classification/ConvNets/triton/deployment_toolkit/library/utils.py

@@ -1,4 +1,4 @@
-# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
+# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.

+ 14 - 1
PyTorch/Classification/ConvNets/triton/metric.py

@@ -1,3 +1,16 @@
+# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
 from typing import Any, Dict, List, NamedTuple, Optional
 
 import numpy as np
@@ -21,6 +34,6 @@ class MetricsCalculator(BaseMetricsCalculator):
         print(y_real["OUTPUT__0"][:128])
 
         return {
-            "accuracy": np.mean(np.argmax(y_pred["OUTPUT__0"], axis=-1) == 
+            "accuracy": np.mean(np.argmax(y_pred["OUTPUT__0"], axis=-1) ==
                                 np.argmax(y_real["OUTPUT__0"], axis=-1))
         }

+ 13 - 0
PyTorch/Classification/ConvNets/triton/model.py

@@ -1,3 +1,16 @@
+# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
 import torch
 
 def update_argparser(parser):

+ 0 - 1
PyTorch/Classification/ConvNets/triton/requirements.txt

@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 networkx==2.5
-numpy<1.20.0,>=1.19.1  # # numpy 1.20+ requires py37
 onnx==1.8.0
 onnxruntime==1.5.2
 pycuda>=2019.1.2

+ 2 - 1
PyTorch/Classification/ConvNets/triton/resnet50/Dockerfile

@@ -16,12 +16,13 @@ ENV PATH /workspace/install/bin:${PATH}
 ENV LD_LIBRARY_PATH /workspace/install/lib:${LD_LIBRARY_PATH}
 
 ENV PYTHONPATH /workspace
+ENV PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION python
 WORKDIR /workspace
 
 ADD requirements.txt /workspace/requirements.txt
 ADD triton/requirements.txt /workspace/triton/requirements.txt
 RUN pip install -r /workspace/requirements.txt
-RUN pip install -r /workspace/triton/requirements.txt
+RUN pip install --use-feature=2020-resolver -r /workspace/triton/requirements.txt
 
 ADD . /workspace
 

+ 24 - 33
PyTorch/Classification/ConvNets/triton/resnet50/README.md

@@ -103,51 +103,49 @@ Running the following scripts will build and launch the container with all requi
  
    IMPORTANT: This step is executed on the host computer.
  
-   ```
+    ```
     git clone https://github.com/NVIDIA/DeepLearningExamples.git
     cd DeepLearningExamples/PyTorch/Classification/ConvNets
-   ```
+    ```
+   
 2. Setup the environment in the host computer and start Triton Inference Server.
  
-   ```
+    ```
     source triton/scripts/setup_environment.sh
     bash triton/scripts/docker/triton_inference_server.sh 
-   ```
+    ```
 
 3. Build and run a container that extends the NGC PyTorch container with the Triton Inference Server client libraries and dependencies.
  
-   ```
+    ```
     bash triton/scripts/docker/build.sh
     bash triton/scripts/docker/interactive.sh
-   ```
+    ```
 
 
 4. Prepare the deployment configuration and create folders in Docker.
  
    IMPORTANT: These and the following commands must be executed in the PyTorch NGC container.
- 
- 
-   ```
+
+    ```
     source triton/scripts/setup_environment.sh
-   ```
+    ```
 
 5. Download and pre-process the dataset.
- 
- 
-   ```
+  
+    ```
     bash triton/scripts/download_data.sh
     bash triton/scripts/process_dataset.sh
-   ```
+    ```
  
 6. Setup the parameters for deployment.
  
-   ```
+    ```
     source triton/scripts/setup_parameters.sh
-   ```
+    ```
  
 7. Convert the model from training to inference format (e.g. TensorRT).
- 
- 
+  
    ```
     python3 triton/convert_model.py \
         --input-path triton/model.py \
@@ -230,7 +228,7 @@ Running the following scripts will build and launch the container with all requi
    presented below set the maximum latency to zero to achieve the best latency
    possible with good performance.
  
-   ```
+    ```
     python triton/run_online_performance_test_on_triton.py \
         --model-name ${MODEL_NAME} \
         --input-data random \
@@ -238,8 +236,7 @@ Running the following scripts will build and launch the container with all requi
         --triton-instances ${TRITON_INSTANCES} \
         --number-of-model-instances ${NUMBER_OF_MODEL_INSTANCES} \
         --result-path ${SHARED_DIR}/triton_performance_online.csv
- 
-   ```
+    ```
  
 
 
@@ -253,14 +250,14 @@ Running the following scripts will build and launch the container with all requi
    from increasing the batch size due to efficiency gains in the GPU with larger
    batches.
  
-   ```
+    ```
     python triton/run_offline_performance_test_on_triton.py \
         --model-name ${MODEL_NAME} \
         --input-data random \
         --batch-sizes ${BATCH_SIZE} \
         --triton-instances ${TRITON_INSTANCES} \
         --result-path ${SHARED_DIR}/triton_performance_offline.csv
-   ```
+    ```
  
 
 
@@ -290,8 +287,6 @@ BATCH_SIZE="1, 2, 4, 8, 16, 32, 64, 128"
 BACKEND_ACCELERATOR="cuda"
 MAX_BATCH_SIZE="128"
 NUMBER_OF_MODEL_INSTANCES="1"
-TRITON_MAX_QUEUE_DELAY="1"
-TRITON_PREFERRED_BATCH_SIZES="64 128"
 
 ```
 
@@ -332,10 +327,8 @@ The performance measurements in this document were conducted at the time of publ
 This table lists the common variable parameters for all performance measurements:
 | Parameter Name               | Parameter Value   |
 |:-----------------------------|:------------------|
-| Max Batch Size               | 128.0             |
-| Number of model instances    | 1.0               |
-| Triton Max Queue Delay       | 1.0               |
-| Triton Preferred Batch Sizes | 64 128            |
+| Max Batch Size               | 128             |
+| Number of model instances    | 1               |
 
 
 
@@ -508,10 +501,8 @@ Full tabular data
 This table lists the common variable parameters for all performance measurements:
 | Parameter Name               | Parameter Value   |
 |:-----------------------------|:------------------|
-| Max Batch Size               | 128.0             |
-| Number of model instances    | 1.0               |
-| Triton Max Queue Delay       | 1.0               |
-| Triton Preferred Batch Sizes | 64 128            |
+| Max Batch Size               | 128             |
+| Number of model instances    | 2               |