run_inference_all_v100.sh (1.0 KB)
  1. #!/bin/bash
  2. # Copyright (c) 2019-2021 NVIDIA CORPORATION. All rights reserved.
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. set -e
  15. if [[ "$(docker ps | grep triton_kaldi_server | wc -l)" == "0" ]]; then
  16. printf "\nThe Triton server is currently not running. Please run scripts/docker/launch_server.sh\n\n"
  17. exit 1
  18. fi
  19. printf "\nOffline benchmarks:\n"
  20. scripts/docker/launch_client.sh -i 5 -c 2000
  21. printf "\nOnline benchmarks:\n"
  22. scripts/docker/launch_client.sh -i 10 -c 2000 -o
  23. scripts/docker/launch_client.sh -i 10 -c 1000 -o