# benchmark.py
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
  14. import argparse
  15. import json
  16. import sys
  17. import tempfile
  18. import json
  19. import os
  20. from collections import OrderedDict
  21. from subprocess import Popen
  22. parser = argparse.ArgumentParser(description='Benchmark')
  23. parser.add_argument('--executable', default='./runner', help='path to runner')
  24. parser.add_argument('-n', '--ngpus', metavar='N1,[N2,...]',
  25. required=True, help='numbers of gpus separated by comma')
  26. parser.add_argument('-b', '--batch-sizes', metavar='B1,[B2,...]',
  27. required=True, help='batch sizes separated by comma')
  28. parser.add_argument('-i', '--benchmark-iters', metavar='I',
  29. type=int, default=100, help='iterations')
  30. parser.add_argument('-e', '--epochs', metavar='E',
  31. type=int, default=1, help='number of epochs')
  32. parser.add_argument('-w', '--warmup', metavar='N',
  33. type=int, default=0, help='warmup epochs')
  34. parser.add_argument('-o', '--output', metavar='OUT', required=True, help="path to benchmark report")
  35. parser.add_argument('--only-inference', action='store_true', help="benchmark inference only")
  36. args, other_args = parser.parse_known_args()
  37. ngpus = list(map(int, args.ngpus.split(',')))
  38. batch_sizes = list(map(int, args.batch_sizes.split(',')))
  39. res = OrderedDict()
  40. res['model'] = ''
  41. res['ngpus'] = ngpus
  42. res['bs'] = batch_sizes
  43. if args.only_inference:
  44. res['metric_keys'] = ['val.total_ips']
  45. else:
  46. res['metric_keys'] = ['train.total_ips', 'val.total_ips']
  47. res['metrics'] = OrderedDict()
  48. for n in ngpus:
  49. res['metrics'][str(n)] = OrderedDict()
  50. for bs in batch_sizes:
  51. res['metrics'][str(n)][str(bs)] = OrderedDict()
  52. report_file = args.output + '-{},{}'.format(n, bs)
  53. Popen([args.executable, '-n', str(n), '-b', str(bs),
  54. '--benchmark-iters', str(args.benchmark_iters),
  55. '-e', str(args.epochs), '--report', report_file,
  56. *([] if not args.only_inference else ['--only-inference']),
  57. '--no-metrics'] + other_args, stdout=sys.stderr).wait()
  58. with open(report_file, 'r') as f:
  59. report = json.load(f)
  60. for metric in res['metric_keys']:
  61. data = report['metrics'][metric][args.warmup:]
  62. avg = len(data) / sum(map(lambda x: 1 / x, data))
  63. res['metrics'][str(n)][str(bs)][metric] = avg
  64. column_len = 7
  65. for m in res['metric_keys']:
  66. print(m, file=sys.stderr)
  67. print(' ' * column_len, end='|', file=sys.stderr)
  68. for bs in batch_sizes:
  69. print(str(bs).center(column_len), end='|', file=sys.stderr)
  70. print(file=sys.stderr)
  71. print('-' * (len(batch_sizes) + 1) * (column_len + 1), file=sys.stderr)
  72. for n in ngpus:
  73. print(str(n).center(column_len), end='|', file=sys.stderr)
  74. for bs in batch_sizes:
  75. print(str(round(res['metrics'][str(n)][str(bs)][m])).center(column_len), end='|', file=sys.stderr)
  76. print(file=sys.stderr)
  77. print(file=sys.stderr)
  78. with open(args.output, 'w') as f:
  79. json.dump(res, f, indent=4)