# inference_perf.py

# *****************************************************************************
#  Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
#  Redistribution and use in source and binary forms, with or without
#  modification, are permitted provided that the following conditions are met:
#      * Redistributions of source code must retain the above copyright
#        notice, this list of conditions and the following disclaimer.
#      * Redistributions in binary form must reproduce the above copyright
#        notice, this list of conditions and the following disclaimer in the
#        documentation and/or other materials provided with the distribution.
#      * Neither the name of the NVIDIA CORPORATION nor the
#        names of its contributors may be used to endorse or promote products
#        derived from this software without specific prior written permission.
#
#  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
#  ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
#  WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
#  DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
#  DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
#  (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
#  LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
#  ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
#  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
#  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import models
import torch
import argparse
import numpy as np
import json
import time
import os
import sys
import random

from inference import checkpoint_from_distributed, unwrap_distributed, load_and_setup_model, MeasureTime, prepare_input_sequence

import dllogger as DLLogger
from dllogger import StdOutBackend, JSONStreamBackend, Verbosity


def parse_args(parser):
    """
    Parse commandline arguments.
    """
    parser.add_argument('-m', '--model-name', type=str, default='',
                        required=True,
                        help='Model to benchmark (Tacotron2 or WaveGlow)')
    parser.add_argument('--model', type=str, default='',
                        help='Full path to the model checkpoint file')
    parser.add_argument('-sr', '--sampling-rate', default=22050, type=int,
                        help='Sampling rate')
    parser.add_argument('--fp16', action='store_true',
                        help='Inference with AMP (mixed precision)')
    parser.add_argument('-bs', '--batch-size', type=int, default=1)
    parser.add_argument('-o', '--output', type=str, required=True,
                        help='Directory to save results')
    parser.add_argument('--log-file', type=str, default='nvlog.json',
                        help='Filename for logging')
    parser.add_argument('--synth-data', action='store_true',
                        help='Test with synthetic data')
    return parser


def gen_text(use_synthetic_data):
    # Build a single padded text input of fixed length, either from random
    # token ids (synthetic) or from a real sentence run through the text
    # preprocessing pipeline.
    batch_size = 1
    text_len = 170
    if use_synthetic_data:
        text_padded = torch.randint(low=0, high=148,
                                    size=(batch_size, text_len),
                                    dtype=torch.long).cuda()
        input_lengths = torch.IntTensor([text_padded.size(1)] *
                                        batch_size).cuda().long()
    else:
        text = 'The forms of printed letters should be beautiful, and that their arrangement on the page should be reasonable and a help to the shapeliness of the letters themselves. ' * 2
        text = [text[:text_len]]
        text_padded, input_lengths = prepare_input_sequence(text)

    return (text_padded, input_lengths)


def gen_mel(use_synthetic_data, n_mel_channels, fp16):
    # Either draw a synthetic mel spectrogram from a normal distribution or
    # load a precomputed one from disk; optionally cast to half precision.
    if use_synthetic_data:
        batch_size = 1
        num_mels = 895
        mel_padded = torch.zeros(batch_size, n_mel_channels,
                                 num_mels).normal_(-5.62, 1.98).cuda()
    else:
        mel_padded = torch.load("data/mel.pt")

    if fp16:
        mel_padded = mel_padded.half()

    return mel_padded


def main():
    """
    Launches inference benchmark.
    Inference is executed on a single GPU.
    """
    parser = argparse.ArgumentParser(
        description='PyTorch Tacotron 2 Inference')
    parser = parse_args(parser)
    args, _ = parser.parse_known_args()

    log_file = os.path.join(args.output, args.log_file)

    torch.manual_seed(1234)
    random.seed(1234)
    np.random.seed(1234)

    DLLogger.init(backends=[JSONStreamBackend(Verbosity.DEFAULT, log_file),
                            StdOutBackend(Verbosity.VERBOSE)])
    for k, v in vars(args).items():
        DLLogger.log(step="PARAMETER", data={k: v})
    DLLogger.log(step="PARAMETER", data={'model_name': 'Tacotron2_PyT'})

    DLLogger.metadata('infer_latency', {'unit': 's'})
    DLLogger.metadata('infer_items_per_sec', {'unit': 'items/s'})

    # With synthetic data no checkpoint is needed; otherwise the checkpoint
    # file must exist on disk.
    if args.synth_data:
        model = load_and_setup_model(args.model_name, parser, None, args.fp16,
                                     cpu_run=False, forward_is_infer=True)
    else:
        if not os.path.isfile(args.model):
            print(f"File {args.model} does not exist!")
            sys.exit(1)

        model = load_and_setup_model(args.model_name, parser, args.model,
                                     args.fp16, cpu_run=False,
                                     forward_is_infer=True)

    if args.model_name == "Tacotron2":
        model = torch.jit.script(model)

    # Run warmup iterations first; only the iterations after warmup are logged.
    warmup_iters = 6
    num_iters = warmup_iters + 1

    for i in range(num_iters):
        measurements = {}

        if args.model_name == 'Tacotron2':
            text_padded, input_lengths = gen_text(args.synth_data)

            with torch.no_grad(), MeasureTime(measurements, "inference_time"):
                mels, _, _ = model(text_padded, input_lengths)
            # Items = generated mel frames (batch * time steps).
            num_items = mels.size(0) * mels.size(2)

        if args.model_name == 'WaveGlow':
            n_mel_channels = model.upsample.in_channels
            mel_padded = gen_mel(args.synth_data, n_mel_channels, args.fp16)

            with torch.no_grad(), MeasureTime(measurements, "inference_time"):
                audios = model(mel_padded)
                audios = audios.float()
            # Items = generated audio samples (batch * samples).
            num_items = audios.size(0) * audios.size(1)

        if i >= warmup_iters:
            DLLogger.log(step=(i - warmup_iters,),
                         data={"latency": measurements['inference_time']})
            DLLogger.log(step=(i - warmup_iters,),
                         data={"items_per_sec": num_items / measurements['inference_time']})

    DLLogger.log(step=tuple(),
                 data={'infer_latency': measurements['inference_time']})
    DLLogger.log(step=tuple(),
                 data={'infer_items_per_sec': num_items / measurements['inference_time']})
    DLLogger.flush()


if __name__ == '__main__':
    main()
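
# Example invocations (a sketch, not part of the original file; the checkpoint
# path and output directory below are placeholders for your own setup, and the
# output directory is assumed to exist already):
#
#   # Tacotron 2 latency/throughput with synthetic text (no checkpoint needed):
#   python inference_perf.py -m Tacotron2 --synth-data -o <output_dir>
#
#   # WaveGlow with a real checkpoint and mixed-precision inference; without
#   # --synth-data this path expects a precomputed mel at data/mel.pt:
#   python inference_perf.py -m WaveGlow --model <waveglow_checkpoint.pt> \
#       --fp16 -o <output_dir>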