inference.py

# *****************************************************************************
#  Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
#  Redistribution and use in source and binary forms, with or without
#  modification, are permitted provided that the following conditions are met:
#      * Redistributions of source code must retain the above copyright
#        notice, this list of conditions and the following disclaimer.
#      * Redistributions in binary form must reproduce the above copyright
#        notice, this list of conditions and the following disclaimer in the
#        documentation and/or other materials provided with the distribution.
#      * Neither the name of the NVIDIA CORPORATION nor the
#        names of its contributors may be used to endorse or promote products
#        derived from this software without specific prior written permission.
#
#  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
#  ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
#  WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
#  DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
#  DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
#  (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
#  LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
#  ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
#  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
#  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
from tacotron2.text import text_to_sequence
import models
import torch
import argparse
import os
import numpy as np
from scipy.io.wavfile import write
import matplotlib
import matplotlib.pyplot as plt

import sys
import time

import dllogger as DLLogger
from dllogger import StdOutBackend, JSONStreamBackend, Verbosity

from waveglow.denoiser import Denoiser


def parse_args(parser):
    """
    Parse commandline arguments.
    """
    parser.add_argument('-i', '--input', type=str, required=True,
                        help='full path to the input text (phrases separated by newlines)')
    parser.add_argument('-o', '--output', required=True,
                        help='output folder to save audio (file per phrase)')
    parser.add_argument('--suffix', type=str, default="",
                        help='output filename suffix')
    parser.add_argument('--tacotron2', type=str,
                        help='full path to the Tacotron2 model checkpoint file')
    parser.add_argument('--waveglow', type=str,
                        help='full path to the WaveGlow model checkpoint file')
    parser.add_argument('-s', '--sigma-infer', default=0.9, type=float)
    parser.add_argument('-d', '--denoising-strength', default=0.01, type=float)
    parser.add_argument('-sr', '--sampling-rate', default=22050, type=int,
                        help='Sampling rate')

    run_mode = parser.add_mutually_exclusive_group()
    run_mode.add_argument('--fp16', action='store_true',
                          help='Run inference with mixed precision')
    run_mode.add_argument('--cpu', action='store_true',
                          help='Run inference on CPU')

    parser.add_argument('--log-file', type=str, default='nvlog.json',
                        help='Filename for logging')
    parser.add_argument('--include-warmup', action='store_true',
                        help='Include warmup')
    parser.add_argument('--stft-hop-length', type=int, default=256,
                        help='STFT hop length for estimating audio length from mel size')

    return parser


def checkpoint_from_distributed(state_dict):
    """
    Checks whether the checkpoint was generated by DistributedDataParallel.
    DDP wraps the model in an additional "module." prefix, which needs to be
    unwrapped for single-GPU inference.
    :param state_dict: model's state dict
    """
    ret = False
    for key, _ in state_dict.items():
        if key.find('module.') != -1:
            ret = True
            break
    return ret


def unwrap_distributed(state_dict):
    """
    Unwraps the model from DistributedDataParallel.
    DDP wraps the model in an additional "module." prefix, which needs to be
    removed for single-GPU inference.
    :param state_dict: model's state dict
    """
    new_state_dict = {}
    for key, value in state_dict.items():
        new_key = key.replace('module.', '')
        new_state_dict[new_key] = value
    return new_state_dict


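# Sketch of the renaming on a made-up state dict (key names are illustrative,
# not taken from an actual checkpoint):
#   {'module.embedding.weight': w}  ->  {'embedding.weight': w}

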
def load_and_setup_model(model_name, parser, checkpoint, fp16_run, cpu_run,
                         forward_is_infer=False, jittable=False):
    # Build the model from its per-model CLI arguments, load the checkpoint
    # (unwrapping DDP prefixes if present), and prepare it for inference.
    model_parser = models.model_parser(model_name, parser, add_help=False)
    model_args, _ = model_parser.parse_known_args()

    model_config = models.get_model_config(model_name, model_args)
    model = models.get_model(model_name, model_config, cpu_run=cpu_run,
                             forward_is_infer=forward_is_infer,
                             jittable=jittable)

    if checkpoint is not None:
        if cpu_run:
            state_dict = torch.load(checkpoint, map_location=torch.device('cpu'))['state_dict']
        else:
            state_dict = torch.load(checkpoint)['state_dict']

        if checkpoint_from_distributed(state_dict):
            state_dict = unwrap_distributed(state_dict)

        model.load_state_dict(state_dict)

    if model_name == "WaveGlow":
        model = model.remove_weightnorm(model)

    model.eval()

    if fp16_run:
        model.half()

    return model


# taken from tacotron2/data_function.py:TextMelCollate.__call__
def pad_sequences(batch):
    # Right zero-pad all one-hot text sequences to max input length
    input_lengths, ids_sorted_decreasing = torch.sort(
        torch.LongTensor([len(x) for x in batch]),
        dim=0, descending=True)
    max_input_len = input_lengths[0]

    text_padded = torch.LongTensor(len(batch), max_input_len)
    text_padded.zero_()
    for i in range(len(ids_sorted_decreasing)):
        text = batch[ids_sorted_decreasing[i]]
        text_padded[i, :text.size(0)] = text

    return text_padded, input_lengths


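# Example of the resulting layout (illustrative shapes): for token sequences
# of lengths [3, 5], pad_sequences returns a (2, 5) LongTensor whose rows are
# sorted by decreasing length and right-padded with zeros, plus the sorted
# lengths tensor [5, 3].

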
def prepare_input_sequence(texts, cpu_run=False):
    d = []
    for text in texts:
        d.append(torch.IntTensor(
            text_to_sequence(text, ['english_cleaners'])[:]))

    text_padded, input_lengths = pad_sequences(d)
    if not cpu_run:
        text_padded = text_padded.cuda().long()
        input_lengths = input_lengths.cuda().long()
    else:
        text_padded = text_padded.long()
        input_lengths = input_lengths.long()

    return text_padded, input_lengths


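# Minimal usage sketch (the input text is chosen arbitrarily for illustration):
#   text_padded, input_lengths = prepare_input_sequence(
#       ["The quick brown fox."], cpu_run=True)
#   # text_padded: (1, T) LongTensor of token ids; input_lengths: [T]

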
class MeasureTime():
    def __init__(self, measurements, key, cpu_run=False):
        self.measurements = measurements
        self.key = key
        self.cpu_run = cpu_run

    def __enter__(self):
        if not self.cpu_run:
            torch.cuda.synchronize()
        self.t0 = time.perf_counter()

    def __exit__(self, exc_type, exc_value, exc_traceback):
        if not self.cpu_run:
            torch.cuda.synchronize()
        self.measurements[self.key] = time.perf_counter() - self.t0


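# Usage sketch: time a block and record the elapsed seconds under a key in a
# dict (the key name and workload below are arbitrary; on GPU, the
# cuda.synchronize() calls make the timing reflect actual kernel completion):
#   measurements = {}
#   with MeasureTime(measurements, "forward_time", cpu_run=True):
#       run_model()  # hypothetical workload
#   print(measurements["forward_time"])

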
def main():
    """
    Launches text-to-speech inference on a single GPU or CPU.
    """
    parser = argparse.ArgumentParser(
        description='PyTorch Tacotron 2 Inference')
    parser = parse_args(parser)
    args, _ = parser.parse_known_args()

    log_file = os.path.join(args.output, args.log_file)
    DLLogger.init(backends=[JSONStreamBackend(Verbosity.DEFAULT, log_file),
                            StdOutBackend(Verbosity.VERBOSE)])
    for k, v in vars(args).items():
        DLLogger.log(step="PARAMETER", data={k: v})
    DLLogger.log(step="PARAMETER", data={'model_name': 'Tacotron2_PyT'})

    tacotron2 = load_and_setup_model('Tacotron2', parser, args.tacotron2,
                                     args.fp16, args.cpu, forward_is_infer=True)
    waveglow = load_and_setup_model('WaveGlow', parser, args.waveglow,
                                    args.fp16, args.cpu, forward_is_infer=True,
                                    jittable=True)
    denoiser = Denoiser(waveglow)
    if not args.cpu:
        denoiser.cuda()
    waveglow.make_ts_scriptable()
    jitted_waveglow = torch.jit.script(waveglow)
    jitted_tacotron2 = torch.jit.script(tacotron2)

    texts = []
    try:
        with open(args.input, 'r') as f:
            texts = f.readlines()
    except IOError:
        print("Could not read file:", args.input)
        sys.exit(1)

    if args.include_warmup:
        sequence = torch.randint(low=0, high=148, size=(1, 50)).long()
        input_lengths = torch.IntTensor([sequence.size(1)]).long()
        if not args.cpu:
            sequence = sequence.cuda()
            input_lengths = input_lengths.cuda()
        for i in range(3):
            with torch.no_grad():
                mel, mel_lengths, _ = jitted_tacotron2(sequence, input_lengths)
                _ = jitted_waveglow(mel)

    measurements = {}

    sequences_padded, input_lengths = prepare_input_sequence(texts, args.cpu)

    with torch.no_grad(), MeasureTime(measurements, "tacotron2_time", args.cpu):
        mel, mel_lengths, alignments = jitted_tacotron2(sequences_padded, input_lengths)

    with torch.no_grad(), MeasureTime(measurements, "waveglow_time", args.cpu):
        audios = jitted_waveglow(mel, sigma=args.sigma_infer)
        audios = audios.float()
    with torch.no_grad(), MeasureTime(measurements, "denoiser_time", args.cpu):
        audios = denoiser(audios, strength=args.denoising_strength).squeeze(1)

    print("Stopping after", mel.size(2), "decoder steps")
    tacotron2_infer_perf = mel.size(0)*mel.size(2)/measurements['tacotron2_time']
    waveglow_infer_perf = audios.size(0)*audios.size(1)/measurements['waveglow_time']

    DLLogger.log(step=0, data={"tacotron2_items_per_sec": tacotron2_infer_perf})
    DLLogger.log(step=0, data={"tacotron2_latency": measurements['tacotron2_time']})
    DLLogger.log(step=0, data={"waveglow_items_per_sec": waveglow_infer_perf})
    DLLogger.log(step=0, data={"waveglow_latency": measurements['waveglow_time']})
    DLLogger.log(step=0, data={"denoiser_latency": measurements['denoiser_time']})
    DLLogger.log(step=0, data={"latency": (measurements['tacotron2_time'] +
                                           measurements['waveglow_time'] +
                                           measurements['denoiser_time'])})

    for i, audio in enumerate(audios):
        plt.imshow(alignments[i].float().data.cpu().numpy().T,
                   aspect="auto", origin="lower")
        figure_path = os.path.join(args.output,
                                   "alignment_"+str(i)+args.suffix+".png")
        plt.savefig(figure_path)

        audio = audio[:mel_lengths[i]*args.stft_hop_length]
        audio = audio/torch.max(torch.abs(audio))
        audio_path = os.path.join(args.output,
                                  "audio_"+str(i)+args.suffix+".wav")
        write(audio_path, args.sampling_rate, audio.cpu().numpy())

    DLLogger.flush()


if __name__ == '__main__':
    main()
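
# Example invocation (file names and checkpoint paths are placeholders,
# assuming the checkpoints were obtained separately):
#   python inference.py -i phrases.txt -o output/ \
#       --tacotron2 tacotron2_checkpoint.pt \
#       --waveglow waveglow_checkpoint.pt --fp16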