entrypoints.py 8.7 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203
  1. # *****************************************************************************
  2. # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
  3. #
  4. # Redistribution and use in source and binary forms, with or without
  5. # modification, are permitted provided that the following conditions are met:
  6. # * Redistributions of source code must retain the above copyright
  7. # notice, this list of conditions and the following disclaimer.
  8. # * Redistributions in binary form must reproduce the above copyright
  9. # notice, this list of conditions and the following disclaimer in the
  10. # documentation and/or other materials provided with the distribution.
  11. # * Neither the name of the NVIDIA CORPORATION nor the
  12. # names of its contributors may be used to endorse or promote products
  13. # derived from this software without specific prior written permission.
  14. #
  15. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
  16. # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
  17. # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
  18. # DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
  19. # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
  20. # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  21. # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
  22. # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  23. # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
  24. # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  25. #
  26. # *****************************************************************************
  27. import urllib.request
  28. import torch
  29. import os
  30. import sys
  31. #from https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/SpeechSynthesis/Tacotron2/inference.py
  32. def checkpoint_from_distributed(state_dict):
  33. """
  34. Checks whether checkpoint was generated by DistributedDataParallel. DDP
  35. wraps model in additional "module.", it needs to be unwrapped for single
  36. GPU inference.
  37. :param state_dict: model's state dict
  38. """
  39. ret = False
  40. for key, _ in state_dict.items():
  41. if key.find('module.') != -1:
  42. ret = True
  43. break
  44. return ret
  45. # from https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/SpeechSynthesis/Tacotron2/inference.py
  46. def unwrap_distributed(state_dict):
  47. """
  48. Unwraps model from DistributedDataParallel.
  49. DDP wraps model in additional "module.", it needs to be removed for single
  50. GPU inference.
  51. :param state_dict: model's state dict
  52. """
  53. new_state_dict = {}
  54. for key, value in state_dict.items():
  55. new_key = key.replace('module.1.', '')
  56. new_key = new_key.replace('module.', '')
  57. new_state_dict[new_key] = value
  58. return new_state_dict
  59. def _download_checkpoint(checkpoint, force_reload):
  60. model_dir = os.path.join(torch.hub._get_torch_home(), 'checkpoints')
  61. if not os.path.exists(model_dir):
  62. os.makedirs(model_dir)
  63. ckpt_file = os.path.join(model_dir, os.path.basename(checkpoint))
  64. if not os.path.exists(ckpt_file) or force_reload:
  65. sys.stderr.write('Downloading checkpoint from {}\n'.format(checkpoint))
  66. urllib.request.urlretrieve(checkpoint, ckpt_file)
  67. return ckpt_file
  68. def nvidia_fastpitch(pretrained=True, **kwargs):
  69. """TODO
  70. """
  71. from fastpitch import model as fastpitch
  72. force_reload = "force_reload" in kwargs and kwargs["force_reload"]
  73. fp16 = "model_math" in kwargs and kwargs["model_math"] == "fp16"
  74. if pretrained:
  75. checkpoint = 'https://api.ngc.nvidia.com/v2/models/nvidia/dle/fastpitch__pyt_ckpt/versions/21.12.1_amp/files/nvidia_fastpitch_210824+cfg.pt'
  76. ckpt_file = _download_checkpoint(checkpoint, force_reload)
  77. ckpt = torch.load(ckpt_file)
  78. state_dict = ckpt['state_dict']
  79. if checkpoint_from_distributed(state_dict):
  80. state_dict = unwrap_distributed(state_dict)
  81. config = ckpt['config']
  82. train_setup = ckpt.get('train_setup', {})
  83. else:
  84. config = {'n_mel_channels': 80, 'n_symbols': 148, 'padding_idx': 0, 'symbols_embedding_dim': 384,
  85. 'in_fft_n_layers': 6, 'in_fft_n_heads': 1, 'in_fft_d_head': 64, 'in_fft_conv1d_kernel_size': 3,
  86. 'in_fft_conv1d_filter_size': 1536, 'in_fft_output_size': 384, 'p_in_fft_dropout': 0.1,
  87. 'p_in_fft_dropatt': 0.1, 'p_in_fft_dropemb': 0.0, 'out_fft_n_layers': 6, 'out_fft_n_heads': 1,
  88. 'out_fft_d_head': 64, 'out_fft_conv1d_kernel_size': 3, 'out_fft_conv1d_filter_size': 1536,
  89. 'out_fft_output_size': 384, 'p_out_fft_dropout': 0.1, 'p_out_fft_dropatt': 0.1, 'p_out_fft_dropemb': 0.0,
  90. 'dur_predictor_kernel_size': 3, 'dur_predictor_filter_size': 256, 'p_dur_predictor_dropout': 0.1,
  91. 'dur_predictor_n_layers': 2, 'pitch_predictor_kernel_size': 3, 'pitch_predictor_filter_size': 256,
  92. 'p_pitch_predictor_dropout': 0.1, 'pitch_predictor_n_layers': 2, 'pitch_embedding_kernel_size': 3,
  93. 'n_speakers': 1, 'speaker_emb_weight': 1.0, 'energy_predictor_kernel_size': 3,
  94. 'energy_predictor_filter_size': 256, 'p_energy_predictor_dropout': 0.1, 'energy_predictor_n_layers': 2,
  95. 'energy_conditioning': True, 'energy_embedding_kernel_size': 3}
  96. for k,v in kwargs.items():
  97. if k in config.keys():
  98. config[k] = v
  99. train_setup = {}
  100. model = fastpitch.FastPitch(**config)
  101. if pretrained:
  102. model.load_state_dict(state_dict)
  103. if fp16:
  104. model.half()
  105. model.forward = model.infer
  106. return model, train_setup
def nvidia_textprocessing_utils(cmudict_path, heteronyms_path, **kwargs):
    """
    Build a text pre-processing helper for FastPitch inference.

    :param cmudict_path: path to the CMU pronouncing dictionary file
    :param heteronyms_path: path to the heteronyms list file
    :param kwargs: unused; accepted for hub entry-point compatibility
    :return: object exposing ``prepare_input_sequence`` which encodes raw
        text strings into length-sorted, padded, batched LongTensors
    """
    from common.text.text_processing import TextProcessing
    import numpy as np
    from torch.nn.utils.rnn import pad_sequence
    from common.text import cmudict
    class TextPreProcessing:
        @staticmethod
        def prepare_input_sequence(texts, batch_size=1, device='cpu'):
            """
            Encode ``texts`` into batches of padded symbol-id tensors.

            NOTE(review): sequences are sorted by descending length and the
            permutation is not returned, so batch order differs from input
            order — callers must not assume positional correspondence.

            :param texts: iterable of raw text strings
            :param batch_size: sequences per batch
            :param device: device to move the resulting tensors to
            :return: list of dicts with keys 'text' (padded LongTensor) and
                'text_lens' (LongTensor of original lengths)
            """
            # NOTE(review): the dictionary and TextProcessing object are
            # rebuilt on every call — presumably cheap for one-off inference;
            # confirm before calling in a hot loop.
            cmudict.initialize(cmudict_path, heteronyms_path)
            tp = TextProcessing(symbol_set='english_basic', cleaner_names=['english_cleaners_v2'], p_arpabet=1.0)
            fields={}
            # Encode each text into a 1-D LongTensor of symbol ids.
            fields['text'] = [torch.LongTensor(tp.encode_text(text))
                              for text in texts]
            # Sort by descending sequence length (argsort of negated sizes).
            order = np.argsort([-t.size(0) for t in fields['text']])
            fields['text'] = [fields['text'][i] for i in order]
            fields['text_lens'] = torch.LongTensor([t.size(0) for t in fields['text']])
            # Debug echo of the normalized text, printed to stdout.
            for t in fields['text']:
                print(tp.sequence_to_text(t.numpy()))
            # cut into batches & pad
            batches = []
            for b in range(0, len(order), batch_size):
                batch = {f: values[b:b+batch_size] for f, values in fields.items()}
                for f in batch:
                    if f == 'text':
                        # Zero-pad the batch to its longest sequence.
                        batch[f] = pad_sequence(batch[f], batch_first=True)
                    if type(batch[f]) is torch.Tensor:
                        batch[f] = batch[f].to(device)
                batches.append(batch)
            return batches
    return TextPreProcessing()
  137. # # from tacotron2.text import text_to_sequence
  138. # @staticmethod
  139. # def pad_sequences(batch):
  140. # # Right zero-pad all one-hot text sequences to max input length
  141. # input_lengths, ids_sorted_decreasing = torch.sort(
  142. # torch.LongTensor([len(x) for x in batch]),
  143. # dim=0, descending=True)
  144. # max_input_len = input_lengths[0]
  145. # text_padded = torch.LongTensor(len(batch), max_input_len)
  146. # text_padded.zero_()
  147. # for i in range(len(ids_sorted_decreasing)):
  148. # text = batch[ids_sorted_decreasing[i]]
  149. # text_padded[i, :text.size(0)] = text
  150. # return text_padded, input_lengths
  151. # @staticmethod
  152. # def prepare_input_sequence(texts, cpu_run=False):
  153. # d = []
  154. # # for i,text in enumerate(texts):
  155. # # d.append(torch.IntTensor(
  156. # # Processing.text_to_sequence(text, ['english_cleaners'])[:]))
  157. # text_padded, input_lengths = Processing.pad_sequences(d)
  158. # if not cpu_run:
  159. # text_padded = text_padded.cuda().long()
  160. # input_lengths = input_lengths.cuda().long()
  161. # else:
  162. # text_padded = text_padded.long()
  163. # input_lengths = input_lengths.long()
  164. # return text_padded, input_lengths
  165. # return Processing()