| 123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112 |
- # *****************************************************************************
- # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
- #
- # Redistribution and use in source and binary forms, with or without
- # modification, are permitted provided that the following conditions are met:
- # * Redistributions of source code must retain the above copyright
- # notice, this list of conditions and the following disclaimer.
- # * Redistributions in binary form must reproduce the above copyright
- # notice, this list of conditions and the following disclaimer in the
- # documentation and/or other materials provided with the distribution.
- # * Neither the name of the NVIDIA CORPORATION nor the
- # names of its contributors may be used to endorse or promote products
- # derived from this software without specific prior written permission.
- #
- # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
- # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- # DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
- # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- #
- # *****************************************************************************
- import urllib.request
- import torch
- import os
- import sys
- #from https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/SpeechSynthesis/Tacotron2/inference.py
# from https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/SpeechSynthesis/Tacotron2/inference.py
def checkpoint_from_distributed(state_dict):
    """
    Checks whether checkpoint was generated by DistributedDataParallel. DDP
    wraps model in additional "module.", it needs to be unwrapped for single
    GPU inference.

    :param state_dict: model's state dict
    :return: True if any key contains the DDP "module." wrapper, else False
    """
    # Only the keys matter here; any() short-circuits on the first match,
    # mirroring the original flag-and-break loop.
    return any('module.' in key for key in state_dict)
- # from https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/SpeechSynthesis/Tacotron2/inference.py
def unwrap_distributed(state_dict):
    """
    Unwraps model from DistributedDataParallel.
    DDP wraps model in additional "module.", it needs to be removed for single
    GPU inference.

    :param state_dict: model's state dict
    :return: new state dict with "module.1." / "module." prefixes stripped
    """
    # Strip the longer "module.1." wrapper first so the plain "module."
    # pass does not leave a dangling "1." behind.
    return {
        name.replace('module.1.', '').replace('module.', ''): tensor
        for name, tensor in state_dict.items()
    }
- def _download_checkpoint(checkpoint, force_reload):
- model_dir = os.path.join(torch.hub._get_torch_home(), 'checkpoints')
- if not os.path.exists(model_dir):
- os.makedirs(model_dir)
- ckpt_file = os.path.join(model_dir, os.path.basename(checkpoint))
- if not os.path.exists(ckpt_file) or force_reload:
- sys.stderr.write('Downloading checkpoint from {}\n'.format(checkpoint))
- urllib.request.urlretrieve(checkpoint, ckpt_file)
- return ckpt_file
def nvidia_hifigan(pretrained=True, **kwargs):
    """
    Loads the NVIDIA HiFi-GAN vocoder generator for torch.hub.

    :param pretrained: if True, download an NGC checkpoint and load its weights
    :param kwargs: optional settings:
        force_reload (bool): re-download the checkpoint even if cached
        model_math (str): "fp16" converts model (and denoiser) to half precision
        any Generator config key (used only when pretrained=False), e.g.
        upsample_rates, resblock_kernel_sizes
    :return: tuple of (generator, train_setup dict, denoiser or None)
    """
    from hifigan import models as vocoder

    force_reload = "force_reload" in kwargs and kwargs["force_reload"]
    fp16 = "model_math" in kwargs and kwargs["model_math"] == "fp16"

    if pretrained:
        checkpoint = 'https://api.ngc.nvidia.com/v2/models/nvidia/dle/hifigan__pyt_ckpt_mode-finetune_ds-ljs22khz/versions/21.08.0_amp/files/hifigan_gen_checkpoint_10000_ft.pt'
        ckpt_file = _download_checkpoint(checkpoint, force_reload)
        # map_location='cpu' lets CPU-only hosts load checkpoints that were
        # saved on GPU; .half()/.cuda() can still move the model afterwards.
        ckpt = torch.load(ckpt_file, map_location='cpu')
        state_dict = ckpt['generator']
        if checkpoint_from_distributed(state_dict):
            state_dict = unwrap_distributed(state_dict)
        config = ckpt['config']
        train_setup = ckpt.get('train_setup', {})
    else:
        # Default LJSpeech 22 kHz generator hyperparameters; callers may
        # override any of them via kwargs.
        config = {'upsample_rates': [8, 8, 2, 2], 'upsample_kernel_sizes': [16, 16, 4, 4],
                  'upsample_initial_channel': 512, 'resblock': '1', 'resblock_kernel_sizes': [3, 7, 11],
                  'resblock_dilation_sizes': [[1, 3, 5], [1, 3, 5], [1, 3, 5]]}
        for key, value in kwargs.items():
            if key in config:
                config[key] = value
        train_setup = {}

    hifigan = vocoder.Generator(config)
    denoiser = None
    if pretrained:
        hifigan.load_state_dict(state_dict)
        hifigan.remove_weight_norm()
        denoiser = vocoder.Denoiser(hifigan, win_length=1024)
    if fp16:
        hifigan.half()
        # denoiser is only built in the pretrained path; guard against the
        # original AttributeError for pretrained=False + model_math="fp16".
        if denoiser is not None:
            denoiser.half()
    return hifigan, train_setup, denoiser
|