inference.py

# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import math
import os
import random
import time
from heapq import nlargest
from itertools import chain, repeat
from pathlib import Path

import dllogger
import numpy as np
import torch
import torch.distributed as distrib
from dllogger import JSONStreamBackend, StdOutBackend, Verbosity
from tqdm import tqdm

from common import helpers
from common.dali.data_loader import DaliDataLoader
from common.dataset import (AudioDataset, FilelistDataset, get_data_loader,
                            SingleAudioDataset)
from common.features import BaseFeatures, FilterbankFeatures
from common.helpers import print_once, process_evaluation_epoch
from common.tb_dllogger import stdout_metric_format, unique_log_fpath
from nemo_dle_model_converter import load_nemo_ckpt
from quartznet import config
from quartznet.model import GreedyCTCDecoder, QuartzNet


def get_parser():
    parser = argparse.ArgumentParser(description='QuartzNet inference')
    parser.add_argument('--batch_size', default=16, type=int,
                        help='Data batch size')
    parser.add_argument('--steps', default=0, type=int,
                        help='Eval this many steps for every worker')
    parser.add_argument('--warmup_steps', default=0, type=int,
                        help='Burn-in period before measuring latencies')
    parser.add_argument('--model_config', type=str, required=True,
                        help='Relative model config path given dataset folder')
    parser.add_argument('--dataset_dir', type=str,
                        help='Absolute path to dataset folder')
    parser.add_argument('--val_manifests', type=str, nargs='+',
                        help='Relative path to evaluation dataset manifest files')
    parser.add_argument('--ckpt', default=None, type=str,
                        help='Path to model checkpoint')
    parser.add_argument('--amp', '--fp16', action='store_true',
                        help='Use FP16 precision')
    parser.add_argument('--cudnn_benchmark', action='store_true',
                        help='Enable cudnn benchmark')
    parser.add_argument('--cpu', action='store_true',
                        help='Run inference on CPU')
    parser.add_argument('--seed', default=None, type=int, help='Random seed')
    parser.add_argument('--local_rank', default=os.getenv('LOCAL_RANK', 0),
                        type=int, help='GPU id used for distributed processing')

    io = parser.add_argument_group('feature and checkpointing setup')
    io.add_argument('--dali_device', type=str, choices=['none', 'cpu', 'gpu'],
                    default='gpu', help='Use DALI pipeline for fast data processing')
    io.add_argument('--save_predictions', type=str, default=None,
                    help='Save predictions in text form at this location')
    io.add_argument('--save_logits', default=None, type=str,
                    help='Save output logits under specified path')
    io.add_argument('--transcribe_wav', type=str,
                    help='Path to a single .wav file (16 kHz)')
    io.add_argument('--transcribe_filelist', type=str,
                    help='Path to a filelist with one .wav path per line')
    io.add_argument('-o', '--output_dir', default='results/',
                    help='Output folder to save audio (file per phrase)')
    io.add_argument('--log_file', type=str, default=None,
                    help='Path to a DLLogger log file')
    io.add_argument('--ema', action='store_true',
                    help='Load averaged model weights')
    io.add_argument('--torchscript', action='store_true',
                    help='Evaluate with a TorchScripted model')
    io.add_argument('--torchscript_export', action='store_true',
                    help='Export the model with torch.jit to the output_dir')
    io.add_argument('--override_config', type=str, action='append',
                    help='Overrides an arbitrary config value.'
                         ' Syntax: `--override_config nested.config.key=val`.')
    return parser
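
# Example invocation (illustrative; the config, checkpoint, and dataset paths
# below are hypothetical and depend on your local setup):
#
#   python inference.py \
#       --model_config configs/quartznet.yaml \
#       --ckpt /checkpoints/quartznet_fp16.pt \
#       --dataset_dir /datasets/LibriSpeech \
#       --val_manifests /datasets/LibriSpeech/dev-clean.json \
#       --batch_size 16 --amp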


def durs_to_percentiles(durations, ratios):
    durations = np.asarray(durations) * 1000  # in ms
    latency = durations[5:]  # drop the first five measurements as extra burn-in
    mean_latency = np.mean(latency)

    latency_worst = nlargest(math.ceil((1 - min(ratios)) * len(latency)),
                             latency)
    latency_ranges = get_percentile(ratios, latency_worst, len(latency))
    latency_ranges[0.5] = mean_latency
    return latency_ranges


def get_percentile(ratios, arr, nsamples):
    res = {}
    for a in ratios:
        idx = max(int(nsamples * (1 - a)), 0)
        res[a] = arr[idx]
    return res
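
# Note: `latency_worst` holds the slowest samples sorted in descending order,
# so for a ratio `a` the value at index int(nsamples * (1 - a)) approximates
# the a-th quantile of the latency distribution; e.g. the reported 0.99 value
# is roughly np.percentile(latency, 99).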


def torchscript_export(data_loader, audio_processor, model, greedy_decoder,
                       output_dir, use_amp, use_conv_masks, model_config,
                       device, save):
    audio_processor.to(device)

    # fetch a single batch to use as example inputs for tracing
    for batch in data_loader:
        batch = [t.to(device, non_blocking=True) for t in batch]
        audio, audio_len, _, _ = batch
        feats, feat_lens = audio_processor(audio, audio_len)
        break

    print("\nExporting featurizer...")
    print("\nNOTE: Dithering causes warnings about non-determinism.\n")
    ts_feat = torch.jit.trace(audio_processor, (audio, audio_len))

    print("\nExporting acoustic model...")
    model(feats, feat_lens)  # warm-up run before tracing
    ts_acoustic = torch.jit.trace(model, (feats, feat_lens))

    print("\nExporting decoder...")
    ts_decoder = torch.jit.script(greedy_decoder)
    print("\nJIT export complete.")

    if save:
        precision = "fp16" if use_amp else "fp32"
        module_name = f'{os.path.basename(model_config)}_{precision}'
        ts_feat.save(os.path.join(output_dir, module_name + "_feat.pt"))
        ts_acoustic.save(os.path.join(output_dir, module_name + "_acoustic.pt"))
        ts_decoder.save(os.path.join(output_dir, module_name + "_decoder.pt"))

    return ts_feat, ts_acoustic, ts_decoder
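
# The saved modules can later be restored with torch.jit.load. A minimal
# sketch, assuming an fp32 export of a hypothetical config file named
# `quartznet.yaml` into the default output directory:
#
#   ts_feat = torch.jit.load('results/quartznet.yaml_fp32_feat.pt')
#   ts_acoustic = torch.jit.load('results/quartznet.yaml_fp32_acoustic.pt')
#   ts_decoder = torch.jit.load('results/quartznet.yaml_fp32_decoder.pt')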


def main():
    parser = get_parser()
    args = parser.parse_args()

    log_fpath = args.log_file or str(Path(args.output_dir, 'nvlog_infer.json'))
    dllogger.init(backends=[
        JSONStreamBackend(Verbosity.DEFAULT, log_fpath, append=True),
        JSONStreamBackend(Verbosity.DEFAULT, unique_log_fpath(log_fpath)),
        StdOutBackend(Verbosity.VERBOSE, metric_format=stdout_metric_format)
    ])
    for k, v in vars(args).items():
        dllogger.log("PARAMETER", {k: v})

    for step in ['DNN', 'data+DNN', 'data']:
        for c in [0.99, 0.95, 0.9, 0.5]:
            cs = 'avg' if c == 0.5 else f'{int(100 * c)}%'
            dllogger.metadata(f'{step.lower()}_latency_{c}',
                              {'name': f'{step} latency {cs}',
                               'format': ':>7.2f', 'unit': 'ms'})
    dllogger.metadata(
        'eval_wer', {'name': 'WER', 'format': ':>3.2f', 'unit': '%'})

    if args.cpu:
        device = torch.device('cpu')
    else:
        assert torch.cuda.is_available()
        device = torch.device('cuda')
        torch.backends.cudnn.benchmark = args.cudnn_benchmark

    if args.seed is not None:
        torch.manual_seed(args.seed + args.local_rank)
        np.random.seed(args.seed + args.local_rank)
        random.seed(args.seed + args.local_rank)

    # set up distributed execution
    multi_gpu = not args.cpu and int(os.environ.get('WORLD_SIZE', 1)) > 1
    if multi_gpu:
        torch.cuda.set_device(args.local_rank)
        distrib.init_process_group(backend='nccl', init_method='env://')
        print_once(f'Inference with {distrib.get_world_size()} GPUs')
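
    # For multi-GPU inference, launch with torchrun (or torch.distributed),
    # which sets WORLD_SIZE and LOCAL_RANK in the environment, e.g.
    # (illustrative): torchrun --nproc_per_node=8 inference.py <args...>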

    if args.ckpt is not None:
        print(f'Loading the model from {args.ckpt} ...')
        print(f'{args.model_config} will be overridden.')
        if args.ckpt.lower().endswith('.nemo'):
            ckpt, cfg = load_nemo_ckpt(args.ckpt)
        else:
            cfg = config.load(args.model_config)
            ckpt = torch.load(args.ckpt, map_location='cpu')

        sd_key = 'ema_state_dict' if args.ema else 'state_dict'
        if args.ema and 'ema_state_dict' not in ckpt:
            print(f'WARNING: EMA weights are unavailable in {args.ckpt}.')
            sd_key = 'state_dict'
        state_dict = ckpt[sd_key]
    else:
        cfg = config.load(args.model_config)
        state_dict = None

    config.apply_config_overrides(cfg, args)

    symbols = helpers.add_ctc_blank(cfg['labels'])
    use_dali = args.dali_device in ('cpu', 'gpu')
    dataset_kw, features_kw = config.input(cfg, 'val')
    measure_perf = args.steps > 0

    # dataset
    if args.transcribe_wav or args.transcribe_filelist:
        if use_dali:
            print("DALI is supported only with .json manifest input; disabling")
            use_dali = False

        assert not cfg['input_val']['audio_dataset'].get('pad_to_max_duration',
                                                         False)
        assert not (args.transcribe_wav and args.transcribe_filelist)

        if args.transcribe_wav:
            dataset = SingleAudioDataset(args.transcribe_wav)
        else:
            dataset = FilelistDataset(args.transcribe_filelist)

        data_loader = get_data_loader(dataset,
                                      batch_size=1,
                                      multi_gpu=multi_gpu,
                                      shuffle=False,
                                      num_workers=0,
                                      drop_last=measure_perf)

        _, features_kw = config.input(cfg, 'val')
        feat_proc = FilterbankFeatures(**features_kw)

    elif use_dali:
        # pad_to_max_duration is not supported by DALI; pad with a simple
        # featurizer on top of it instead
        if features_kw['pad_to_max_duration']:
            feat_proc = BaseFeatures(
                pad_align=features_kw['pad_align'],
                pad_to_max_duration=True,
                max_duration=features_kw['max_duration'],
                sample_rate=features_kw['sample_rate'],
                window_size=features_kw['window_size'],
                window_stride=features_kw['window_stride'])
            features_kw['pad_to_max_duration'] = False
        else:
            feat_proc = None

        data_loader = DaliDataLoader(
            gpu_id=args.local_rank or 0,
            dataset_path=args.dataset_dir,
            config_data=dataset_kw,
            config_features=features_kw,
            json_names=args.val_manifests,
            batch_size=args.batch_size,
            pipeline_type=("train" if measure_perf else "val"),  # no drop_last
            device_type=args.dali_device,
            symbols=symbols)

    else:
        dataset = AudioDataset(args.dataset_dir,
                               args.val_manifests,
                               symbols,
                               **dataset_kw)
        data_loader = get_data_loader(dataset,
                                      args.batch_size,
                                      multi_gpu=multi_gpu,
                                      shuffle=False,
                                      num_workers=4,
                                      drop_last=False)
        feat_proc = FilterbankFeatures(**features_kw)

    model = QuartzNet(encoder_kw=config.encoder(cfg),
                      decoder_kw=config.decoder(cfg, n_classes=len(symbols)))

    if state_dict is not None:
        model.load_state_dict(state_dict, strict=True)

    model.to(device)
    model.eval()

    if feat_proc is not None:
        feat_proc.to(device)
        feat_proc.eval()

    if args.amp:
        model = model.half()

    if args.torchscript:
        greedy_decoder = GreedyCTCDecoder()
        feat_proc, model, greedy_decoder = torchscript_export(
            data_loader, feat_proc, model, greedy_decoder, args.output_dir,
            use_amp=args.amp, use_conv_masks=True,
            model_config=args.model_config, device=device,
            save=args.torchscript_export)

    if multi_gpu:
        model = torch.nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank)

    agg = {'txts': [], 'preds': [], 'logits': []}
    dur = {'data': [], 'dnn': [], 'data+dnn': []}

    looped_loader = chain.from_iterable(repeat(data_loader))
    greedy_decoder = GreedyCTCDecoder()

    sync = lambda: torch.cuda.synchronize() if device.type == 'cuda' else None

    steps = args.steps + args.warmup_steps or len(data_loader)
    with torch.no_grad():
        for it, batch in enumerate(tqdm(looped_loader, initial=1, total=steps)):
            if use_dali:
                feats, feat_lens, txt, txt_lens = batch
                if feat_proc is not None:
                    feats, feat_lens = feat_proc(feats, feat_lens)
            else:
                batch = [t.to(device, non_blocking=True) for t in batch]
                audio, audio_lens, txt, txt_lens = batch
                feats, feat_lens = feat_proc(audio, audio_lens)

            sync()
            t1 = time.time()

            if args.amp:
                feats = feats.half()

            # unwrap DistributedDataParallel to reach the encoder attribute
            if getattr(model, 'module', model).encoder.use_conv_masks:
                log_probs, log_prob_lens = model(feats, feat_lens)
            else:
                log_probs = model(feats, feat_lens)

            preds = greedy_decoder(log_probs)

            sync()
            t2 = time.time()

            # burn-in period; wait for a new loader due to num_workers
            if it >= 1 and (args.steps == 0 or it >= args.warmup_steps):
                dur['data'].append(t1 - t0)
                dur['dnn'].append(t2 - t1)
                dur['data+dnn'].append(t2 - t0)

            if txt is not None:
                agg['txts'] += helpers.gather_transcripts([txt], [txt_lens],
                                                          symbols)
            agg['preds'] += helpers.gather_predictions([preds], symbols)
            agg['logits'].append(log_probs)

            if it + 1 == steps:
                break

            sync()
            t0 = time.time()
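
    # Timing split: 'data' covers loading plus feature extraction (t0 -> t1),
    # 'dnn' covers the forward pass and greedy decoding (t1 -> t2), and
    # 'data+dnn' is the end-to-end latency per batch (t0 -> t2).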

    # communicate the results
    if args.transcribe_wav:
        for idx, p in enumerate(agg['preds']):
            print_once(f'Prediction {idx+1: >3}: {p}')
    elif args.transcribe_filelist:
        pass
    elif not multi_gpu or distrib.get_rank() == 0:
        wer, _ = process_evaluation_epoch(agg)
        dllogger.log(step=(), data={'eval_wer': 100 * wer})

    if args.save_predictions:
        with open(args.save_predictions, 'w') as f:
            f.write('\n'.join(agg['preds']))

    if args.save_logits:
        logits = torch.cat(agg['logits'], dim=0).cpu()
        torch.save(logits, args.save_logits)

    # report timings
    if len(dur['data']) >= 20:
        ratios = [0.9, 0.95, 0.99]
        for stage in dur:
            lat = durs_to_percentiles(dur[stage], ratios)
            for k in [0.99, 0.95, 0.9, 0.5]:
                # use the same key format as the dllogger.metadata calls above
                dllogger.log(step=(),
                             data={f'{stage.lower()}_latency_{k}': lat[k]})
    else:
        print_once('Not enough samples to measure latencies.')


if __name__ == "__main__":
    main()