Kaynağa Gözat

Merge: [GNMT/PyT] Added synchronization before collecting timers, switched to correct averaging when reporting avg throughput

Krzysztof Kudrynski 3 yıl önce
ebeveyn
işleme
ed28348c52

+ 5 - 1
PyTorch/Translation/GNMT/seq2seq/inference/translator.py

@@ -182,6 +182,8 @@ class Translator:
         output = []
 
         for i, (src, indices) in enumerate(loader):
+            if device.type == 'cuda':
+                torch.cuda.synchronize()
             translate_timer = time.time()
             src, src_length = src
             stats['total_enc_len'] = int(src_length.sum())
@@ -207,12 +209,14 @@ class Translator:
                     detok = self.tokenizer.detokenize(pred)
                     output.append(detok)
 
+            if device.type == 'cuda':
+                torch.cuda.synchronize()
             elapsed = time.time() - translate_timer
             batch_time.update(elapsed, batch_size)
 
             total_tokens = stats['total_dec_len'] + stats['total_enc_len']
             ttps = total_tokens / elapsed
-            tot_tok_per_sec.update(ttps, batch_size)
+            tot_tok_per_sec.update(ttps, elapsed)
 
             iterations.update(stats['iters'])
             enc_seq_len.update(stats['total_enc_len'] / batch_size, batch_size)

+ 9 - 3
PyTorch/Translation/GNMT/seq2seq/train/trainer.py

@@ -222,6 +222,8 @@ class Seq2SeqTrainer:
 
         batch_size = data_loader.batch_size
 
+        if self.device.type == 'cuda':
+            torch.cuda.synchronize()
         end = time.time()
         for i, (src, tgt) in enumerate(data_loader):
             self.save_counter += 1
@@ -241,12 +243,14 @@ class Seq2SeqTrainer:
             losses_per_sentence.update(loss_per_sentence, batch_size)
 
             # measure elapsed time
+            if self.device.type == 'cuda':
+                torch.cuda.synchronize()
             elapsed = time.time() - end
             batch_time.update(elapsed)
-            src_tok_time.update(num_toks['src'] / elapsed)
-            tgt_tok_time.update(num_toks['tgt'] / elapsed)
+            src_tok_time.update(num_toks['src'] / elapsed, elapsed)
+            tgt_tok_time.update(num_toks['tgt'] / elapsed, elapsed)
             tot_num_toks = num_toks['tgt'] + num_toks['src']
-            tot_tok_time.update(tot_num_toks / elapsed)
+            tot_tok_time.update(tot_num_toks / elapsed, elapsed)
             self.loss = losses_per_token.avg
 
             if training and i in eval_iters:
@@ -298,6 +302,8 @@ class Seq2SeqTrainer:
                         if rank == 0:
                             self.save(identifier=identifier)
 
+            if self.device.type == 'cuda':
+                torch.cuda.synchronize()
             end = time.time()
 
         tot_tok_time.reduce('sum')

+ 4 - 1
PyTorch/Translation/GNMT/seq2seq/utils.py

@@ -132,10 +132,13 @@ def setup_seeds(master_seed, epochs, device):
 
 def barrier():
     """
-    Call torch.distributed.barrier() if distritubed is in use
+    Call torch.distributed.barrier() if distributed is in use, else call
+    torch.cuda.synchronize() if CUDA is initialized.
     """
     if torch.distributed.is_available() and torch.distributed.is_initialized():
         torch.distributed.barrier()
+    elif torch.cuda.is_available() and torch.cuda.is_initialized():
+        torch.cuda.synchronize()
 
 
 def get_rank():

+ 1 - 1
PyTorch/Translation/GNMT/train.py

@@ -634,7 +634,7 @@ def main():
     logging.info(f'Total training time {training_time:.0f} s')
 
     table = TrainingTable()
-    avg_training_perf = sum(training_perf) / len(training_perf)
+    avg_training_perf = len(training_perf) / sum(1 / v for v in training_perf)
     table.add(utils.get_world_size(), args.train_batch_size, test_bleu,
               avg_training_perf, training_time)
     if utils.get_rank() == 0:

+ 2 - 4
PyTorch/Translation/GNMT/translate.py

@@ -352,12 +352,10 @@ def main():
             latency_table.write('Inference latency', 'fp16',
                                 relative=relative, reverse_speedup=True)
 
-    avg_throughput = np.array(stats['throughputs']).mean()
-    avg_latency = np.array(stats['runtimes']).mean()
     summary = {
-        'eval_throughput': avg_throughput,
+        'eval_throughput': stats['tokens_per_sec'],
         'eval_bleu': stats['bleu'],
-        'eval_avg_latency': avg_latency,
+        'eval_avg_latency': np.array(stats['runtimes']).mean(),
         }
     for p in args.percentiles:
         summary[f'eval_{p}%_latency'] = np.percentile(stats['runtimes'], p)