Ver Fonte

[Maskrcnn/PyT] Synchronize before reporting DLL time

Shriya Balaji Palsamudram há 3 anos
pai
commit
c44b7addd1

+ 2 - 4
PyTorch/Segmentation/MaskRCNN/pytorch/maskrcnn_benchmark/engine/inference.py

@@ -10,9 +10,7 @@ import torch
 from tqdm import tqdm
 
 from maskrcnn_benchmark.data.datasets.evaluation import evaluate
-from ..utils.comm import is_main_process
-from ..utils.comm import all_gather
-from ..utils.comm import synchronize
+from ..utils.comm import is_main_process, all_gather, synchronize, synchronized_timestamp
 
 
 def compute_on_dataset(model, data_loader, device, steps=-1):
@@ -83,7 +81,7 @@ def inference(
     )
     dataset = data_loader.dataset
     dllogger.log(step="PARAMETER", data={"eval_dataset_name": dataset_name, "eval_num_samples":len(dataset)})
-    start_time = time.time()
+    start_time = synchronized_timestamp()
     with torch.autograd.profiler.emit_nvtx(enabled=profile):
         predictions, latency = compute_on_dataset(model, data_loader, device, steps=steps)
     # wait for all processes to complete before measuring the time

+ 4 - 4
PyTorch/Segmentation/MaskRCNN/pytorch/maskrcnn_benchmark/engine/trainer.py

@@ -7,7 +7,7 @@ import time
 import torch
 import torch.distributed as dist
 
-from maskrcnn_benchmark.utils.comm import get_world_size
+from maskrcnn_benchmark.utils.comm import get_world_size, synchronized_timestamp
 from maskrcnn_benchmark.utils.metric_logger import MetricLogger
 
 def reduce_loss_dict(loss_dict):
@@ -90,8 +90,8 @@ def do_train(
     prefetcher = Prefetcher(data_loader, device)
     start_iter = arguments["iteration"]
     model.train()
-    start_training_time = time.time()
-    end = time.time()
+    start_training_time = synchronized_timestamp()
+    end = start_training_time
     if use_amp:
         scaler = torch.cuda.amp.GradScaler(init_scale=8192.0)
     for iteration, (images, targets) in enumerate(prefetcher, start_iter):
@@ -169,7 +169,7 @@ def do_train(
             if early_exit:
                 break
 
-    total_training_time = time.time() - start_training_time
+    total_training_time = synchronized_timestamp() - start_training_time
     total_time_str = str(datetime.timedelta(seconds=total_training_time))
     dllogger.log(step=tuple(), data={"e2e_train_time": total_training_time,
                                                    "train_perf_fps": max_iter * cfg.SOLVER.IMS_PER_BATCH / total_training_time})

+ 6 - 0
PyTorch/Segmentation/MaskRCNN/pytorch/maskrcnn_benchmark/utils/comm.py

@@ -116,3 +116,9 @@ def reduce_dict(input_dict, average=True):
             values /= world_size
         reduced_dict = {k: v for k, v in zip(names, values)}
     return reduced_dict
+
+
+def synchronized_timestamp():
+    torch.cuda.synchronize()
+    return time.time()
+