run_pretraining.py

# Copyright (c) 2022 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import time
import logging

import paddle
import paddle.distributed.fleet as fleet

from utils.config import parse_args, print_args
from utils.save_load import init_program
from utils.logger import setup_loggers
from utils.affinity import set_cpu_affinity
from utils.utility import set_seed, get_trainer_id, get_num_trainers

import program
import dllogger

from lddl.paddle import get_bert_pretrain_data_loader


def main():
    """
    Entry point to train a BERT model in five steps:
        1. Parse arguments from the command line.
        2. Initialize distributed-training settings, including CPU affinity.
        3. Build the training paddle.static.Program.
        4. Load a checkpoint or pretrained model if given.
        5. Run the program (train on the datasets and save the model if necessary).
    """
    now = time.time()

    args = parse_args()
    setup_loggers(args.report_file)

    if args.show_config:
        print_args(args)
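
    # Select the GPU and initialize Fleet in collective mode for multi-GPU
    # data-parallel training; optionally pin each worker to nearby CPU cores.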
    device = paddle.set_device('gpu')
    fleet.init(is_collective=True)
    if args.enable_cpu_affinity:
        set_cpu_affinity()

    # Create the random seed for the worker
    set_seed(args.seed + get_trainer_id())
    dllogger.log(step="PARAMETER", data={"SEED": args.seed})
    dllogger.log(step="PARAMETER", data={"train_start": True})
    dllogger.log(step="PARAMETER",
                 data={"batch_size_per_gpu": args.batch_size})
    dllogger.log(step="PARAMETER", data={"learning_rate": args.learning_rate})
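
    # Build the BERT model, LR scheduler, optimizer, and loss into Paddle's
    # default static programs; `feeds` lists the input variables the data
    # loader must supply.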
    main_program = paddle.static.default_main_program()
    startup_program = paddle.static.default_startup_program()

    model, lr_scheduler, optimizer, loss, feeds = program.build(
        args, main_program, startup_program)
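
    # Run the startup program once to allocate and initialize all parameters.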
    exe = paddle.static.Executor(device)
    exe.run(startup_program)
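
    # Restore weights from a checkpoint or pretrained model if one was given;
    # `progress` carries the resume state (e.g. the last completed epoch).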
    progress = init_program(args, program=main_program, exe=exe, model=model)
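
    # Build the LDDL data loader for BERT pretraining; it shards the input
    # data across trainers and can resume from the recorded epoch.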
    train_dataloader = get_bert_pretrain_data_loader(
        args.input_dir,
        vocab_file=args.vocab_file,
        data_loader_kwargs={
            'batch_size': args.batch_size,
            'num_workers': args.num_workers,
            'persistent_workers': True,
            'feed_list': feeds
        },
        base_seed=args.seed,
        log_dir=None if args.output_dir is None else
        os.path.join(args.output_dir, 'lddl_log'),
        log_level=logging.WARNING,
        start_epoch=0 if progress is None else progress.get("epoch", 0), )
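
    # When AMP is enabled, prepare the parameters for mixed-precision
    # execution on the device before training starts.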
    if args.amp:
        optimizer.amp_init(device)
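
    # Run the training loop; it returns the global step count, the final loss
    # value, and the raw training time.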
    global_step, final_loss, train_time_raw = program.run(
        exe, main_program, args, lr_scheduler, loss, train_dataloader,
        progress)
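
    # Only the first trainer reports results. Throughput is sequences/second:
    # per-GPU batch size * gradient-merge steps * optimizer steps * number of
    # trainers / raw training time (warmup steps excluded in benchmark mode).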
    if get_trainer_id() == 0:
        e2e_time = time.time() - now
        if args.benchmark:
            training_perf = args.batch_size * args.gradient_merge_steps * (
                global_step - args.benchmark_warmup_steps
            ) * get_num_trainers() / train_time_raw
        else:
            training_perf = (args.batch_size * args.gradient_merge_steps *
                             global_step * get_num_trainers() / train_time_raw)

        dllogger.log(step=tuple(),
                     data={
                         "e2e_train_time": e2e_time,
                         "training_sequences_per_second": training_perf,
                         "final_loss": final_loss,
                         "raw_train_time": train_time_raw
                     })


if __name__ == "__main__":
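    # Static-graph mode must be enabled before any program is built.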
    paddle.enable_static()
    main()