# config.yaml — training configuration (Hydra / OmegaConf)
  1. # project root working directory, automatically read by hydra (.../UNet3P)
  2. WORK_DIR: ${hydra:runtime.cwd}
  3. DATA_PREPARATION:
  4. # unprocessed LiTS scan data paths, for custom data training skip this section details
  5. SCANS_TRAIN_DATA_PATH: "/data/Training Batch 2/"
  6. SCANS_VAL_DATA_PATH: "/data/Training Batch 1/"
  7. # Resize scans to model input size
  8. RESIZED_HEIGHT: ${INPUT.HEIGHT}
  9. RESIZED_WIDTH: ${INPUT.WIDTH}
  10. # Clip scans value in given range
  11. SCAN_MIN_VALUE: -200
  12. SCAN_MAX_VALUE: 250
  13. DATASET:
  14. # paths should be relative from project root path
  15. TRAIN:
  16. IMAGES_PATH: "/data/train/images"
  17. MASK_PATH: "/data/train/mask"
  18. VAL:
  19. IMAGES_PATH: "/data/val/images"
  20. MASK_PATH: "/data/val/mask"
  21. MODEL:
  22. # available variants are unet3plus, unet3plus_deepsup, unet3plus_deepsup_cgm
  23. TYPE: "unet3plus"
  24. WEIGHTS_FILE_NAME: model_${MODEL.TYPE}
  25. BACKBONE:
  26. # available variants are unet3plus, vgg16, vgg19
  27. TYPE: "vgg19"
  28. DATA_GENERATOR_TYPE: "DALI_GENERATOR" # options are TF_GENERATOR or DALI_GENERATOR
  29. SEED: 5 # for result's reproducibility
  30. VERBOSE: 1 # For logs printing details, available options are 0, 1, 2
  31. DATALOADER_WORKERS: 3 # number of workers used for data loading
  32. SHOW_CENTER_CHANNEL_IMAGE: True # only true for UNet3+ for custom dataset it should be False
  33. # Model input shape
  34. INPUT:
  35. HEIGHT: 320
  36. WIDTH: 320
  37. CHANNELS: 3
  38. # Model output classes
  39. OUTPUT:
  40. CLASSES: 2
  41. HYPER_PARAMETERS:
  42. EPOCHS: 100
  43. BATCH_SIZE: 16 # specify per gpu batch size
  44. LEARNING_RATE: 5e-5 # 0.1, 1e-3, 3e-4, 5e-5
  45. CALLBACKS:
  46. # paths should be relative from project root path
  47. TENSORBOARD:
  48. PATH: "/checkpoint/tb_logs"
  49. EARLY_STOPPING:
  50. PATIENCE: 100
  51. MODEL_CHECKPOINT:
  52. PATH: "/checkpoint"
  53. SAVE_WEIGHTS_ONLY: True
  54. SAVE_BEST_ONLY: True
  55. CSV_LOGGER:
  56. PATH: "/checkpoint"
  57. APPEND_LOGS: False
  58. PREPROCESS_DATA:
  59. RESIZE:
  60. VALUE: False # if True, resize to input height and width
  61. HEIGHT: ${INPUT.HEIGHT}
  62. WIDTH: ${INPUT.WIDTH}
  63. IMAGE_PREPROCESSING_TYPE: "normalize"
  64. NORMALIZE_MASK:
  65. VALUE: False # if True, divide mask by given value
  66. NORMALIZE_VALUE: 255
  67. SHUFFLE:
  68. TRAIN:
  69. VALUE: True
  70. VAL:
  71. VALUE: False
  72. USE_MULTI_GPUS:
  73. VALUE: True # If True use multiple gpus for training
  74. # GPU_IDS: Could be integer or list of integers.
  75. # In case Integer: if integer value is -1 then it uses all available gpus.
  76. # otherwise if positive number, then use given number of gpus.
  77. # In case list of Integers: each integer will be considered as gpu id
  78. # e.g. [4, 5, 7] means use gpu 5,6 and 8 for training/evaluation
  79. GPU_IDS: -1
  80. OPTIMIZATION:
  81. AMP: True # Automatic Mixed Precision(AMP)
  82. XLA: True # Accelerated Linear Algebra(XLA)
  83. # to stop hydra from storing logs files
  84. # logs will be stored in outputs directory
  85. defaults:
  86. - _self_
  87. - override hydra/hydra_logging: disabled
  88. - override hydra/job_logging: disabled
  89. hydra:
  90. output_subdir: null