# data_load.py
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
import torch
from torch.utils.data import DataLoader
  25. class PadDataLoader(DataLoader):
  26. @staticmethod
  27. def pad_collate_fn(batch):
  28. """
  29. Apply zero-padding.
  30. """
  31. # TODO refactor
  32. result = dict()
  33. for key in batch[0].keys():
  34. # apply padding on dataset
  35. sub_batch = [elem[key] for elem in batch]
  36. # check diff dims
  37. if not isinstance(sub_batch[0], np.ndarray):
  38. # if list of float or int
  39. assert all([type(x) == type(sub_batch[0]) for x in sub_batch[1:]]), sub_batch
  40. if isinstance(sub_batch[0], int):
  41. sub_batch = torch.LongTensor(sub_batch)
  42. elif isinstance(sub_batch[0], float):
  43. sub_batch = torch.DoubleTensor(sub_batch)
  44. elif any(list(map(lambda x: x.shape != sub_batch[0].shape, sub_batch[1:]))):
  45. sub_batch = torch.from_numpy(__class__.pad_zero(sub_batch))
  46. else:
  47. sub_batch = torch.from_numpy(np.concatenate(np.expand_dims(sub_batch, axis=0)))
  48. result[key] = sub_batch
  49. return result
  50. def __init__(self, dataset, batch_size, num_workers, shuffle=True, pin_memory=True, drop_last=True):
  51. super().__init__(dataset,
  52. batch_size=batch_size,
  53. shuffle=shuffle,
  54. num_workers=num_workers,
  55. pin_memory=pin_memory,
  56. collate_fn=self.pad_collate_fn,
  57. drop_last=drop_last
  58. )
  59. @staticmethod
  60. def pad_zero(sub_batch):
  61. dims = [b.shape for b in sub_batch]
  62. max_dims = list(dims[0])
  63. for d_li in dims[1:]:
  64. for d_idx in range(len(d_li)):
  65. if max_dims[d_idx] < d_li[d_idx]:
  66. max_dims[d_idx] = d_li[d_idx]
  67. temp = np.zeros((len(sub_batch), *max_dims), dtype=sub_batch[0].dtype)
  68. for i, b in enumerate(sub_batch):
  69. if len(b.shape) == 1:
  70. temp[i, :b.shape[0]] = b
  71. elif len(b.shape) == 2:
  72. temp[i, :b.shape[0], :b.shape[1]] = b
  73. elif len(b.shape) == 3:
  74. temp[i, :b.shape[0], :b.shape[1], :b.shape[2]] = b
  75. else:
  76. raise ValueError
  77. return temp