# losses.py
  1. # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. import tensorflow as tf
  15. class DiceLoss(tf.keras.losses.Loss):
  16. def __init__(self, y_one_hot=True, reduce_batch=False, eps=1e-6, include_background=False):
  17. super().__init__()
  18. self.y_one_hot = y_one_hot
  19. self.reduce_batch = reduce_batch
  20. self.eps = eps
  21. self.include_background = include_background
  22. def dice_coef(self, y_true, y_pred):
  23. intersection = tf.reduce_sum(y_true * y_pred, axis=1)
  24. pred_sum = tf.reduce_sum(y_pred, axis=1)
  25. true_sum = tf.reduce_sum(y_true, axis=1)
  26. dice = (2.0 * intersection + self.eps) / (pred_sum + true_sum + self.eps)
  27. return tf.reduce_mean(dice, axis=0)
  28. @tf.function
  29. def call(self, y_true, y_pred):
  30. n_class = y_pred.shape[-1]
  31. if self.reduce_batch:
  32. flat_shape = (1, -1, n_class)
  33. else:
  34. flat_shape = (y_pred.shape[0], -1, n_class)
  35. if self.y_one_hot:
  36. y_true = tf.one_hot(y_true, n_class)
  37. flat_pred = tf.reshape(tf.cast(y_pred, tf.float32), flat_shape)
  38. flat_true = tf.reshape(y_true, flat_shape)
  39. dice_coefs = self.dice_coef(flat_true, tf.keras.activations.softmax(flat_pred, axis=-1))
  40. if not self.include_background:
  41. dice_coefs = dice_coefs[1:]
  42. dice_loss = tf.reduce_mean(1 - dice_coefs)
  43. return dice_loss
  44. class DiceCELoss(tf.keras.losses.Loss):
  45. def __init__(self, y_one_hot=True, **dice_kwargs):
  46. super().__init__()
  47. self.y_one_hot = y_one_hot
  48. self.dice_loss = DiceLoss(y_one_hot=False, **dice_kwargs)
  49. @tf.function
  50. def call(self, y_true, y_pred):
  51. y_pred = tf.cast(y_pred, tf.float32)
  52. n_class = y_pred.shape[-1]
  53. if self.y_one_hot:
  54. y_true = tf.one_hot(y_true, n_class)
  55. dice_loss = self.dice_loss(y_true, y_pred)
  56. ce_loss = tf.reduce_mean(
  57. tf.nn.softmax_cross_entropy_with_logits(
  58. labels=y_true,
  59. logits=y_pred,
  60. )
  61. )
  62. return dice_loss + ce_loss
  63. class WeightDecay:
  64. def __init__(self, factor):
  65. self.factor = factor
  66. @tf.function
  67. def __call__(self, model):
  68. # TODO: add_n -> accumulate_n ?
  69. return self.factor * tf.add_n([tf.nn.l2_loss(v) for v in model.trainable_variables if "norm" not in v.name])