# transformer.py
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
import torch.nn.functional as F

from common.text.symbols import pad_idx, symbols
from common.utils import mask_from_lens
  19. class PositionalEmbedding(nn.Module):
  20. def __init__(self, demb):
  21. super(PositionalEmbedding, self).__init__()
  22. self.demb = demb
  23. inv_freq = 1 / (10000 ** (torch.arange(0.0, demb, 2.0) / demb))
  24. self.register_buffer('inv_freq', inv_freq)
  25. def forward(self, pos_seq, bsz=None):
  26. sinusoid_inp = torch.ger(pos_seq, self.inv_freq)
  27. pos_emb = torch.cat([sinusoid_inp.sin(), sinusoid_inp.cos()], dim=1)
  28. if bsz is not None:
  29. return pos_emb[None, :, :].expand(bsz, -1, -1)
  30. else:
  31. return pos_emb[None, :, :]
  32. class PositionwiseFF(nn.Module):
  33. def __init__(self, d_model, d_inner, dropout, pre_lnorm=False):
  34. super(PositionwiseFF, self).__init__()
  35. self.d_model = d_model
  36. self.d_inner = d_inner
  37. self.dropout = dropout
  38. self.CoreNet = nn.Sequential(
  39. nn.Linear(d_model, d_inner), nn.ReLU(),
  40. nn.Dropout(dropout),
  41. nn.Linear(d_inner, d_model),
  42. nn.Dropout(dropout),
  43. )
  44. self.layer_norm = nn.LayerNorm(d_model)
  45. self.pre_lnorm = pre_lnorm
  46. def forward(self, inp):
  47. if self.pre_lnorm:
  48. # layer normalization + positionwise feed-forward
  49. core_out = self.CoreNet(self.layer_norm(inp))
  50. # residual connection
  51. output = core_out + inp
  52. else:
  53. # positionwise feed-forward
  54. core_out = self.CoreNet(inp)
  55. # residual connection + layer normalization
  56. output = self.layer_norm(inp + core_out)
  57. return output
  58. class PositionwiseConvFF(nn.Module):
  59. def __init__(self, d_model, d_inner, kernel_size, dropout, pre_lnorm=False):
  60. super(PositionwiseConvFF, self).__init__()
  61. self.d_model = d_model
  62. self.d_inner = d_inner
  63. self.dropout = dropout
  64. self.CoreNet = nn.Sequential(
  65. nn.Conv1d(d_model, d_inner, kernel_size, 1, (kernel_size // 2)),
  66. nn.ReLU(),
  67. # nn.Dropout(dropout), # worse convergence
  68. nn.Conv1d(d_inner, d_model, kernel_size, 1, (kernel_size // 2)),
  69. nn.Dropout(dropout),
  70. )
  71. self.layer_norm = nn.LayerNorm(d_model)
  72. self.pre_lnorm = pre_lnorm
  73. def forward(self, inp):
  74. return self._forward(inp)
  75. def _forward(self, inp):
  76. if self.pre_lnorm:
  77. # layer normalization + positionwise feed-forward
  78. core_out = inp.transpose(1, 2)
  79. core_out = self.CoreNet(self.layer_norm(core_out))
  80. core_out = core_out.transpose(1, 2)
  81. # residual connection
  82. output = core_out + inp
  83. else:
  84. # positionwise feed-forward
  85. core_out = inp.transpose(1, 2)
  86. core_out = self.CoreNet(core_out)
  87. core_out = core_out.transpose(1, 2)
  88. # residual connection + layer normalization
  89. output = self.layer_norm(inp + core_out)
  90. return output
  91. class MultiHeadAttn(nn.Module):
  92. def __init__(self, n_head, d_model, d_head, dropout, dropatt=0.1,
  93. pre_lnorm=False):
  94. super(MultiHeadAttn, self).__init__()
  95. self.n_head = n_head
  96. self.d_model = d_model
  97. self.d_head = d_head
  98. self.scale = 1 / (d_head ** 0.5)
  99. self.pre_lnorm = pre_lnorm
  100. self.qkv_net = nn.Linear(d_model, 3 * n_head * d_head)
  101. self.drop = nn.Dropout(dropout)
  102. self.dropatt = nn.Dropout(dropatt)
  103. self.o_net = nn.Linear(n_head * d_head, d_model, bias=False)
  104. self.layer_norm = nn.LayerNorm(d_model)
  105. def forward(self, inp, attn_mask=None):
  106. return self._forward(inp, attn_mask)
  107. def _forward(self, inp, attn_mask=None):
  108. residual = inp
  109. if self.pre_lnorm:
  110. # layer normalization
  111. inp = self.layer_norm(inp)
  112. n_head, d_head = self.n_head, self.d_head
  113. head_q, head_k, head_v = torch.chunk(self.qkv_net(inp), 3, dim=-1)
  114. head_q = head_q.view(inp.size(0), inp.size(1), n_head, d_head)
  115. head_k = head_k.view(inp.size(0), inp.size(1), n_head, d_head)
  116. head_v = head_v.view(inp.size(0), inp.size(1), n_head, d_head)
  117. q = head_q.permute(0, 2, 1, 3).reshape(-1, inp.size(1), d_head)
  118. k = head_k.permute(0, 2, 1, 3).reshape(-1, inp.size(1), d_head)
  119. v = head_v.permute(0, 2, 1, 3).reshape(-1, inp.size(1), d_head)
  120. attn_score = torch.bmm(q, k.transpose(1, 2))
  121. attn_score.mul_(self.scale)
  122. if attn_mask is not None:
  123. attn_mask = attn_mask.unsqueeze(1)
  124. attn_mask = attn_mask.repeat(n_head, attn_mask.size(2), 1)
  125. attn_score.masked_fill_(attn_mask, -float('inf'))
  126. attn_prob = F.softmax(attn_score, dim=2)
  127. attn_prob = self.dropatt(attn_prob)
  128. attn_vec = torch.bmm(attn_prob, v)
  129. attn_vec = attn_vec.view(n_head, inp.size(0), inp.size(1), d_head)
  130. attn_vec = attn_vec.permute(1, 2, 0, 3).contiguous().view(
  131. inp.size(0), inp.size(1), n_head * d_head)
  132. # linear projection
  133. attn_out = self.o_net(attn_vec)
  134. attn_out = self.drop(attn_out)
  135. if self.pre_lnorm:
  136. # residual connection
  137. output = residual + attn_out
  138. else:
  139. # residual connection + layer normalization
  140. output = self.layer_norm(residual + attn_out)
  141. return output
  142. # disabled; slower
  143. def forward_einsum(self, h, attn_mask=None):
  144. # multihead attention
  145. # [hlen x bsz x n_head x d_head]
  146. c = h
  147. if self.pre_lnorm:
  148. # layer normalization
  149. c = self.layer_norm(c)
  150. head_q = self.q_net(h)
  151. head_k, head_v = torch.chunk(self.kv_net(c), 2, -1)
  152. head_q = head_q.view(h.size(0), h.size(1), self.n_head, self.d_head)
  153. head_k = head_k.view(c.size(0), c.size(1), self.n_head, self.d_head)
  154. head_v = head_v.view(c.size(0), c.size(1), self.n_head, self.d_head)
  155. # [bsz x n_head x qlen x klen]
  156. # attn_score = torch.einsum('ibnd,jbnd->bnij', (head_q, head_k))
  157. attn_score = torch.einsum('bind,bjnd->bnij', (head_q, head_k))
  158. attn_score.mul_(self.scale)
  159. if attn_mask is not None and attn_mask.any().item():
  160. attn_score.masked_fill_(attn_mask[:, None, None, :], -float('inf'))
  161. # [bsz x qlen x klen x n_head]
  162. attn_prob = F.softmax(attn_score, dim=3)
  163. attn_prob = self.dropatt(attn_prob)
  164. # [bsz x n_head x qlen x klen] * [klen x bsz x n_head x d_head]
  165. # -> [qlen x bsz x n_head x d_head]
  166. attn_vec = torch.einsum('bnij,bjnd->bind', (attn_prob, head_v))
  167. attn_vec = attn_vec.contiguous().view(
  168. attn_vec.size(0), attn_vec.size(1), self.n_head * self.d_head)
  169. # linear projection
  170. attn_out = self.o_net(attn_vec)
  171. attn_out = self.drop(attn_out)
  172. if self.pre_lnorm:
  173. # residual connection
  174. output = h + attn_out
  175. else:
  176. # residual connection + layer normalization
  177. output = self.layer_norm(h + attn_out)
  178. return output
  179. class TransformerLayer(nn.Module):
  180. def __init__(self, n_head, d_model, d_head, d_inner, kernel_size, dropout,
  181. **kwargs):
  182. super(TransformerLayer, self).__init__()
  183. self.dec_attn = MultiHeadAttn(n_head, d_model, d_head, dropout, **kwargs)
  184. self.pos_ff = PositionwiseConvFF(d_model, d_inner, kernel_size, dropout,
  185. pre_lnorm=kwargs.get('pre_lnorm'))
  186. def forward(self, dec_inp, mask=None):
  187. output = self.dec_attn(dec_inp, attn_mask=~mask.squeeze(2))
  188. output *= mask
  189. output = self.pos_ff(output)
  190. output *= mask
  191. return output
  192. class FFTransformer(nn.Module):
  193. def __init__(self, n_layer, n_head, d_model, d_head, d_inner, kernel_size,
  194. dropout, dropatt, dropemb=0.0, embed_input=True, d_embed=None,
  195. pre_lnorm=False):
  196. super(FFTransformer, self).__init__()
  197. self.d_model = d_model
  198. self.n_head = n_head
  199. self.d_head = d_head
  200. if embed_input:
  201. self.word_emb = nn.Embedding(len(symbols), d_embed or d_model,
  202. padding_idx=pad_idx)
  203. else:
  204. self.word_emb = None
  205. self.pos_emb = PositionalEmbedding(self.d_model)
  206. self.drop = nn.Dropout(dropemb)
  207. self.layers = nn.ModuleList()
  208. for _ in range(n_layer):
  209. self.layers.append(
  210. TransformerLayer(
  211. n_head, d_model, d_head, d_inner, kernel_size, dropout,
  212. dropatt=dropatt, pre_lnorm=pre_lnorm)
  213. )
  214. def forward(self, dec_inp, seq_lens=None):
  215. if self.word_emb is None:
  216. inp = dec_inp
  217. mask = mask_from_lens(seq_lens).unsqueeze(2)
  218. else:
  219. inp = self.word_emb(dec_inp)
  220. # [bsz x L x 1]
  221. mask = (dec_inp != pad_idx).unsqueeze(2)
  222. pos_seq = torch.arange(inp.size(1), device=inp.device, dtype=inp.dtype)
  223. pos_emb = self.pos_emb(pos_seq) * mask
  224. out = self.drop(inp + pos_emb)
  225. for layer in self.layers:
  226. out = layer(out, mask=mask)
  227. # out = self.drop(out)
  228. return out, mask