tokenization.py

# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import collections
import re
import unicodedata

import six
import tensorflow as tf


def validate_case_matches_checkpoint(do_lower_case, init_checkpoint):
  """Checks whether the casing config is consistent with the checkpoint name."""

  # The casing has to be passed in by the user and there is no explicit check
  # as to whether it matches the checkpoint. The casing information probably
  # should have been stored in the bert_config.json file, but it's not, so
  # we have to heuristically detect it to validate.

  if not init_checkpoint:
    return

  m = re.match("^.*?([A-Za-z0-9_-]+)/bert_model.ckpt", init_checkpoint)
  if m is None:
    return

  model_name = m.group(1)

  lower_models = [
      "uncased_L-24_H-1024_A-16", "uncased_L-12_H-768_A-12",
      "multilingual_L-12_H-768_A-12", "chinese_L-12_H-768_A-12"
  ]

  cased_models = [
      "cased_L-12_H-768_A-12", "cased_L-24_H-1024_A-16",
      "multi_cased_L-12_H-768_A-12"
  ]

  is_bad_config = False
  if model_name in lower_models and not do_lower_case:
    is_bad_config = True
    actual_flag = "False"
    case_name = "lowercased"
    opposite_flag = "True"

  if model_name in cased_models and do_lower_case:
    is_bad_config = True
    actual_flag = "True"
    case_name = "cased"
    opposite_flag = "False"

  if is_bad_config:
    raise ValueError(
        "You passed in `--do_lower_case=%s` with `--init_checkpoint=%s`. "
        "However, `%s` seems to be a %s model, so you "
        "should pass in `--do_lower_case=%s` so that the fine-tuning matches "
        "how the model was pre-trained. If this error is wrong, please "
        "just comment out this check." % (actual_flag, init_checkpoint,
                                          model_name, case_name,
                                          opposite_flag))
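
# Illustrative note (not in the original file): for an init_checkpoint such as
# "some/dir/uncased_L-12_H-768_A-12/bert_model.ckpt", the regex above captures
# "uncased_L-12_H-768_A-12" as model_name, so calling
# validate_case_matches_checkpoint(False, that_path) raises a ValueError,
# while do_lower_case=True passes the check.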


def convert_to_unicode(text):
  """Converts `text` to Unicode (if it's not already), assuming utf-8 input."""
  if six.PY3:
    if isinstance(text, str):
      return text
    elif isinstance(text, bytes):
      return text.decode("utf-8", "ignore")
    else:
      raise ValueError("Unsupported string type: %s" % (type(text)))
  elif six.PY2:
    if isinstance(text, str):
      return text.decode("utf-8", "ignore")
    elif isinstance(text, unicode):
      return text
    else:
      raise ValueError("Unsupported string type: %s" % (type(text)))
  else:
    raise ValueError("Not running on Python2 or Python 3?")


def printable_text(text):
  """Returns text encoded in a way suitable for print or `tf.logging`."""

  # These functions want `str` for both Python2 and Python3, but in one case
  # it's a Unicode string and in the other it's a byte string.
  if six.PY3:
    if isinstance(text, str):
      return text
    elif isinstance(text, bytes):
      return text.decode("utf-8", "ignore")
    else:
      raise ValueError("Unsupported string type: %s" % (type(text)))
  elif six.PY2:
    if isinstance(text, str):
      return text
    elif isinstance(text, unicode):
      return text.encode("utf-8")
    else:
      raise ValueError("Unsupported string type: %s" % (type(text)))
  else:
    raise ValueError("Not running on Python2 or Python 3?")


def load_vocab(vocab_file):
  """Loads a vocabulary file into a dictionary."""
  vocab = collections.OrderedDict()
  index = 0
  with tf.gfile.GFile(vocab_file, "r") as reader:
    while True:
      token = convert_to_unicode(reader.readline())
      if not token:
        break
      token = token.strip()
      vocab[token] = index
      index += 1
  return vocab
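
# The vocab file is expected to hold one token per line; a token's id is its
# zero-based line number. Illustrative sketch (not in the original file),
# assuming a hypothetical three-line file containing "[PAD]", "[UNK]", "the":
#
#   vocab = load_vocab("tiny_vocab.txt")
#   # -> OrderedDict([("[PAD]", 0), ("[UNK]", 1), ("the", 2)])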


def convert_by_vocab(vocab, items):
  """Converts a sequence of [tokens|ids] using the vocab."""
  output = []
  for item in items:
    output.append(vocab[item])
  return output


def convert_tokens_to_ids(vocab, tokens):
  return convert_by_vocab(vocab, tokens)


def convert_ids_to_tokens(inv_vocab, ids):
  return convert_by_vocab(inv_vocab, ids)


def whitespace_tokenize(text):
  """Runs basic whitespace cleaning and splitting on a piece of text."""
  text = text.strip()
  if not text:
    return []
  tokens = text.split()
  return tokens


class FullTokenizer(object):
  """Runs end-to-end tokenization."""

  def __init__(self, vocab_file, do_lower_case=True):
    self.vocab = load_vocab(vocab_file)
    self.inv_vocab = {v: k for k, v in self.vocab.items()}
    self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
    self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)

  def tokenize(self, text):
    split_tokens = []
    for token in self.basic_tokenizer.tokenize(text):
      for sub_token in self.wordpiece_tokenizer.tokenize(token):
        split_tokens.append(sub_token)
    return split_tokens

  def convert_tokens_to_ids(self, tokens):
    return convert_by_vocab(self.vocab, tokens)

  def convert_ids_to_tokens(self, ids):
    return convert_by_vocab(self.inv_vocab, ids)
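
# A minimal usage sketch (not part of the original file), assuming a WordPiece
# vocab file at the hypothetical path "vocab.txt" taken from an uncased BERT
# release:
#
#   tokenizer = FullTokenizer(vocab_file="vocab.txt", do_lower_case=True)
#   tokens = tokenizer.tokenize(u"John Johanson's house")
#   # With the released uncased vocab this comes out roughly as
#   # ["john", "johan", "##son", "'", "s", "house"]
#   ids = tokenizer.convert_tokens_to_ids(tokens)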


class BasicTokenizer(object):
  """Runs basic tokenization (punctuation splitting, lower casing, etc.)."""

  def __init__(self, do_lower_case=True):
    """Constructs a BasicTokenizer.

    Args:
      do_lower_case: Whether to lower case the input.
    """
    self.do_lower_case = do_lower_case

  def tokenize(self, text):
    """Tokenizes a piece of text."""
    text = convert_to_unicode(text)
    text = self._clean_text(text)

    # This was added on November 1st, 2018 for the multilingual and Chinese
    # models. This is also applied to the English models now, but it doesn't
    # matter since the English models were not trained on any Chinese data
    # and generally don't have any Chinese data in them (there are Chinese
    # characters in the vocabulary because Wikipedia does have some Chinese
    # words in the English Wikipedia).
    text = self._tokenize_chinese_chars(text)

    orig_tokens = whitespace_tokenize(text)
    split_tokens = []
    for token in orig_tokens:
      if self.do_lower_case:
        token = token.lower()
        token = self._run_strip_accents(token)
      split_tokens.extend(self._run_split_on_punc(token))

    output_tokens = whitespace_tokenize(" ".join(split_tokens))
    return output_tokens

  def _run_strip_accents(self, text):
    """Strips accents from a piece of text."""
    text = unicodedata.normalize("NFD", text)
    output = []
    for char in text:
      cat = unicodedata.category(char)
      if cat == "Mn":
        continue
      output.append(char)
    return "".join(output)

  def _run_split_on_punc(self, text):
    """Splits punctuation on a piece of text."""
    chars = list(text)
    i = 0
    start_new_word = True
    output = []
    while i < len(chars):
      char = chars[i]
      if _is_punctuation(char):
        output.append([char])
        start_new_word = True
      else:
        if start_new_word:
          output.append([])
        start_new_word = False
        output[-1].append(char)
      i += 1

    return ["".join(x) for x in output]
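
  # Illustrative traces (not in the original file):
  #   _run_strip_accents(u"h\u00e9llo") NFD-decomposes the accented character
  #   and drops the combining mark (category "Mn"), returning u"hello".
  #   _run_split_on_punc(u"hello!!world") emits each punctuation character as
  #   its own token and starts a new word after it: ["hello", "!", "!", "world"].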

  def _tokenize_chinese_chars(self, text):
    """Adds whitespace around any CJK character."""
    output = []
    for char in text:
      cp = ord(char)
      if self._is_chinese_char(cp):
        output.append(" ")
        output.append(char)
        output.append(" ")
      else:
        output.append(char)
    return "".join(output)

  def _is_chinese_char(self, cp):
    """Checks whether CP is the codepoint of a CJK character."""
    # This defines a "chinese character" as anything in the CJK Unicode block:
    #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
    #
    # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
    # despite its name. The modern Korean Hangul alphabet is a different block,
    # as is Japanese Hiragana and Katakana. Those alphabets are used to write
    # space-separated words, so they are not treated specially and handled
    # like all of the other languages.
    if ((cp >= 0x4E00 and cp <= 0x9FFF) or  #
        (cp >= 0x3400 and cp <= 0x4DBF) or  #
        (cp >= 0x20000 and cp <= 0x2A6DF) or  #
        (cp >= 0x2A700 and cp <= 0x2B73F) or  #
        (cp >= 0x2B740 and cp <= 0x2B81F) or  #
        (cp >= 0x2B820 and cp <= 0x2CEAF) or
        (cp >= 0xF900 and cp <= 0xFAFF) or  #
        (cp >= 0x2F800 and cp <= 0x2FA1F)):  #
      return True

    return False
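
  # Illustrative example (not in the original file): _tokenize_chinese_chars(u"ab\u4e2dc")
  # pads the CJK codepoint U+4E2D (中) with spaces, returning u"ab 中 c", so the
  # later whitespace split keeps it as a standalone token.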

  def _clean_text(self, text):
    """Performs invalid character removal and whitespace cleanup on text."""
    output = []
    for char in text:
      cp = ord(char)
      if cp == 0 or cp == 0xfffd or _is_control(char):
        continue
      if _is_whitespace(char):
        output.append(" ")
      else:
        output.append(char)
    return "".join(output)


class WordpieceTokenizer(object):
  """Runs WordPiece tokenization."""

  def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=200):
    self.vocab = vocab
    self.unk_token = unk_token
    self.max_input_chars_per_word = max_input_chars_per_word

  def tokenize(self, text):
    """Tokenizes a piece of text into its word pieces.

    This uses a greedy longest-match-first algorithm to perform tokenization
    using the given vocabulary.

    For example:
      input = "unaffable"
      output = ["un", "##aff", "##able"]

    Args:
      text: A single token or whitespace separated tokens. This should have
        already been passed through `BasicTokenizer`.

    Returns:
      A list of wordpiece tokens.
    """

    text = convert_to_unicode(text)

    output_tokens = []
    for token in whitespace_tokenize(text):
      chars = list(token)
      if len(chars) > self.max_input_chars_per_word:
        output_tokens.append(self.unk_token)
        continue

      is_bad = False
      start = 0
      sub_tokens = []
      while start < len(chars):
        end = len(chars)
        cur_substr = None
        while start < end:
          substr = "".join(chars[start:end])
          if start > 0:
            substr = "##" + substr
          if substr in self.vocab:
            cur_substr = substr
            break
          end -= 1
        if cur_substr is None:
          is_bad = True
          break
        sub_tokens.append(cur_substr)
        start = end

      if is_bad:
        output_tokens.append(self.unk_token)
      else:
        output_tokens.extend(sub_tokens)

    return output_tokens
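
  # Illustrative trace (not in the original file), assuming "un", "##aff", and
  # "##able" are in the vocab and no longer matching piece of "unaffable" is:
  # the inner loop tries "unaffable", "unaffabl", ... and settles on "un", then
  # restarts at the remainder with the "##" prefix, matching "##aff" and then
  # "##able", giving ["un", "##aff", "##able"]. If some remainder matches
  # nothing at all, the whole token is replaced by unk_token.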


def _is_whitespace(char):
  """Checks whether `char` is a whitespace character."""
  # \t, \n, and \r are technically control characters but we treat them
  # as whitespace since they are generally considered as such.
  if char == " " or char == "\t" or char == "\n" or char == "\r":
    return True
  cat = unicodedata.category(char)
  if cat == "Zs":
    return True
  return False


def _is_control(char):
  """Checks whether `char` is a control character."""
  # These are technically control characters but we count them as whitespace
  # characters.
  if char == "\t" or char == "\n" or char == "\r":
    return False
  cat = unicodedata.category(char)
  if cat in ("Cc", "Cf"):
    return True
  return False


def _is_punctuation(char):
  """Checks whether `char` is a punctuation character."""
  cp = ord(char)
  # We treat all non-letter/number ASCII as punctuation.
  # Characters such as "^", "$", and "`" are not in the Unicode
  # Punctuation class but we treat them as punctuation anyways, for
  # consistency.
  if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
      (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
    return True
  cat = unicodedata.category(char)
  if cat.startswith("P"):
    return True
  return False
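

# A minimal, hedged usage sketch (not part of the original file). The vocab
# path below is a placeholder; point it at the vocab.txt that ships with a
# BERT checkpoint before running.
if __name__ == "__main__":
  tokenizer = FullTokenizer(vocab_file="vocab.txt", do_lower_case=True)
  tokens = tokenizer.tokenize(u"This is an example for WordPiece tokenization.")
  ids = tokenizer.convert_tokens_to_ids(tokens)
  print(printable_text(" ".join(tokens)))
  print(ids)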