# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import tempfile

import tokenization
import six
import tensorflow as tf


class TokenizationTest(tf.test.TestCase):

  def test_full_tokenizer(self):
    vocab_tokens = [
        "[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn",
        "##ing", ","
    ]
    with tempfile.NamedTemporaryFile(delete=False) as vocab_writer:
      # NamedTemporaryFile is opened in binary mode, so the vocab text must be
      # encoded to bytes on Python 3.
      if six.PY2:
        vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
      else:
        vocab_writer.write("".join(
            [x + "\n" for x in vocab_tokens]).encode("utf-8"))

      vocab_file = vocab_writer.name

    tokenizer = tokenization.FullTokenizer(vocab_file)
    os.unlink(vocab_file)

    tokens = tokenizer.tokenize(u"UNwant\u00E9d,running")
    self.assertAllEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])

    self.assertAllEqual(
        tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])
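
  # BasicTokenizer splits every CJK character into its own token, even when
  # there is no surrounding whitespace.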
  def test_chinese(self):
    tokenizer = tokenization.BasicTokenizer()

    self.assertAllEqual(
        tokenizer.tokenize(u"ah\u535A\u63A8zz"),
        [u"ah", u"\u535A", u"\u63A8", u"zz"])
  def test_basic_tokenizer_lower(self):
    tokenizer = tokenization.BasicTokenizer(do_lower_case=True)

    self.assertAllEqual(
        tokenizer.tokenize(u" \tHeLLo!how \n Are yoU? "),
        ["hello", "!", "how", "are", "you", "?"])
    self.assertAllEqual(tokenizer.tokenize(u"H\u00E9llo"), ["hello"])

  def test_basic_tokenizer_no_lower(self):
    tokenizer = tokenization.BasicTokenizer(do_lower_case=False)

    self.assertAllEqual(
        tokenizer.tokenize(u" \tHeLLo!how \n Are yoU? "),
        ["HeLLo", "!", "how", "Are", "yoU", "?"])
  def test_wordpiece_tokenizer(self):
    vocab_tokens = [
        "[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn",
        "##ing"
    ]

    vocab = {}
    for (i, token) in enumerate(vocab_tokens):
      vocab[token] = i
    tokenizer = tokenization.WordpieceTokenizer(vocab=vocab)

    self.assertAllEqual(tokenizer.tokenize(""), [])

    self.assertAllEqual(
        tokenizer.tokenize("unwanted running"),
        ["un", "##want", "##ed", "runn", "##ing"])

    self.assertAllEqual(
        tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
  def test_convert_tokens_to_ids(self):
    vocab_tokens = [
        "[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn",
        "##ing"
    ]

    vocab = {}
    for (i, token) in enumerate(vocab_tokens):
      vocab[token] = i

    self.assertAllEqual(
        tokenization.convert_tokens_to_ids(
            vocab, ["un", "##want", "##ed", "runn", "##ing"]), [7, 4, 5, 8, 9])
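
  # The _is_* helpers classify single characters (by Unicode category) and are
  # used by BasicTokenizer to decide where to split and what to strip.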
  def test_is_whitespace(self):
    self.assertTrue(tokenization._is_whitespace(u" "))
    self.assertTrue(tokenization._is_whitespace(u"\t"))
    self.assertTrue(tokenization._is_whitespace(u"\r"))
    self.assertTrue(tokenization._is_whitespace(u"\n"))
    self.assertTrue(tokenization._is_whitespace(u"\u00A0"))

    self.assertFalse(tokenization._is_whitespace(u"A"))
    self.assertFalse(tokenization._is_whitespace(u"-"))

  def test_is_control(self):
    self.assertTrue(tokenization._is_control(u"\u0005"))

    self.assertFalse(tokenization._is_control(u"A"))
    self.assertFalse(tokenization._is_control(u" "))
    self.assertFalse(tokenization._is_control(u"\t"))
    self.assertFalse(tokenization._is_control(u"\r"))
    self.assertFalse(tokenization._is_control(u"\U0001F4A9"))

  def test_is_punctuation(self):
    self.assertTrue(tokenization._is_punctuation(u"-"))
    self.assertTrue(tokenization._is_punctuation(u"$"))
    self.assertTrue(tokenization._is_punctuation(u"`"))
    self.assertTrue(tokenization._is_punctuation(u"."))

    self.assertFalse(tokenization._is_punctuation(u"A"))
    self.assertFalse(tokenization._is_punctuation(u" "))


if __name__ == "__main__":
  tf.test.main()