#!/usr/bin/env bash
#
# Copyright (c) 2016-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
# This script reproduces the results from Table 1 of the following paper:
# Bag of Tricks for Efficient Text Classification, arXiv:1607.01759, 2016

# Print the input lines in random order.
myshuf() {
  perl -MList::Util=shuffle -e 'print shuffle(<>);' "$@";
}
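# Illustrative usage (not executed by the script):
#   printf 'a\nb\nc\n' | myshuf
# prints the same three lines in a random order.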
# Lowercase the input, prepend the fastText label marker to each line, pad
# most punctuation with spaces (semicolons and colons are dropped), squeeze
# repeated spaces, and shuffle the resulting examples.
normalize_text() {
  tr '[:upper:]' '[:lower:]' | sed -e 's/^/__label__/g' | \
    sed -e "s/'/ ' /g" -e 's/"//g' -e 's/\./ \. /g' -e 's/<br \/>/ /g' \
    -e 's/,/ , /g' -e 's/(/ ( /g' -e 's/)/ ) /g' -e 's/\!/ \! /g' \
    -e 's/\?/ \? /g' -e 's/\;/ /g' -e 's/\:/ /g' | tr -s " " | myshuf
}
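# Illustrative example (assumed input, not executed): a raw CSV line such as
#   "3","Stocks Slide, Oil Rises."
# comes out of normalize_text as roughly
#   __label__3 , stocks slide , oil rises .
# which is the "__label__<class> <tokens>" format fastText expects for
# supervised training; the trailing myshuf also shuffles line order across
# the whole file.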
DATASET=(
  ag_news
  sogou_news
  dbpedia
  yelp_review_polarity
  yelp_review_full
  yahoo_answers
  amazon_review_full
  amazon_review_polarity
)

# Google Drive file IDs, in the same order as DATASET.
ID=(
  0Bz8a_Dbh9QhbUDNpeUdjb0wxRms # ag_news
  0Bz8a_Dbh9QhbUkVqNEszd0pHaFE # sogou_news
  0Bz8a_Dbh9QhbQ2Vic1kxMmZZQ1k # dbpedia
  0Bz8a_Dbh9QhbNUpYQ2N3SGlFaDg # yelp_review_polarity
  0Bz8a_Dbh9QhbZlU4dXhHTFhZQU0 # yelp_review_full
  0Bz8a_Dbh9Qhbd2JNdDBsQUdocVU # yahoo_answers
  0Bz8a_Dbh9QhbZVhsUnRWRDhETzA # amazon_review_full
  0Bz8a_Dbh9QhbaW12WVVZS2drcnM # amazon_review_polarity
)
# These learning rates were chosen by validation on a subset of the training set.
LR=( 0.25 0.5 0.5 0.1 0.1 0.1 0.05 0.05 )

RESULTDIR=result
DATADIR=data

mkdir -p "${RESULTDIR}"
mkdir -p "${DATADIR}"
# Small datasets first: only ag_news (index 0) is small enough for Google
# Drive to serve directly, without the confirmation page handled below.
for i in {0..0}
do
  echo "Downloading dataset ${DATASET[i]}"
  if [ ! -f "${DATADIR}/${DATASET[i]}.train" ]
  then
    wget -c "https://drive.google.com/uc?export=download&id=${ID[i]}" -O "${DATADIR}/${DATASET[i]}_csv.tar.gz"
    tar -xzvf "${DATADIR}/${DATASET[i]}_csv.tar.gz" -C "${DATADIR}"
    normalize_text < "${DATADIR}/${DATASET[i]}_csv/train.csv" > "${DATADIR}/${DATASET[i]}.train"
    normalize_text < "${DATADIR}/${DATASET[i]}_csv/test.csv" > "${DATADIR}/${DATASET[i]}.test"
  fi
done
# Large datasets require a bit more work: Google Drive interposes an extra
# confirmation page for files too big to virus-scan. The first curl fetches
# that page and saves the session cookies; the second follows the confirmed
# download link scraped out of its HTML.
for i in {1..7}
do
  echo "Downloading dataset ${DATASET[i]}"
  if [ ! -f "${DATADIR}/${DATASET[i]}.train" ]
  then
    curl -c /tmp/cookies "https://drive.google.com/uc?export=download&id=${ID[i]}" > /tmp/intermezzo.html
    curl -L -b /tmp/cookies "https://drive.google.com$(grep -Po 'uc-download-link" [^>]* href="\K[^"]*' /tmp/intermezzo.html | sed 's/\&amp;/\&/g')" > "${DATADIR}/${DATASET[i]}_csv.tar.gz"
    tar -xzvf "${DATADIR}/${DATASET[i]}_csv.tar.gz" -C "${DATADIR}"
    normalize_text < "${DATADIR}/${DATASET[i]}_csv/train.csv" > "${DATADIR}/${DATASET[i]}.train"
    normalize_text < "${DATADIR}/${DATASET[i]}_csv/test.csv" > "${DATADIR}/${DATASET[i]}.test"
  fi
done
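# If the HTML scrape above stops working (the markup of Drive's confirmation
# page has changed over the years), one possible alternative is the
# third-party gdown tool (pip install gdown), which handles the confirmation
# step itself; an illustrative, untested invocation for one dataset:
#   gdown "https://drive.google.com/uc?id=${ID[1]}" -O "${DATADIR}/${DATASET[1]}_csv.tar.gz"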
# Build the fasttext binary, then train and evaluate one model per dataset
# with the hyperparameters from the paper (10 dimensions, word bigrams,
# 5 epochs, per-dataset learning rates from LR above).
make

for i in {0..7}
do
  echo "Working on dataset ${DATASET[i]}"
  ./fasttext supervised -input "${DATADIR}/${DATASET[i]}.train" \
    -output "${RESULTDIR}/${DATASET[i]}" -dim 10 -lr "${LR[i]}" -wordNgrams 2 \
    -minCount 1 -bucket 10000000 -epoch 5 -thread 4 > /dev/null
  ./fasttext test "${RESULTDIR}/${DATASET[i]}.bin" \
    "${DATADIR}/${DATASET[i]}.test"
done
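# For each dataset, `fasttext test` prints the number of test examples and
# precision/recall at 1 (which equals accuracy for these single-label tasks,
# and is what Table 1 reports). The output looks roughly like the following;
# the numbers here are placeholders, not results from the paper:
#   N     7600
#   P@1   0.92
#   R@1   0.92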