  1. #!/usr/bin/env bash
  2. #
  3. # Copyright (c) 2016-present, Facebook, Inc.
  4. # All rights reserved.
  5. #
  6. # This source code is licensed under the BSD-style license found in the
  7. # LICENSE file in the root directory of this source tree. An additional grant
  8. # of patent rights can be found in the PATENTS file in the same directory.
  9. #
  10. # This script produces the results from Table 1 in the following paper:
  11. # Bag of Tricks for Efficient Text Classification, arXiv 1607.01759, 2016
  12. myshuf() {
  13. perl -MList::Util=shuffle -e 'print shuffle(<>);' "$@";
  14. }
# Normalize raw CSV text into fastText supervised-training format:
#   * lowercase everything
#   * prefix each line with "__label__" — after the quote-stripping below,
#     the leading CSV class field becomes the fastText label token
#   * pad punctuation (' . , ( ) ! ?) with spaces so each mark becomes its
#     own token; delete double quotes, "<br />" tags, semicolons and colons
#   * squeeze runs of spaces, then shuffle the lines (myshuf)
# NOTE(review): the sed expressions are order-sensitive (e.g. the label
# prefix must be added before quotes are stripped), so the pipeline is
# kept byte-identical.
normalize_text() {
tr '[:upper:]' '[:lower:]' | sed -e 's/^/__label__/g' | \
sed -e "s/'/ ' /g" -e 's/"//g' -e 's/\./ \. /g' -e 's/<br \/>/ /g' \
-e 's/,/ , /g' -e 's/(/ ( /g' -e 's/)/ ) /g' -e 's/\!/ \! /g' \
-e 's/\?/ \? /g' -e 's/\;/ /g' -e 's/\:/ /g' | tr -s " " | myshuf
}
  21. DATASET=(
  22. ag_news
  23. sogou_news
  24. dbpedia
  25. yelp_review_polarity
  26. yelp_review_full
  27. yahoo_answers
  28. amazon_review_full
  29. amazon_review_polarity
  30. )
  31. ID=(
  32. 0Bz8a_Dbh9QhbUDNpeUdjb0wxRms # ag_news
  33. 0Bz8a_Dbh9QhbUkVqNEszd0pHaFE # sogou_news
  34. 0Bz8a_Dbh9QhbQ2Vic1kxMmZZQ1k # dbpedia
  35. 0Bz8a_Dbh9QhbNUpYQ2N3SGlFaDg # yelp_review_polarity
  36. 0Bz8a_Dbh9QhbZlU4dXhHTFhZQU0 # yelp_review_full
  37. 0Bz8a_Dbh9Qhbd2JNdDBsQUdocVU # yahoo_answers
  38. 0Bz8a_Dbh9QhbZVhsUnRWRDhETzA # amazon_review_full
  39. 0Bz8a_Dbh9QhbaW12WVVZS2drcnM # amazon_review_polarity
  40. )
  41. # These learning rates were chosen by validation on a subset of the training set.
  42. LR=( 0.25 0.5 0.5 0.1 0.1 0.1 0.05 0.05 )
  43. RESULTDIR=result
  44. DATADIR=data
  45. mkdir -p "${RESULTDIR}"
  46. mkdir -p "${DATADIR}"
  47. # Small datasets first
  48. for i in {0..0}
  49. do
  50. echo "Downloading dataset ${DATASET[i]}"
  51. if [ ! -f "${DATADIR}/${DATASET[i]}.train" ]
  52. then
  53. wget -c "https://drive.google.com/uc?export=download&id=${ID[i]}" -O "${DATADIR}/${DATASET[i]}_csv.tar.gz"
  54. tar -xzvf "${DATADIR}/${DATASET[i]}_csv.tar.gz" -C "${DATADIR}"
  55. cat "${DATADIR}/${DATASET[i]}_csv/train.csv" | normalize_text > "${DATADIR}/${DATASET[i]}.train"
  56. cat "${DATADIR}/${DATASET[i]}_csv/test.csv" | normalize_text > "${DATADIR}/${DATASET[i]}.test"
  57. fi
  58. done
  59. # Large datasets require a bit more work due to the extra request page
  60. for i in {1..7}
  61. do
  62. echo "Downloading dataset ${DATASET[i]}"
  63. if [ ! -f "${DATADIR}/${DATASET[i]}.train" ]
  64. then
  65. curl -c /tmp/cookies "https://drive.google.com/uc?export=download&id=${ID[i]}" > /tmp/intermezzo.html
  66. curl -L -b /tmp/cookies "https://drive.google.com$(cat /tmp/intermezzo.html | grep -Po 'uc-download-link" [^>]* href="\K[^"]*' | sed 's/\&amp;/\&/g')" > "${DATADIR}/${DATASET[i]}_csv.tar.gz"
  67. tar -xzvf "${DATADIR}/${DATASET[i]}_csv.tar.gz" -C "${DATADIR}"
  68. cat "${DATADIR}/${DATASET[i]}_csv/train.csv" | normalize_text > "${DATADIR}/${DATASET[i]}.train"
  69. cat "${DATADIR}/${DATASET[i]}_csv/test.csv" | normalize_text > "${DATADIR}/${DATASET[i]}.test"
  70. fi
  71. done
  72. make
  73. for i in {0..7}
  74. do
  75. echo "Working on dataset ${DATASET[i]}"
  76. ./fasttext supervised -input "${DATADIR}/${DATASET[i]}.train" \
  77. -output "${RESULTDIR}/${DATASET[i]}" -dim 10 -lr "${LR[i]}" -wordNgrams 2 \
  78. -minCount 1 -bucket 10000000 -epoch 5 -thread 4 > /dev/null
  79. ./fasttext test "${RESULTDIR}/${DATASET[i]}.bin" \
  80. "${DATADIR}/${DATASET[i]}.test"
  81. done