
quantization-example.sh

#!/usr/bin/env bash

# Train a fastText supervised classifier on the DBpedia dataset, quantize it,
# and compare accuracy and size of the original (.bin) and quantized (.ftz) models.

# Shuffle input lines (used to randomize the order of the training data).
myshuf() {
  perl -MList::Util=shuffle -e 'print shuffle(<>);' "$@";
}

# Lowercase, prepend the fastText "__label__" prefix to the leading class id,
# separate punctuation with spaces (dropping quotes, semicolons and colons),
# squeeze repeated spaces, and shuffle the resulting lines.
normalize_text() {
  tr '[:upper:]' '[:lower:]' | sed -e 's/^/__label__/g' | \
    sed -e "s/'/ ' /g" -e 's/"//g' -e 's/\./ \. /g' -e 's/<br \/>/ /g' \
        -e 's/,/ , /g' -e 's/(/ ( /g' -e 's/)/ ) /g' -e 's/\!/ \! /g' \
        -e 's/\?/ \? /g' -e 's/\;/ /g' -e 's/\:/ /g' | tr -s " " | myshuf
}

RESULTDIR=result
DATADIR=data

mkdir -p "${RESULTDIR}"
mkdir -p "${DATADIR}"

# Download and preprocess the DBpedia CSV dataset if not already present.
if [ ! -f "${DATADIR}/dbpedia.train" ]
then
  wget -c "https://drive.google.com/uc?export=download&id=0Bz8a_Dbh9QhbQ2Vic1kxMmZZQ1k" -O "${DATADIR}/dbpedia_csv.tar.gz"
  tar -xzvf "${DATADIR}/dbpedia_csv.tar.gz" -C "${DATADIR}"
  cat "${DATADIR}/dbpedia_csv/train.csv" | normalize_text > "${DATADIR}/dbpedia.train"
  cat "${DATADIR}/dbpedia_csv/test.csv" | normalize_text > "${DATADIR}/dbpedia.test"
fi

# Build the fasttext binary.
make

echo "Training..."
./fasttext supervised -input "${DATADIR}/dbpedia.train" -output "${RESULTDIR}/dbpedia" -dim 10 -lr 0.1 -wordNgrams 2 -minCount 1 -bucket 10000000 -epoch 5 -thread 4

echo "Quantizing..."
./fasttext quantize -output "${RESULTDIR}/dbpedia" -input "${DATADIR}/dbpedia.train" -qnorm -retrain -epoch 1 -cutoff 100000

echo "Testing original model..."
./fasttext test "${RESULTDIR}/dbpedia.bin" "${DATADIR}/dbpedia.test"

echo "Testing quantized model..."
./fasttext test "${RESULTDIR}/dbpedia.ftz" "${DATADIR}/dbpedia.test"

# Compare on-disk sizes of the original and quantized models.
wc -c < "${RESULTDIR}/dbpedia.bin" | awk '{print "Size of the original model:\t",$1;}'
wc -c < "${RESULTDIR}/dbpedia.ftz" | awk '{print "Size of the quantized model:\t",$1;}'
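
# Optional follow-up (not part of the original script): a quantized .ftz model can be
# used for prediction exactly like the .bin model. A minimal sketch, assuming the model
# trained above exists; the input sentence is a made-up example and should ideally be
# lowercased/tokenized the same way as the training data.
echo "company based in california that develops software" | ./fasttext predict "${RESULTDIR}/dbpedia.ftz" -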