Browse Source

removed get-data.sh and moved data fetching into the specific example scripts

Piotr Bojanowski 9 years ago
parent
commit
e1dd53126c
4 changed files with 86 additions and 71 deletions
  1. 47 4
      classification-results.sh
  2. 23 5
      classification.sh
  3. 0 54
      get-data.sh
  4. 16 8
      get-vectors.sh

+ 47 - 4
classification-results.sh

@@ -8,10 +8,38 @@
 # of patent rights can be found in the PATENTS file in the same directory.
 #
 
-make
+myshuf() {
+  perl -MList::Util=shuffle -e 'print shuffle(<>);' "$@";
+}
+
+normalize_text() {
+  tr '[:upper:]' '[:lower:]' | sed -e 's/^/__label__/g' | \
+    sed -e "s/'/ ' /g" -e 's/"//g' -e 's/\./ \. /g' -e 's/<br \/>/ /g' \
+        -e 's/,/ , /g' -e 's/(/ ( /g' -e 's/)/ ) /g' -e 's/\!/ \! /g' \
+        -e 's/\?/ \? /g' -e 's/\;/ /g' -e 's/\:/ /g' | tr -s " " | myshuf
+}
 
-DATASET=( ag_news sogou_news dbpedia yelp_review_polarity \
-  yelp_review_full yahoo_answers amazon_review_full amazon_review_polarity )
+DATASET=(
+  ag_news
+  sogou_news
+  dbpedia
+  yelp_review_polarity
+  yelp_review_full
+  yahoo_answers
+  amazon_review_full
+  amazon_review_polarity
+)
+
+ID=(
+  0Bz8a_Dbh9QhbUDNpeUdjb0wxRms # ag_news
+  0Bz8a_Dbh9QhbUkVqNEszd0pHaFE # sogou_news
+  0Bz8a_Dbh9QhbQ2Vic1kxMmZZQ1k # dbpedia
+  0Bz8a_Dbh9QhbNUpYQ2N3SGlFaDg # yelp_review_polarity
+  0Bz8a_Dbh9QhbZlU4dXhHTFhZQU0 # yelp_review_full
+  0Bz8a_Dbh9Qhbd2JNdDBsQUdocVU # yahoo_answers
+  0Bz8a_Dbh9QhbZVhsUnRWRDhETzA # amazon_review_full
+  0Bz8a_Dbh9QhbaW12WVVZS2drcnM # amazon_review_polarity
+)
 
 LR=( 0.25 0.5 0.5 0.1 0.1 0.1 0.05 0.05 )
 
@@ -19,8 +47,23 @@ RESULTDIR=result
 DATADIR=data
 
 mkdir -p "${RESULTDIR}"
+mkdir -p "${DATADIR}"
+
+for i in {0..7}
+do
+  echo "Downloading dataset ${DATASET[i]}"
+  if [ ! -f "${DATADIR}/${DATASET[i]}.train" ]
+  then
+    wget -c "https://googledrive.com/host/${ID[i]}" -O "${DATADIR}/${DATASET[i]}_csv.tar.gz"
+    tar -xzvf "${DATADIR}/${DATASET[i]}_csv.tar.gz" -C "${DATADIR}"
+    cat "${DATADIR}/${DATASET[i]}_csv/train.csv" | normalize_text > "${DATADIR}/${DATASET[i]}.train"
+    cat "${DATADIR}/${DATASET[i]}_csv/test.csv" | normalize_text > "${DATADIR}/${DATASET[i]}.test"
+  fi
+done
+
+make
 
-for i in {0..1}
+for i in {0..7}
 do
   echo "Working on dataset ${DATASET[i]}"
   ./fasttext supervised -input "${DATADIR}/${DATASET[i]}.train" \

+ 23 - 5
classification.sh

@@ -8,17 +8,35 @@
 # of patent rights can be found in the PATENTS file in the same directory.
 #
 
-make
+myshuf() {
+  perl -MList::Util=shuffle -e 'print shuffle(<>);' "$@";
+}
 
-export LD_LIBRARY_PATH=.:$LD_LIBRARY_PATH
+normalize_text() {
+  tr '[:upper:]' '[:lower:]' | sed -e 's/^/__label__/g' | \
+    sed -e "s/'/ ' /g" -e 's/"//g' -e 's/\./ \. /g' -e 's/<br \/>/ /g' \
+        -e 's/,/ , /g' -e 's/(/ ( /g' -e 's/)/ ) /g' -e 's/\!/ \! /g' \
+        -e 's/\?/ \? /g' -e 's/\;/ /g' -e 's/\:/ /g' | tr -s " " | myshuf
+}
 
 RESULTDIR=result
 DATADIR=data
 
 mkdir -p "${RESULTDIR}"
+mkdir -p "${DATADIR}"
+
+if [ ! -f "${DATADIR}/dbpedia.train" ]
+then
+  wget -c "https://googledrive.com/host/0Bz8a_Dbh9QhbQ2Vic1kxMmZZQ1k" -O "${DATADIR}/dbpedia_csv.tar.gz"
+  tar -xzvf "${DATADIR}/dbpedia_csv.tar.gz" -C "${DATADIR}"
+  cat "${DATADIR}/dbpedia_csv/train.csv" | normalize_text > "${DATADIR}/dbpedia.train"
+  cat "${DATADIR}/dbpedia_csv/test.csv" | normalize_text > "${DATADIR}/dbpedia.test"
+fi
+
+make
 
-./fasttext supervised -input "${DATADIR}/yelp_review_full.train" -output "${RESULTDIR}/yelp_review_full" -dim 10 -lr 0.1 -wordNgrams 2 -minCount 1 -bucket 10000000 -epoch 5 -thread 4
+./fasttext supervised -input "${DATADIR}/dbpedia.train" -output "${RESULTDIR}/dbpedia" -dim 10 -lr 0.1 -wordNgrams 2 -minCount 1 -bucket 10000000 -epoch 5 -thread 4
 
-./fasttext test "${RESULTDIR}/yelp_review_full.bin" "${DATADIR}/yelp_review_full.test"
+./fasttext test "${RESULTDIR}/dbpedia.bin" "${DATADIR}/dbpedia.test"
 
-./fasttext predict "${RESULTDIR}/yelp_review_full.bin" "${DATADIR}/yelp_review_full.test" > "${RESULTDIR}/yelp_review_full.test.predict"
+./fasttext predict "${RESULTDIR}/dbpedia.bin" "${DATADIR}/dbpedia.test" > "${RESULTDIR}/dbpedia.test.predict"

+ 0 - 54
get-data.sh

@@ -1,54 +0,0 @@
-#!/bin/sh
-#
-# Copyright (c) 2016-present, Facebook, Inc.
-# All rights reserved.
-#
-# This source code is licensed under the BSD-style license found in the
-# LICENSE file in the root directory of this source tree. An additional grant
-# of patent rights can be found in the PATENTS file in the same directory.
-#
-
-normalize_text() {
-  # sed -e 's/^"//g;s/"$//g;s/","/ /g' | awk '{print "__label__" $0}' | tr '[:upper:]' '[:lower:]' | \
-  #        sed -e "s/’/'/g" -e "s/′/'/g" -e "s/''/ /g" -e "s/'/ ' /g" -e "s/“/\"/g" -e "s/”/\"/g" \
-  #            -e 's/"/ " /g' -e 's/\./ \. /g' -e 's/<br \/>/ /g' -e 's/, / , /g' -e 's/(/ ( /g' -e 's/)/ ) /g' -e 's/\!/ \! /g' \
-  #            -e 's/\?/ \? /g' -e 's/\;/ /g' -e 's/\:/ /g' -e 's/-/ - /g' -e 's/=/ /g' -e 's/=/ /g' -e 's/*/ /g' -e 's/|/ /g' \
-  #            -e 's/«/ /g' | tr -s " " | perl -MList::Util=shuffle -e 'print shuffle(<STDIN>);'
-  sed -e 's/^"//g;s/"$//g;s/","/ /g;s/,"/ /g;s/",/ /g' | tr '[:upper:]' '[:lower:]' | sed -e 's/^/__label__/g' | perl -MList::Util=shuffle -e 'print shuffle(<STDIN>);'
-}
-
-DATADIR=data
-
-mkdir -p "${DATADIR}"
-
-if [ ! -f "${DATADIR}/text9" ]
-then
-  wget -c http://mattmahoney.net/dc/enwik9.zip -P "${DATADIR}"
-  unzip "${DATADIR}/enwik9.zip" -d "${DATADIR}"
-  perl wikifil.pl "${DATADIR}/enwik9" > "${DATADIR}"/text9
-fi
-
-if [ ! -f "${DATADIR}/rw/rw.txt" ]
-then
-  wget -c http://www-nlp.stanford.edu/~lmthang/morphoNLM/rw.zip -P "${DATADIR}"
-  unzip "${DATADIR}/rw.zip" -d "${DATADIR}"
-fi
-
-DATASET=( ag_news amazon_review_full amazon_review_polarity dbpedia
-          sogou_news yahoo_answers yelp_review_full yelp_review_polarity )
-ID=( 0Bz8a_Dbh9QhbUDNpeUdjb0wxRms 0Bz8a_Dbh9QhbZVhsUnRWRDhETzA
-      0Bz8a_Dbh9QhbaW12WVVZS2drcnM 0Bz8a_Dbh9QhbQ2Vic1kxMmZZQ1k
-      0Bz8a_Dbh9QhbUkVqNEszd0pHaFE 0Bz8a_Dbh9Qhbd2JNdDBsQUdocVU
-      0Bz8a_Dbh9QhbZlU4dXhHTFhZQU0 0Bz8a_Dbh9QhbNUpYQ2N3SGlFaDg )
-
-for i in {0..7}
-do
-  echo "Downloading dataset ${DATASET[i]}"
-  if [ ! -f "${DATADIR}/${DATASET[i]}_csv/train.csv" ]
-  then
-    wget -c "https://googledrive.com/host/${ID[i]}" -O "${DATADIR}/${DATASET[i]}_csv.tar.gz"
-    tar -xzvf "${DATADIR}/${DATASET[i]}_csv.tar.gz" -C "${DATADIR}"
-  fi
-  cat "${DATADIR}/${DATASET[i]}_csv/train.csv" | normalize_text > "${DATADIR}/${DATASET[i]}.train"
-  cat "${DATADIR}/${DATASET[i]}_csv/test.csv" | normalize_text > "${DATADIR}/${DATASET[i]}.test"
-done

+ 16 - 8
get-vectors.sh

@@ -8,18 +8,26 @@
 # of patent rights can be found in the PATENTS file in the same directory.
 #
 
-export LC_ALL=en_US.UTF-8
-export LANG=en_US.UTF-8
-export LANGUAGE=en_US.UTF-8
-
-make
-
-export LD_LIBRARY_PATH=.:$LD_LIBRARY_PATH
-
 RESULTDIR=result
 DATADIR=data
 
 mkdir -p "${RESULTDIR}"
+mkdir -p "${DATADIR}"
+
+if [ ! -f "${DATADIR}/text9" ]
+then
+  wget -c http://mattmahoney.net/dc/enwik9.zip -P "${DATADIR}"
+  unzip "${DATADIR}/enwik9.zip" -d "${DATADIR}"
+  perl wikifil.pl "${DATADIR}/enwik9" > "${DATADIR}"/text9
+fi
+
+if [ ! -f "${DATADIR}/rw/rw.txt" ]
+then
+  wget -c http://www-nlp.stanford.edu/~lmthang/morphoNLM/rw.zip -P "${DATADIR}"
+  unzip "${DATADIR}/rw.zip" -d "${DATADIR}"
+fi
+
+make
 
 ./fasttext skipgram -input "${DATADIR}"/text9 -output "${RESULTDIR}"/text9 -lr 0.025 -dim 100 \
   -ws 5 -epoch 1 -minCount 5 -neg 5 -sampling tf -loss ns \