Przeglądaj źródła

Removing obsolete scripts

Przemek Strzelczyk 6 lat temu
rodzic
commit
15075aab0c

+ 0 - 442
TensorFlow/LanguageModeling/BERT/create_pretraining_data.py

@@ -1,442 +0,0 @@
-# coding=utf-8
-# Copyright 2018 The Google AI Language Team Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Create masked LM/next sentence masked_lm TF examples for BERT."""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import collections
-import random
-import tokenization
-import tensorflow as tf
-
# Command-line flags (TF1 `tf.flags` API). The three path flags have no
# usable defaults and are marked required in the __main__ block below.
flags = tf.flags

FLAGS = flags.FLAGS

flags.DEFINE_string("input_file", None,
                    "Input raw text file (or comma-separated list of files).")

flags.DEFINE_string(
    "output_file", None,
    "Output TF example file (or comma-separated list of files).")

flags.DEFINE_string("vocab_file", None,
                    "The vocabulary file that the BERT model was trained on.")

flags.DEFINE_bool(
    "do_lower_case", True,
    "Whether to lower case the input text. Should be True for uncased "
    "models and False for cased models.")

flags.DEFINE_integer("max_seq_length", 128, "Maximum sequence length.")

flags.DEFINE_integer("max_predictions_per_seq", 20,
                     "Maximum number of masked LM predictions per sequence.")

# Fixed seed so repeated runs over the same corpus produce the same examples.
flags.DEFINE_integer("random_seed", 12345, "Random seed for data generation.")

flags.DEFINE_integer(
    "dupe_factor", 10,
    "Number of times to duplicate the input data (with different masks).")

flags.DEFINE_float("masked_lm_prob", 0.15, "Masked LM probability.")

flags.DEFINE_float(
    "short_seq_prob", 0.1,
    "Probability of creating sequences which are shorter than the "
    "maximum length.")
-
-
class TrainingInstance(object):
  """A single training instance (sentence pair).

  Plain data holder: token strings (with [CLS]/[SEP] already inserted),
  per-token segment ids, the masked-LM positions/labels, and whether the
  second segment was randomly sampled ("next sentence prediction" target).
  """

  def __init__(self, tokens, segment_ids, masked_lm_positions, masked_lm_labels,
               is_random_next):
    self.tokens = tokens
    self.segment_ids = segment_ids
    self.masked_lm_positions = masked_lm_positions
    self.masked_lm_labels = masked_lm_labels
    self.is_random_next = is_random_next

  def __str__(self):
    # One "name: values" line per field, matching the original layout:
    # each line newline-terminated, plus one trailing blank line.
    lines = [
        "tokens: %s" % " ".join(
            tokenization.printable_text(x) for x in self.tokens),
        "segment_ids: %s" % " ".join(str(x) for x in self.segment_ids),
        "is_random_next: %s" % self.is_random_next,
        "masked_lm_positions: %s" % " ".join(
            str(x) for x in self.masked_lm_positions),
        "masked_lm_labels: %s" % " ".join(
            tokenization.printable_text(x) for x in self.masked_lm_labels),
    ]
    return "\n".join(lines) + "\n\n"

  def __repr__(self):
    return self.__str__()
-
-
def write_instance_to_example_files(instances, tokenizer, max_seq_length,
                                    max_predictions_per_seq, output_files):
  """Create TF example files from `TrainingInstance`s.

  Serializes each instance as a tf.train.Example and distributes them
  round-robin across `output_files`. The first 20 instances are echoed to
  the log for inspection.
  """
  writers = [tf.python_io.TFRecordWriter(path) for path in output_files]
  writer_index = 0
  total_written = 0

  for (inst_index, instance) in enumerate(instances):
    input_ids = tokenizer.convert_tokens_to_ids(instance.tokens)
    input_mask = [1] * len(input_ids)
    segment_ids = list(instance.segment_ids)
    assert len(input_ids) <= max_seq_length

    # Zero-pad every per-token field out to the fixed sequence length;
    # input_mask distinguishes real tokens (1) from padding (0).
    pad_len = max_seq_length - len(input_ids)
    input_ids.extend([0] * pad_len)
    input_mask.extend([0] * pad_len)
    segment_ids.extend([0] * pad_len)

    assert len(input_ids) == max_seq_length
    assert len(input_mask) == max_seq_length
    assert len(segment_ids) == max_seq_length

    masked_lm_positions = list(instance.masked_lm_positions)
    masked_lm_ids = tokenizer.convert_tokens_to_ids(instance.masked_lm_labels)
    masked_lm_weights = [1.0] * len(masked_lm_ids)

    # Pad the masked-LM fields to the fixed prediction count; weight 0.0
    # marks the padded slots so downstream loss can ignore them.
    lm_pad = max_predictions_per_seq - len(masked_lm_positions)
    masked_lm_positions.extend([0] * lm_pad)
    masked_lm_ids.extend([0] * lm_pad)
    masked_lm_weights.extend([0.0] * lm_pad)

    next_sentence_label = 1 if instance.is_random_next else 0

    features = collections.OrderedDict()
    features["input_ids"] = create_int_feature(input_ids)
    features["input_mask"] = create_int_feature(input_mask)
    features["segment_ids"] = create_int_feature(segment_ids)
    features["masked_lm_positions"] = create_int_feature(masked_lm_positions)
    features["masked_lm_ids"] = create_int_feature(masked_lm_ids)
    features["masked_lm_weights"] = create_float_feature(masked_lm_weights)
    features["next_sentence_labels"] = create_int_feature([next_sentence_label])

    tf_example = tf.train.Example(features=tf.train.Features(feature=features))

    writers[writer_index].write(tf_example.SerializeToString())
    writer_index = (writer_index + 1) % len(writers)

    total_written += 1

    # Log the first handful of examples so the output can be eyeballed.
    if inst_index < 20:
      tf.logging.info("*** Example ***")
      tf.logging.info("tokens: %s" % " ".join(
          [tokenization.printable_text(x) for x in instance.tokens]))

      for feature_name, feature in features.items():
        values = []
        if feature.int64_list.value:
          values = feature.int64_list.value
        elif feature.float_list.value:
          values = feature.float_list.value
        tf.logging.info(
            "%s: %s" % (feature_name, " ".join([str(x) for x in values])))

  for writer in writers:
    writer.close()

  tf.logging.info("Wrote %d total instances", total_written)
-
-
def create_int_feature(values):
  """Wraps an iterable of ints in a tf.train.Feature holding an Int64List."""
  return tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
-
-
def create_float_feature(values):
  """Wraps an iterable of floats in a tf.train.Feature holding a FloatList."""
  return tf.train.Feature(float_list=tf.train.FloatList(value=list(values)))
-
-
def create_training_instances(input_files, tokenizer, max_seq_length,
                              dupe_factor, short_seq_prob, masked_lm_prob,
                              max_predictions_per_seq, rng):
  """Create `TrainingInstance`s from raw text.

  Args:
    input_files: List of plain-text file paths (one sentence per line,
      blank lines separating documents).
    tokenizer: Object exposing `tokenize()` and a `vocab` mapping —
      presumably `tokenization.FullTokenizer`; confirm against the caller.
    max_seq_length: Hard cap on tokens per instance, special tokens included.
    dupe_factor: Number of passes over the corpus; each pass yields
      instances with different random masks.
    short_seq_prob: Probability of deliberately targeting a short sequence.
    masked_lm_prob: Fraction of tokens to mask per instance.
    max_predictions_per_seq: Upper bound on masked positions per instance.
    rng: Seeded `random.Random`, so generation is reproducible.

  Returns:
    A shuffled list of `TrainingInstance`s.
  """
  # Documents are lists of sentences; each sentence is a list of tokens.
  all_documents = [[]]

  # Input file format:
  # (1) One sentence per line. These should ideally be actual sentences, not
  # entire paragraphs or arbitrary spans of text. (Because we use the
  # sentence boundaries for the "next sentence prediction" task).
  # (2) Blank lines between documents. Document boundaries are needed so
  # that the "next sentence prediction" task doesn't span between documents.
  for input_file in input_files:
    with tf.gfile.GFile(input_file, "r") as reader:
      while True:
        line = tokenization.convert_to_unicode(reader.readline())
        if not line:
          break
        line = line.strip()

        # Empty lines are used as document delimiters. Note the blank line
        # still falls through to tokenize(), which yields no tokens, so
        # nothing is appended to the freshly started document.
        if not line:
          all_documents.append([])
        tokens = tokenizer.tokenize(line)
        if tokens:
          all_documents[-1].append(tokens)

  # Remove empty documents
  all_documents = [x for x in all_documents if x]
  rng.shuffle(all_documents)

  vocab_words = list(tokenizer.vocab.keys())
  instances = []
  # dupe_factor passes: the same text is re-masked differently each time.
  for _ in range(dupe_factor):
    for document_index in range(len(all_documents)):
      instances.extend(
          create_instances_from_document(
              all_documents, document_index, max_seq_length, short_seq_prob,
              masked_lm_prob, max_predictions_per_seq, vocab_words, rng))

  rng.shuffle(instances)
  return instances
-
-
def create_instances_from_document(
    all_documents, document_index, max_seq_length, short_seq_prob,
    masked_lm_prob, max_predictions_per_seq, vocab_words, rng):
  """Creates `TrainingInstance`s for a single document.

  Builds [CLS] A [SEP] B [SEP] sentence-pair instances where segment B is
  either the true continuation or a span from a randomly chosen document
  (the "next sentence prediction" label). `all_documents` is needed in full
  here because random B segments are sampled from other documents.
  """
  document = all_documents[document_index]

  # Account for [CLS], [SEP], [SEP]
  max_num_tokens = max_seq_length - 3

  # We *usually* want to fill up the entire sequence since we are padding
  # to `max_seq_length` anyways, so short sequences are generally wasted
  # computation. However, we *sometimes*
  # (i.e., short_seq_prob == 0.1 == 10% of the time) want to use shorter
  # sequences to minimize the mismatch between pre-training and fine-tuning.
  # The `target_seq_length` is just a rough target however, whereas
  # `max_seq_length` is a hard limit.
  target_seq_length = max_num_tokens
  if rng.random() < short_seq_prob:
    target_seq_length = rng.randint(2, max_num_tokens)

  # We DON'T just concatenate all of the tokens from a document into a long
  # sequence and choose an arbitrary split point because this would make the
  # next sentence prediction task too easy. Instead, we split the input into
  # segments "A" and "B" based on the actual "sentences" provided by the user
  # input.
  instances = []
  current_chunk = []
  current_length = 0
  i = 0
  while i < len(document):
    segment = document[i]
    current_chunk.append(segment)
    current_length += len(segment)
    # Flush the chunk once it reaches the target length (or at end of doc).
    if i == len(document) - 1 or current_length >= target_seq_length:
      if current_chunk:
        # `a_end` is how many segments from `current_chunk` go into the `A`
        # (first) sentence.
        a_end = 1
        if len(current_chunk) >= 2:
          a_end = rng.randint(1, len(current_chunk) - 1)

        tokens_a = []
        for j in range(a_end):
          tokens_a.extend(current_chunk[j])

        tokens_b = []
        # Random next. Forced when the chunk has a single segment, since
        # there is nothing left over to use as the true continuation.
        is_random_next = False
        if len(current_chunk) == 1 or rng.random() < 0.5:
          is_random_next = True
          target_b_length = target_seq_length - len(tokens_a)

          # This should rarely go for more than one iteration for large
          # corpora. However, just to be careful, we try to make sure that
          # the random document is not the same as the document
          # we're processing.
          # NOTE(review): with a single-document corpus all 10 tries can
          # still land on the same document — B then comes from itself.
          for _ in range(10):
            random_document_index = rng.randint(0, len(all_documents) - 1)
            if random_document_index != document_index:
              break

          random_document = all_documents[random_document_index]
          random_start = rng.randint(0, len(random_document) - 1)
          for j in range(random_start, len(random_document)):
            tokens_b.extend(random_document[j])
            if len(tokens_b) >= target_b_length:
              break
          # We didn't actually use these segments so we "put them back" so
          # they don't go to waste. Rewinding `i` makes the outer loop
          # revisit the unused tail of `current_chunk` on later iterations.
          num_unused_segments = len(current_chunk) - a_end
          i -= num_unused_segments
        # Actual next
        else:
          is_random_next = False
          for j in range(a_end, len(current_chunk)):
            tokens_b.extend(current_chunk[j])
        truncate_seq_pair(tokens_a, tokens_b, max_num_tokens, rng)

        assert len(tokens_a) >= 1
        assert len(tokens_b) >= 1

        # Assemble [CLS] A [SEP] B [SEP]; segment id 0 covers [CLS]+A+[SEP],
        # segment id 1 covers B+[SEP].
        tokens = []
        segment_ids = []
        tokens.append("[CLS]")
        segment_ids.append(0)
        for token in tokens_a:
          tokens.append(token)
          segment_ids.append(0)

        tokens.append("[SEP]")
        segment_ids.append(0)

        for token in tokens_b:
          tokens.append(token)
          segment_ids.append(1)
        tokens.append("[SEP]")
        segment_ids.append(1)

        (tokens, masked_lm_positions,
         masked_lm_labels) = create_masked_lm_predictions(
             tokens, masked_lm_prob, max_predictions_per_seq, vocab_words, rng)
        instance = TrainingInstance(
            tokens=tokens,
            segment_ids=segment_ids,
            is_random_next=is_random_next,
            masked_lm_positions=masked_lm_positions,
            masked_lm_labels=masked_lm_labels)
        instances.append(instance)
      current_chunk = []
      current_length = 0
    i += 1

  return instances
-
-
# (position, original token) pair for one masked-out prediction target.
MaskedLmInstance = collections.namedtuple("MaskedLmInstance",
                                          ["index", "label"])


def create_masked_lm_predictions(tokens, masked_lm_prob,
                                 max_predictions_per_seq, vocab_words, rng):
  """Creates the predictions for the masked LM objective.

  Returns (output_tokens, masked_lm_positions, masked_lm_labels): `tokens`
  with the chosen positions replaced, plus position/original-token lists
  sorted by position.
  """
  # Every position except the special delimiter tokens is a candidate.
  cand_indexes = [
      i for (i, token) in enumerate(tokens)
      if token != "[CLS]" and token != "[SEP]"
  ]
  rng.shuffle(cand_indexes)

  output_tokens = list(tokens)

  # Mask ~masked_lm_prob of the tokens, at least 1, capped at the limit.
  num_to_predict = min(max_predictions_per_seq,
                       max(1, int(round(len(tokens) * masked_lm_prob))))

  masked_lms = []
  covered_indexes = set()
  for index in cand_indexes:
    if len(masked_lms) >= num_to_predict:
      break
    if index in covered_indexes:
      continue
    covered_indexes.add(index)

    # BERT masking recipe: 80% [MASK], 10% keep original, 10% random word.
    if rng.random() < 0.8:
      masked_token = "[MASK]"
    elif rng.random() < 0.5:
      masked_token = tokens[index]
    else:
      masked_token = vocab_words[rng.randint(0, len(vocab_words) - 1)]

    output_tokens[index] = masked_token
    masked_lms.append(MaskedLmInstance(index=index, label=tokens[index]))

  masked_lms.sort(key=lambda x: x.index)

  masked_lm_positions = [p.index for p in masked_lms]
  masked_lm_labels = [p.label for p in masked_lms]

  return (output_tokens, masked_lm_positions, masked_lm_labels)
-
-
def truncate_seq_pair(tokens_a, tokens_b, max_num_tokens, rng):
  """Truncates a pair of sequences in place until their total length fits."""
  while len(tokens_a) + len(tokens_b) > max_num_tokens:
    # Always shrink whichever sequence is currently longer.
    if len(tokens_a) > len(tokens_b):
      trunc_tokens = tokens_a
    else:
      trunc_tokens = tokens_b
    assert len(trunc_tokens) >= 1

    # We want to sometimes truncate from the front and sometimes from the
    # back to add more randomness and avoid biases.
    if rng.random() < 0.5:
      trunc_tokens.pop(0)
    else:
      trunc_tokens.pop()
-
-
def main(_):
  """Driver: expands input globs, builds instances, writes TFRecord files."""
  tf.logging.set_verbosity(tf.logging.INFO)

  tokenizer = tokenization.FullTokenizer(
      vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)

  # Expand each comma-separated glob pattern into concrete file paths.
  input_files = [
      path
      for pattern in FLAGS.input_file.split(",")
      for path in tf.gfile.Glob(pattern)
  ]

  tf.logging.info("*** Reading from input files ***")
  for path in input_files:
    tf.logging.info("  %s", path)

  # Seeded RNG so repeated runs generate identical examples.
  rng = random.Random(FLAGS.random_seed)
  instances = create_training_instances(
      input_files, tokenizer, FLAGS.max_seq_length, FLAGS.dupe_factor,
      FLAGS.short_seq_prob, FLAGS.masked_lm_prob, FLAGS.max_predictions_per_seq,
      rng)

  output_files = FLAGS.output_file.split(",")
  tf.logging.info("*** Writing to output files ***")
  for path in output_files:
    tf.logging.info("  %s", path)

  write_instance_to_example_files(instances, tokenizer, FLAGS.max_seq_length,
                                  FLAGS.max_predictions_per_seq, output_files)
-
-
if __name__ == "__main__":
  # These flags have no usable defaults; fail fast if any is missing.
  flags.mark_flag_as_required("input_file")
  flags.mark_flag_as_required("output_file")
  flags.mark_flag_as_required("vocab_file")
  tf.app.run()

+ 0 - 184
TensorFlow/LanguageModeling/BERT/scripts/run.sub

@@ -1,184 +0,0 @@
#!/bin/bash
# Slurm batch script: pre-pulls the training container on every node, wires
# up MPI-over-ssh between per-node containers, and launches BERT
# pre-training via mpirun on the master node.
# NOTE(review): expects CODEDIR, CHKPTDIR, LOGDIR, DATESTAMP, CONT, VOLS,
# EXTRA_PARAMS, DGXSYSTEM and DGXNGPU in the environment (presumably
# exported by scripts/start_pretraining.sh) — confirm with the submitter.
#SBATCH -p mlperf		# partition
#SBATCH -N 1       		# number of nodes
#SBATCH -t 12:00:00		# wall time
#SBATCH -J image_classification	# job name
#SBATCH --exclusive   		# exclusive node access
#SBATCH --mem=0   		# all mem avail
#SBATCH --mail-type=FAIL        # only send email on failure
#SBATCH --ntasks-per-node=8	# n tasks per machine (one task per gpu)
#SBATCH --threads-per-core=2	# HT is on
#SBATCH --cores-per-socket=20	# 20 cores on each socket 
#SBATCH --overcommit

hostname
#DGXIBDEVICES=$(eval ls /dev/infiniband/ | tr " " "\n" | awk '{printf "--device=/dev/infiniband/%s ",$1}' | sed s'/.$//')
printf "DGXIBDEVICES=%s\n" "$DGXIBDEVICES"
printf "VOLS=%s\n" "$VOLS"
printf "EXTRA_PARAMS=%s\n" "$EXTRA_PARAMS"

cd $CODEDIR

VOLS+=" -v $CHKPTDIR/$SLURM_JOB_ID:/checkpoints"

mkdir -p $CHKPTDIR/$SLURM_JOB_ID

## DO NOT CHANGE ANYTHING BELOW -- DL params are in run_and_time.sh and config_<system>.sh files 

DEBUG=1  # 1 = Print verbose messages for debugging

## Pre-warming the containers ##
hosts=( `scontrol show hostname |tr "\n" " "` )
# Pull the image on every node in parallel, then wait on each pid
# individually so each pull's REAL exit status is captured.
# (fix: the original appended each pid twice and recorded $? of the `&`
# launch, which is always 0, so failures were never detected)
pids=(); for hostn in ${hosts[@]}; do
  timeout -k 600s 600s \
  srun -N 1 -n 1 -w $hostn \
    docker pull $CONT &
  pids+=($!);
done
rets=(); for pid in "${pids[@]}"; do
  wait "$pid"; rets+=($?);
done
success=0; for s in ${rets[@]}; do ((success+=s)); done ; if [ $success -ne 0 ]; then echo "ERR: Container pull failed"; exit $success ; fi

IBDEVICES=${IBDEVICES:-$DGXIBDEVICES}

## Check whether we are running in a slurm env
INSLURM=1
if [[ -z "$SLURM_JOB_ID" ]]; then
  INSLURM=0
  export SLURM_JOB_ID="${DATESTAMP}"
  export SLURM_NNODES=1
fi
# (fix: the original tested the literal string "SLURM_JOB_ID", which is
# never empty, so the first clause was dead — test the run mode instead)
if [[ $INSLURM -eq 0 || $SLURM_NNODES -eq 1 ]]; then
  # don't need IB if not multi-node
  export IBDEVICES=""
fi

# Create results directory
LOGFILE_BASE="${LOGDIR}/${DATESTAMP}"
mkdir -p $(dirname "${LOGFILE_BASE}")

export CONTNAME="mpi_${SLURM_JOB_ID}"
export DOCKEREXEC="nvidia-docker run --rm --net=host --uts=host --ipc=host --ulimit stack=67108864 --ulimit memlock=-1 --security-opt seccomp=unconfined  $IBDEVICES"
MPICMD="mpirun --allow-run-as-root --tag-output --bind-to none -np $((SLURM_NNODES*DGXNGPU)) -x EXTRA_PARAMS=\"${EXTRA_PARAMS}\" -x HOROVOD_MPI_THREADS_DISABLE=1 -x NCCL_LL_THRESHOLD=0 -x NCCL_DEBUG=INFO -x NCCL_NET_GDR_READ=1 -x NCCL_SOCKET_IFNAME=^docker0,bond0,lo $BIND ./run_pretraining.sh"
echo $MPICMD

mkdir -m 777 -p $LOGDIR
# Write the truncating header line FIRST, then append the command.
# (fix: the original appended MPICMD and then truncated the file with &>,
# discarding the MPICMD line it had just logged)
echo "slurm job id" $SLURM_JOB_ID &> $LOGDIR/$DATESTAMP.log
echo $MPICMD | tee -a $LOGDIR/$DATESTAMP.log

MASTER_IP=`getent hosts \`hostname\` | cut -d ' ' -f1`
SSH=''
SRUN=''
if [[ $INSLURM -eq 0 ]]; then
  export hosts=( `hostname` )
else
  export hosts=( `scontrol show hostname |tr "\n" " "` )
  SSH='ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no $hostn'
  SRUN='srun -N 1 -n 1 -w $hostn'
fi
unique_hosts=( $(echo "${hosts[@]}" | tr ' ' '\n' | sort -u | tr '\n' ' ' ) )
export MASTER_HOST=${hosts[0]}

VARS="-e OMPI_MCA_mca_base_param_files=/dev/shm/mpi/${SLURM_JOB_ID}/mca_params.conf -e EXTRA_PARAMS -e GPUS -e BATCHSIZE -e CONT -e DGXSYSTEM=$DGXSYSTEM -e MASTER_HOST -e MASTER_IP -e SLURM_JOB_NUM_NODES -e SLURM_NNODES -e SLURM_NTASKS_PER_NODE -w /workspace/bert"

RUNSLEEPCMD=""

[[ "${PULL}" -eq "1" ]] && docker pull $CONT

## Setting up MPI
# MPI support files - in /dev/shm/mpi/<jobid>
# 1. Copy user keys to /dev/shm/mpi/<jobid>
# 2. Create mca_params.conf
# 3. Create sshentry.sh to support lauching into containers on worker nodes
# 4. Create mpi_hosts file
# 5. Copy standard ssh

if [[ $SLURM_NNODES -ne "1" ]]; then

  # Make keys and copy
  echo

  [[ $DEBUG == 1 ]] && echo "Setting up ssh keys and config"

  mkdir -p ${HOME}/.ssh/sbatch/${SLURM_JOB_ID}
  ssh-keygen -t rsa -b 2048 -n "" -f "${HOME}/.ssh/sbatch/${SLURM_JOB_ID}/sshkey.rsa" -C "mxnet_${SLURM_JOB_ID}_"  &>/dev/null
  echo command=\"/dev/shm/mpi/${SLURM_JOB_ID}/sshentry.sh\",no-port-forwarding,no-agent-forwarding,no-X11-forwarding $(cat ${HOME}/.ssh/sbatch/${SLURM_JOB_ID}/sshkey.rsa.pub) >> ${HOME}/.ssh/authorized_keys
  chmod 600 ~/.ssh/authorized_keys

  [[ $DEBUG == 1 ]] && echo "Copy keys: srun -n $SLURM_JOB_NUM_NODES mkdir /dev/shm/mpi && cp -R ${HOME}/.ssh/sbatch/${SLURM_JOB_ID} /dev/shm/mpi && chmod 700 /dev/shm/mpi/${SLURM_JOB_ID}" 

  srun  -n $SLURM_JOB_NUM_NODES --ntasks-per-node=1 bash -c "mkdir -p /dev/shm/mpi/${SLURM_JOB_ID}; cp -R ${HOME}/.ssh/sbatch/${SLURM_JOB_ID} /dev/shm/mpi; chmod 700 /dev/shm/mpi/${SLURM_JOB_ID}"

  sleep 2 # Making copy

  [[ $DEBUG == 1 ]] && ls /dev/shm

  # Create mpi config file
  srun  -n $SLURM_JOB_NUM_NODES --ntasks-per-node=1 tee /dev/shm/mpi/${SLURM_JOB_ID}/mca_params.conf <<EOF
plm_rsh_agent = /usr/bin/ssh
plm_rsh_args = -i /dev/shm/mpi/${SLURM_JOB_ID}/sshkey.rsa -oStrictHostKeyChecking=no -oUserKnownHostsFile=/dev/null -oLogLevel=ERROR -l ${USER}
orte_default_hostfile = /dev/shm/mpi/${SLURM_JOB_ID}/mpi_hosts
btl_openib_warn_default_gid_prefix = 0
mpi_warn_on_fork = 0
allow_run_as_root = 1
EOF

  [[ $DEBUG == 1 ]] && echo "::mca_params.conf=" && cat /dev/shm/mpi/${SLURM_JOB_ID}/mca_params.conf

  # Create ssh helper script that transfers an ssh into a compute node into the running container on that node
  # (fix: the error string below was previously executed as a command
  # instead of echoed)
  srun -n $SLURM_JOB_NUM_NODES --ntasks-per-node=1 tee /dev/shm/mpi/${SLURM_JOB_ID}/sshentry.sh <<EOF
#!/bin/bash
echo "::sshentry: entered \$(hostname)"
[[ -f $CONTNAME ]] && echo "::worker container not found error" && exit 1
echo "::sshentry: running \$SSH_ORIGINAL_COMMAND"
exec docker exec $CONTNAME /bin/bash -c "\$SSH_ORIGINAL_COMMAND"
EOF

  [[ $DEBUG == 1 ]] && echo "::sshentry=" && cat /dev/shm/mpi/${SLURM_JOB_ID}/sshentry.sh

  # Create mpi hostlist
  for h in ${hosts[@]}; do
     echo "$h slots=${SLURM_NTASKS_PER_NODE}" >> /dev/shm/mpi/${SLURM_JOB_ID}/mpi_hosts
  done

  [[ $DEBUG == 1 ]] && echo '::mpi-host file=' && cat /dev/shm/mpi/${SLURM_JOB_ID}/mpi_hosts

  srun -n $SLURM_JOB_NUM_NODES --ntasks-per-node=1 bash -c "cp $(which ssh) /dev/shm/mpi/${SLURM_JOB_ID}/.;  chmod 755 /dev/shm/mpi/${SLURM_JOB_ID}/mca_params.conf;  chmod 755 /dev/shm/mpi/${SLURM_JOB_ID}/sshentry.sh"

  # Check that ssh/mpi dir has correct number of files
  # (fix: `exit $?` followed a successful echo and therefore exited 0)
  [[ $(ls /dev/shm/mpi/${SLURM_JOB_ID} | wc -w) -lt 5 ]]  && echo "ERR: /dev/shm/mpi/${SLURM_JOB_ID} doesn't exist or missing ssh/mpi files" && exit 1

fi

# Container launch
if [[ $INSLURM -eq 1 ]]; then

  # Launch containers behind srun

  [[ $DEBUG == 1 ]] && echo "" && echo ":Launch containers:  srun  -n $SLURM_JOB_NUM_NODES --ntasks-per-node=1 $DOCKEREXEC --name $CONTNAME $VOLS $VARS $CONT bash -c 'sleep infinity'"
  srun  -n $SLURM_JOB_NUM_NODES --ntasks-per-node=1 $DOCKEREXEC --name $CONTNAME $VOLS $VARS $CONT bash -c 'sleep infinity' & rv=$?
else
  $DOCKEREXEC --name $CONTNAME $VOLS $VARS $CONT bash -c 'sleep infinity' & rv=$?
fi
[[ $rv -ne 0 ]] && echo "ERR: Launch sleep containers failed." && exit $rv
echo "sleep 60 while we pull our container, good golly!"
sleep 60

# Run benchmarks
echo "sleep again for 20"
sleep 20
export EXTRA_PARAMS

(
# Launching app
echo 
echo "Launching user script on master node:"
  hostn=$MASTER_HOST
  $(eval echo $SSH) docker exec $VARS $CONTNAME $MPICMD ; rv=$?
  [[ $rv -ne 0 ]] && echo "ERR: User script failed." && exit $rv
# NOTE(review): $nrun is never assigned in this script, so the log lands in
# "<base>_.log" — confirm whether a run counter was intended.
) |& tee ${LOGFILE_BASE}_$nrun.log

# Clean up (note: on SLURM we skip this, as the epilogue will take care of it)
if [[ $INSLURM -eq 0 ]]; then
  docker rm -f $CONTNAME
fi

+ 0 - 90
TensorFlow/LanguageModeling/BERT/scripts/start_pretraining.sh

@@ -1,90 +0,0 @@
#!/bin/bash
# Submit a multi-node BERT pre-training job to slurm (via scripts/run.sub).
#
# Positional args (all optional, shown with defaults): node_type num_nodes
#   partition wall_time job_name root_dir train_batch_size eval_batch_size
#   train_steps warmup_steps learning_rate precision save_checkpoint_steps
#   results_dir checkpoints_dir
# NOTE(review): CODEDIR and LOGDIR are read from the caller's environment
# and are never set here — confirm they are exported before submitting.

node_type=${1:-"dgx1"}
num_nodes=${2:-1}
partition=${3:-"default"}
wall_time=${4:-"12:00:00"}
job_name=${5:-"tf_bert"}
root_dir=${6:-"$PWD"}
train_batch_size=${7:-4}
eval_batch_size=${8:-4}
train_steps=${9:-1000000}
warmup_steps=${10:-10000}
learning_rate=${11:-1e-4}
precision=${12:-"fp16_xla"}
save_checkpoint_steps=${13:-5000}
results_dir=${14:-"$root_dir/results"}
checkpoints_dir=${15:-"$root_dir/checkpoints"}

CONT=${CONT:-"gitlab-master.nvidia.com:5005/dl/dgx/tensorflow:19.03-py3-devel"}

BENCHMARK=${BENCHMARK:-"bert"}
BENCHMARK_NAME="bert"

# Per-system hardware topology used for the slurm resource requests below.
if [ "$node_type" = "dgx1" ] ; then
   echo "Running on dgx1 systems"
   DGXSYSTEM="DGX1"
   DGXNGPU=8
   DGXSOCKETCORES=20
   DGXNSOCKET=2
   DGXHT=2
   DGXIBDEVICES='--device=/dev/infiniband --device=/dev/infiniband/rdma_cm --device=/dev/infiniband/ucm3 --device=/dev/infiniband/ucm2 --device=/dev/infiniband/ucm1 --device=/dev/infiniband/ucm0 --device=/dev/infiniband/uverbs3 --device=/dev/infiniband/uverbs2 --device=/dev/infiniband/uverbs1 --device=/dev/infiniband/uverbs0 --device=/dev/infiniband/issm3 --device=/dev/infiniband/umad3 --device=/dev/infiniband/issm2 --device=/dev/infiniband/umad2 --device=/dev/infiniband/issm1 --device=/dev/infiniband/umad1 --device=/dev/infiniband/issm0 --device=/dev/infiniband/umad0'
elif [ "$node_type" = "dgx2h" ] ; then
   echo "Running on dgx2h systems"
   DGXSYSTEM="DGX2H"
   DGXNGPU=16
   DGXSOCKETCORES=24
   DGXNSOCKET=2
   DGXHT=2         # HT is on is 2, HT off is 1
   DGXIBDEVICES='--device=/dev/infiniband/rdma_cm --device=/dev/infiniband/ucm10 --device=/dev/infiniband/ucm9 --device=/dev/infiniband/ucm8 --device=/dev/infiniband/ucm7 --device=/dev/infiniband/ucm4 --device=/dev/infiniband/ucm3 --device=/dev/infiniband/ucm2 --device=/dev/infiniband/ucm1 --device=/dev/infiniband/uverbs10 --device=/dev/infiniband/uverbs9 --device=/dev/infiniband/uverbs8 --device=/dev/infiniband/uverbs7 --device=/dev/infiniband/uverbs4 --device=/dev/infiniband/uverbs3 --device=/dev/infiniband/uverbs2 --device=/dev/infiniband/uverbs1 --device=/dev/infiniband/issm10 --device=/dev/infiniband/umad10 --device=/dev/infiniband/issm9 --device=/dev/infiniband/umad9 --device=/dev/infiniband/issm8 --device=/dev/infiniband/umad8 --device=/dev/infiniband/issm7 --device=/dev/infiniband/umad7 --device=/dev/infiniband/issm4 --device=/dev/infiniband/umad4 --device=/dev/infiniband/issm3 --device=/dev/infiniband/umad3 --device=/dev/infiniband/issm2 --device=/dev/infiniband/umad2 --device=/dev/infiniband/issm1 --device=/dev/infiniband/umad1'
else
   # (fix: message previously said "dgx2" although the accepted value is
   # "dgx2h"; `exit -1` is non-portable — exit codes are 0-255)
   echo "Unknown <node_type>, must be either dgx1 or dgx2h"
   exit 1
fi

# Argument string consumed by run_pretraining.sh inside the container.
printf -v EXTRA_PARAMS "%d %d %e %s 1 %d %d %d false" $train_batch_size $eval_batch_size $learning_rate "$precision" $warmup_steps $train_steps $save_checkpoint_steps

export ROOTDIR=$root_dir
export WIKI_DIR=${WIKI_DIR:-$CODEDIR/data/wikipedia_corpus/final_tfrecords_sharded}
export BOOKS_DIR=${BOOKS_DIR:-$CODEDIR/data/bookcorpus/final_tfrecords_sharded}

VOLS="-v $ROOTDIR:/workspace/bert"
VOLS+=" -v $WIKI_DIR:/workspace/bert/data/wikipedia_corpus/final_tfrecord_sharded"
VOLS+=" -v $BOOKS_DIR:/workspace/bert/data/bookcorpus/final_tfrecord_sharded"
VOLS+=" -v $results_dir:/results"
VOLS+=" -v $checkpoints_dir:/checkpoints"

# Everything run.sub needs must be exported into its environment.
export VOLS
export CONT
export DGXSYSTEM
export DGXNGPU
export DGXIBDEVICES
export EXTRA_PARAMS

set -x
cd $CODEDIR
pwd

PART=""
if [ "$partition" != "default" ] ; then
   printf -v PART "%s" "-p $partition"
fi

# Global batch size = nodes * per-GPU train batch size * GPUs per node.
# (fix: the original referenced the undefined variable $batch_size)
export GBS=$(expr $num_nodes \* $train_batch_size \* $DGXNGPU)
printf -v TAG "%s_%dn_%s_gbs%d" "$job_name" $num_nodes "$precision" $GBS
export DATESTAMP=`date +'%y%m%d%H%M%S'`

sbatch $PART \
        -N $num_nodes \
        -t $wall_time \
        -J $job_name \
        --exclusive \
        --mem=0 \
        --mail-type=FAIL \
        --ntasks-per-node=$DGXNGPU \
        --threads-per-core=$DGXHT \
        --cores-per-socket=$DGXSOCKETCORES \
        --output=$LOGDIR/$TAG.$DATESTAMP.log \
        $CODEDIR/scripts/run.sub
set +x
-