Cleaner lines for CI args
parent 3438dd2beb
commit a050b076cb
@@ -14,7 +14,8 @@ fi;
 # and when trying to run on multiple devices (like GPUs), this will break
 export CUDA_VISIBLE_DEVICES=0
 
-python -u train.py --alphabet_config_path "data/alphabet.txt" --show_progressbar false --early_stop false \
+python -u train.py --alphabet_config_path "data/alphabet.txt" \
+  --show_progressbar false --early_stop false \
   --train_files ${ldc93s1_csv} --train_batch_size 1 \
   --scorer "" \
   --augment dropout \

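The split is purely cosmetic: a backslash immediately before the newline makes the shell splice the lines back together before the command is parsed, so train.py receives exactly the same argument list as before. A minimal sketch of the equivalence, using echo as a stand-in for train.py (not part of the CI scripts):

# These two invocations pass an identical argument list; the shell removes
# the backslash-newline pair before word splitting.
echo --alphabet_config_path "data/alphabet.txt" --show_progressbar false --early_stop false

echo --alphabet_config_path "data/alphabet.txt" \
  --show_progressbar false --early_stop false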
@@ -14,7 +14,8 @@ fi;
 # and when trying to run on multiple devices (like GPUs), this will break
 export CUDA_VISIBLE_DEVICES=0
 
-python -u train.py --alphabet_config_path "data/alphabet.txt" --show_progressbar false --early_stop false \
+python -u train.py --alphabet_config_path "data/alphabet.txt" \
+  --show_progressbar false --early_stop false \
   --train_files ${ldc93s1_csv} --train_batch_size 1 \
   --dev_files ${ldc93s1_csv} --dev_batch_size 1 \
   --test_files ${ldc93s1_csv} --test_batch_size 1 \

@@ -20,7 +20,8 @@ fi;
 # and when trying to run on multiple devices (like GPUs), this will break
 export CUDA_VISIBLE_DEVICES=0
 
-python -u train.py --alphabet_config_path "data/alphabet.txt" --show_progressbar false --early_stop false \
+python -u train.py --alphabet_config_path "data/alphabet.txt" \
+  --show_progressbar false --early_stop false \
   --train_files ${ldc93s1_sdb} --train_batch_size 1 \
   --dev_files ${ldc93s1_sdb} --dev_batch_size 1 \
   --test_files ${ldc93s1_sdb} --test_batch_size 1 \

@@ -17,7 +17,8 @@ fi;
 # and when trying to run on multiple devices (like GPUs), this will break
 export CUDA_VISIBLE_DEVICES=0
 
-python -u train.py --alphabet_config_path "data/alphabet.txt" --show_progressbar false --early_stop false \
+python -u train.py --alphabet_config_path "data/alphabet.txt" \
+  --show_progressbar false --early_stop false \
   --train_files ${ldc93s1_csv} --train_batch_size 1 \
   --feature_cache '/tmp/ldc93s1_cache' \
   --dev_files ${ldc93s1_csv} --dev_batch_size 1 \

@@ -17,7 +17,8 @@ fi;
 # and when trying to run on multiple devices (like GPUs), this will break
 export CUDA_VISIBLE_DEVICES=0
 
-python -u train.py --alphabet_config_path "data/alphabet.txt" --show_progressbar false --early_stop false \
+python -u train.py --alphabet_config_path "data/alphabet.txt" \
+  --show_progressbar false --early_stop false \
   --train_files ${ldc93s1_csv} --train_batch_size 1 \
   --dev_files ${ldc93s1_csv} --dev_batch_size 1 \
   --test_files ${ldc93s1_csv} --test_batch_size 1 \

@@ -23,7 +23,8 @@ fi;
 # and when trying to run on multiple devices (like GPUs), this will break
 export CUDA_VISIBLE_DEVICES=0
 
-python -u train.py --alphabet_config_path "data/alphabet.txt" --show_progressbar false --early_stop false \
+python -u train.py --alphabet_config_path "data/alphabet.txt" \
+  --show_progressbar false --early_stop false \
   --train_files ${ldc93s1_sdb} --train_batch_size 1 \
   --dev_files ${ldc93s1_sdb} --dev_batch_size 1 \
   --test_files ${ldc93s1_sdb} --test_batch_size 1 \

@@ -23,7 +23,8 @@ fi;
 # and when trying to run on multiple devices (like GPUs), this will break
 export CUDA_VISIBLE_DEVICES=0
 
-python -u train.py --alphabet_config_path "data/alphabet.txt" --show_progressbar false --early_stop false \
+python -u train.py --alphabet_config_path "data/alphabet.txt" \
+  --show_progressbar false --early_stop false \
   --train_files ${ldc93s1_sdb} ${ldc93s1_csv} --train_batch_size 1 \
   --feature_cache '/tmp/ldc93s1_cache_sdb_csv' \
   --dev_files ${ldc93s1_sdb} ${ldc93s1_csv} --dev_batch_size 1 \

@@ -14,7 +14,8 @@ fi;
 # and when trying to run on multiple devices (like GPUs), this will break
 export CUDA_VISIBLE_DEVICES=0
 
-python -u train.py --alphabet_config_path "data/alphabet.txt" --show_progressbar false --early_stop false \
+python -u train.py --alphabet_config_path "data/alphabet.txt" \
+  --show_progressbar false --early_stop false \
   --train_files ${ldc93s1_csv} --train_batch_size 1 \
   --dev_files ${ldc93s1_csv} --dev_batch_size 1 \
   --test_files ${ldc93s1_csv} --test_batch_size 1 \

@@ -16,7 +16,8 @@ fi;
 # and when trying to run on multiple devices (like GPUs), this will break
 export CUDA_VISIBLE_DEVICES=0
 
-python -u train.py --alphabet_config_path "data/alphabet.txt" --show_progressbar false \
+python -u train.py --alphabet_config_path "data/alphabet.txt" \
+  --show_progressbar false \
   --n_hidden 100 \
   --checkpoint_dir '/tmp/ckpt' \
   --export_dir '/tmp/train_tflite' \
@@ -26,7 +27,8 @@ python -u train.py --alphabet_config_path "data/alphabet.txt" --show_progressbar
 
 mkdir /tmp/train_tflite/en-us
 
-python -u train.py --alphabet_config_path "data/alphabet.txt" --show_progressbar false \
+python -u train.py --alphabet_config_path "data/alphabet.txt" \
+  --show_progressbar false \
   --n_hidden 100 \
   --checkpoint_dir '/tmp/ckpt' \
   --export_dir '/tmp/train_tflite/en-us' \
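Assembled from the context lines above, the export script's two invocations would read roughly as follows after the change. This is only a sketch: the flags that fall outside the hunks are not visible in the diff, so the trailing continuations to them are dropped here.

export CUDA_VISIBLE_DEVICES=0

# First export into /tmp/train_tflite (remaining flags omitted; they sit outside the hunks)
python -u train.py --alphabet_config_path "data/alphabet.txt" \
  --show_progressbar false \
  --n_hidden 100 \
  --checkpoint_dir '/tmp/ckpt' \
  --export_dir '/tmp/train_tflite'

mkdir /tmp/train_tflite/en-us

# Second export into the en-us subdirectory created above
python -u train.py --alphabet_config_path "data/alphabet.txt" \
  --show_progressbar false \
  --n_hidden 100 \
  --checkpoint_dir '/tmp/ckpt' \
  --export_dir '/tmp/train_tflite/en-us'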