save_data: /mnt/netapp1/Proxecto_NOS/mt/corpus/iacobus/pt-gl/aut/models
## Where the vocab(s) will be written
src_vocab: /mnt/netapp1/Proxecto_NOS/mt/corpus/iacobus/pt-gl/aut/models/run/bpe.vocab.src
tgt_vocab: /mnt/netapp1/Proxecto_NOS/mt/corpus/iacobus/pt-gl/aut/models/run/bpe.vocab.tgt
overwrite: True
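# NOTE: the vocab files above are normally generated before training, e.g. with
# `onmt_build_vocab -config <this file> -n_sample -1` (standard OpenNMT-py usage
# assumed here; -n_sample -1 reads the full corpus, adjust as needed)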
# Corpus opts:
data:
#    en-es:
#        path_src: /mnt/netapp1/Proxecto_NOS/mt/corpus/nmt-pld/en-es/train.en10k.txt
#        path_tgt: /mnt/netapp1/Proxecto_NOS/mt/corpus/nmt-pld/en-es/train.es10k.txt
#        transforms: [bpe, filtertoolong]
#        weight: 100
#    en-pt:
#        path_src: /mnt/netapp1/Proxecto_NOS/mt/corpus/nmt-pld/en-pt/train.en10k.txt
#        path_tgt: /mnt/netapp1/Proxecto_NOS/mt/corpus/nmt-pld/en-pt/train.pt10k.txt
#        weight: 5
    pt-gl:
        path_src: /mnt/netapp1/Proxecto_NOS/mt/corpus/iacobus/pt-gl/aut/train.pt35k.txt
        path_tgt: /mnt/netapp1/Proxecto_NOS/mt/corpus/iacobus/pt-gl/aut/train.gl35k.txt
        transforms: [bpe, filtertoolong]
#    en-it:
#        path_src: /mnt/netapp1/Proxecto_NOS/mt/corpus/nmt-pld/en-it/train.en10k.txt
#        path_tgt: /mnt/netapp1/Proxecto_NOS/mt/corpus/nmt-pld/en-it/train.it10k.txt
#    en-ro:
#        path_src: /mnt/netapp1/Proxecto_NOS/mt/corpus/nmt-pld/en-ro/train.en10k.txt
#        path_tgt: /mnt/netapp1/Proxecto_NOS/mt/corpus/nmt-pld/en-ro/train.ro10k.txt
    valid:
        path_src: /mnt/netapp1/Proxecto_NOS/mt/corpus/iacobus/pt-gl/aut/valid.pt35k.txt
        path_tgt: /mnt/netapp1/Proxecto_NOS/mt/corpus/iacobus/pt-gl/aut/valid.gl35k.txt
        transforms: [bpe, filtertoolong]
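# a corpus without an explicit weight defaults to weight 1; with only pt-gl
# active, corpus weighting has no effect in this configuration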
### Transform related opts:
#### Subword
src_subword_model: /mnt/netapp1/Proxecto_NOS/mt/corpus/iacobus/pt-gl/aut/pt_35k.code
tgt_subword_model: /mnt/netapp1/Proxecto_NOS/mt/corpus/iacobus/pt-gl/aut/gl_35k.code
#src_subword_vocab: /home/compartido/paulo/modelos/run/bpe.vocab.src
#tgt_subword_vocab: /home/compartido/paulo/modelos/run/bpe.vocab.tgt
#src_subword_model: ../sentencepiece/en-gl/en.sp.model
#tgt_subword_model: ../sentencepiece/en-gl/gl.sp.model
src_subword_type: bpe
tgt_subword_type: bpe
src_subword_nbest: 1
src_subword_alpha: 0.0
tgt_subword_nbest: 1
tgt_subword_alpha: 0.0
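# nbest 1 with alpha 0.0 disables subword sampling, so BPE segmentation is
# deterministic (no subword regularization during training)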
#### Filter
src_seq_length: 150
tgt_seq_length: 150
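# pairs where either side exceeds 150 tokens after BPE are dropped by the
# filtertoolong transform (bpe runs first in the transforms list above)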
# silently ignore empty lines in the data
skip_empty_level: silent
##embeddings
#src_embeddings: /mnt/lustre/scratch/nlsas//home/usc/ci/pgo/modelos/embeddings/en.emb.txt
#tgt_embeddings: /mnt/lustre/scratch/nlsas//home/usc/ci/pgo/modelos/embeddings/gl.emb.txt
src_embeddings: /mnt/netapp1/Proxecto_NOS/mt/treino_data/embeddings/pt.emb.txt
tgt_embeddings: /mnt/netapp1/Proxecto_NOS/mt/treino_data/embeddings/gl.emb.txt
## supported types: GloVe, word2vec
embeddings_type: "word2vec"
# word_vec_size needs to match the dimension of the pretrained embeddings
#word_vec_size: 300
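# NOTE: word_vec_size is set to 512 in the Model section below; per the comment
# above, the pretrained pt/gl vectors are therefore assumed to be 512-dimensional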
# General opts
save_model: /mnt/netapp1/Proxecto_NOS/mt/corpus/iacobus/pt-gl/aut/models/
keep_checkpoint: 50
save_checkpoint_steps: 10000
average_decay: 0.0005
seed: 1234
report_every: 1000
train_steps: 400000
valid_steps: 10000
# Batching
queue_size: 10000
bucket_size: 32768
world_size: 1
gpu_ranks: [0]
batch_type: "tokens"
batch_size: 4096
valid_batch_size: 64
batch_size_multiple: 1
max_generator_batches: 2
accum_count: [4]
accum_steps: [0]
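# effective batch: 4096 tokens x accum_count 4 = ~16k tokens per optimizer
# update (world_size 1, single GPU)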
# Optimization
model_dtype: "fp16"
optim: "adam"
learning_rate: 2
warmup_steps: 8000
decay_method: "noam"
adam_beta2: 0.998
max_grad_norm: 0
label_smoothing: 0.1
param_init: 0
param_init_glorot: true
normalization: "tokens"
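# noam schedule: lr_step = learning_rate * hidden_size^-0.5 *
# min(step^-0.5, step * warmup_steps^-1.5); with learning_rate 2, hidden_size
# 512 and warmup_steps 8000 this peaks at roughly 1e-3 at step 8000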
# Model
encoder_type: transformer
decoder_type: transformer
position_encoding: true
max_len: 6000
#max_relative_positions: 20
enc_layers: 12
dec_layers: 12
heads: 16
#rnn_size: 512
hidden_size: 512
word_vec_size: 512
transformer_ff: 2048
dropout_steps: [0]
dropout: [0.1]
attention_dropout: [0.1]
share_decoder_embeddings: true
share_embeddings: false
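# share_embeddings stays false because src and tgt use separate BPE models
# above (sharing would require a joint vocabulary); decoder input and output
# embeddings are still tied via share_decoder_embeddings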