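# Hyperparameter dump for a Llama-2-7b fine-tuning run, apparently produced by
# a llama-recipes-style trainer extended with project-specific retrieval and
# context flags (add_ctxemb, ctx_*, ret_*, np_*). Note that most values are
# serialized as quoted strings rather than native YAML types.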
add_ctxemb: 'False'
add_vocab: 'False'
all_gather: 'true'
batch_size_training: '16'
checkpoint_type: StateDictType.SHARDED_STATE_DICT
clipping_norm: '-1.0'
compare: 'False'
cpu_np_head: 'False'
ctx_proj_layer: 'False'
ctx_use_peft: 'False'
dataset: llava_selfrag_single_dataset
dist_checkpoint_folder: full.prompt_llava.context_mask.with_context.without_sp.key_original_epoch2
dist_checkpoint_root_folder: /apdcephfs_sh2/share_300000800/user/kaixinma/amylee/RedPajama-Data/img/results
enable_fsdp: 'True'
freeze_ctx_encoder: 'False'
freeze_layers: 'False'
freeze_question_encoder: 'False'
from_hf: 'False'
fsdp_activation_checkpointing: 'True'
gamma: '0.85'
load_np_head: 'False'
low_cpu_fsdp: 'True'
lr: 2e-05
memory_bank_length: '0'
micro_batch_size: '16'
mixed_precision: 'True'
model_name: meta-llama/Llama-2-7b-hf
model_use_peft: 'False'
natural_form: 'True'
np_weight: '100.0'
num_epochs: '3'
num_freeze_layers: '1'
num_workers_dataloader: '1'
one_gpu: 'False'
optimizer: AdamW
output_dir: peft_checkpoint
peft_method: None
pure_bf16: 'False'
quantization: 'False'
question_proj_layer: 'False'
resume_epoch: '0'
ret_checkpoint_folder: ''
ret_first: 'False'
retriever: ''
run_validation: 'True'
save_model: 'True'
save_optimizer: 'False'
seed: '2'
sharding_strategy: ShardingStrategy.FULL_SHARD
single: 'False'
target_modules: ''
token_name: meta-llama/Llama-2-7b-hf
train: 'True'
use_fast_kernels: 'False'
use_fp16: 'False'
val_batch_size: '1'
weight_decay: '0.0'
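
Two details matter if this dump is loaded programmatically. First, nearly every value is a quoted string, so a plain yaml.safe_load returns 'False' and '16' rather than native booleans and integers, and the non-empty string 'False' is truthy in Python; even the unquoted lr: 2e-05 comes back as a string, because PyYAML's YAML 1.1 float rule requires a decimal point before the exponent. Second, the enum fields store the enum's printed form (e.g. ShardingStrategy.FULL_SHARD), not a bare member name. The sketch below shows one way to coerce the values back; the file name params.yaml and the coerce helper are illustrative, not part of the original code.

import yaml
from torch.distributed.fsdp import ShardingStrategy, StateDictType


def coerce(value):
    """Best-effort conversion of the quoted scalars in the dump back to
    native types: 'True'/'true' -> True, '16' -> 16, '0.85' -> 0.85."""
    if not isinstance(value, str):
        return value
    if value in ("True", "true"):
        return True
    if value in ("False", "false"):
        return False
    if value == "None":
        return None
    for cast in (int, float):
        try:
            return cast(value)
        except ValueError:
            pass
    return value  # paths, model names, enum reprs, and '' stay strings


with open("params.yaml") as f:  # hypothetical file name for this dump
    config = {k: coerce(v) for k, v in yaml.safe_load(f).items()}

assert config["enable_fsdp"] is True
assert config["batch_size_training"] == 16
assert config["lr"] == 2e-05

# Enum fields keep their printed form; map them back to the torch FSDP
# enums by member name:
sharding = ShardingStrategy[config["sharding_strategy"].split(".")[-1]]
ckpt_type = StateDictType[config["checkpoint_type"].split(".")[-1]]

One consequence worth noting: with batch_size_training and micro_batch_size both 16, a trainer that follows the llama-recipes convention (gradient_accumulation_steps = batch_size_training // micro_batch_size) would run with an accumulation factor of 1, i.e. no gradient accumulation.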