---
data_cfgs:
  eval_data_files: null
  eval_datasets: /home/align-anything/datasets/40k_ti2t_filtered
  eval_name: null
  eval_optional_args: []
  eval_size: null
  eval_split: train
  eval_subset: null
  eval_template: AA_TI2T
  train_data_files: null
  train_datasets: /data/align-anything/jiayi/ti2t/refine_gen_wo_res/1021_13b_2000
  train_name: null
  train_optional_args: []
  train_size: null
  train_split: train
  train_subset: null
  train_template: AA_TI2T_Critique
logger_cfgs:
  cache_dir: null
  log_project: align-anything
  log_run_name: rm
  log_type: wandb
  output_dir: /data/align-anything/jiayi/ti2t/rm_10_22_ours_13b_2000
  save_interval: 598.0
model_cfgs:
  model_max_length: 2048
  model_name_or_path: /data/models/llava/llava-1.5-13b-hf
  trust_remote_code: true
special_tokens: null
train_cfgs:
  adam_betas:
  - 0.9
  - 0.95
  bf16: true
  ds_cfgs: ds_z3_config.json
  epochs: 5.0
  eval_interval: 10
  eval_strategy: epoch
  fp16: false
  freeze_language_model: false
  freeze_mm_proj: false
  freeze_vision_tower: true
  gradient_accumulation_steps: 1
  gradient_checkpointing: true
  learning_rate: 3.0e-05
  lr_scheduler_type: constant_with_warmup
  lr_warmup_ratio: 0.03
  per_device_eval_batch_size: 4
  per_device_train_batch_size: 4
  regularization: 0.001
  seed: 42
  weight_decay: 0.0