shaojintian committed on
Commit
92b1c73
·
verified ·
1 Parent(s): 272166c

Upload folder using huggingface_hub

Browse files
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
config.json ADDED
@@ -0,0 +1,140 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "architecture": "AutoDiffusionLM",
3
+ "architectures": [
4
+ "MyDecoderOnlyModel"
5
+ ],
6
+ "batch_size": 2,
7
+ "bias": false,
8
+ "block_size": "${model.length}",
9
+ "checkpointing": {
10
+ "resume_ckpt_path": "${.save_dir}/checkpoints/last.ckpt",
11
+ "resume_from_ckpt": true,
12
+ "save_dir": "${cwd:}"
13
+ },
14
+ "complex_attention": false,
15
+ "data": {
16
+ "output_dir": "./data/pt/huggingface/preprocessed_datasets",
17
+ "train": "./dataset/da"
18
+ },
19
+ "data_path": "./dataset/da",
20
+ "debug": false,
21
+ "diffusion": "absorbing_state",
22
+ "dropout": 0.0,
23
+ "embedding_type": "default",
24
+ "eval": {
25
+ "checkpoint_path": "${cwd:}/checkpoints/last.ckpt",
26
+ "compute_perplexity_on_sanity": false,
27
+ "disable_ema": false,
28
+ "gen_ppl_eval_model_name_or_path": "gpt2-large",
29
+ "generate_samples": false,
30
+ "perplexity_batch_size": 8
31
+ },
32
+ "flash_attention": false,
33
+ "hidden_dim": 512,
34
+ "hydra": {
35
+ "job": {
36
+ "chdir": true
37
+ },
38
+ "run": {
39
+ "dir": "./"
40
+ }
41
+ },
42
+ "intermediate_size": 512,
43
+ "loader": {
44
+ "batch_size": "${div_up:${.global_batch_size}, ${eval:${trainer.devices} * ${trainer.num_nodes}}}",
45
+ "eval_batch_size": "${div_up:${.eval_global_batch_size}, ${eval:${trainer.devices} * ${trainer.num_nodes}}}",
46
+ "eval_global_batch_size": "${.global_batch_size}",
47
+ "global_batch_size": 512,
48
+ "num_workers": "${eval:\"len(__import__('os').sched_getaffinity(0))\"}",
49
+ "pin_memory": true
50
+ },
51
+ "max_input_ids_length": 4000,
52
+ "max_seq_len": 512,
53
+ "mode": "train",
54
+ "model": {
55
+ "custom_model_load_path": "./model/",
56
+ "deepspeed": null,
57
+ "kernel_injection": {
58
+ "enable": true
59
+ },
60
+ "length": 512,
61
+ "offload_optimizer": {
62
+ "device": "cpu",
63
+ "pin_memory": true
64
+ },
65
+ "offload_param": {
66
+ "device": "cpu",
67
+ "pin_memory": true
68
+ },
69
+ "pipeline": {
70
+ "enabled": true,
71
+ "num_stages": 8,
72
+ "stage_id": "${LOCAL_RANK}"
73
+ },
74
+ "save_dir": "./model",
75
+ "tensor_parallel": {
76
+ "enabled": true,
77
+ "size": 8
78
+ },
79
+ "zero_stage": 2
80
+ },
81
+ "model_type": "autodiffusion",
82
+ "multiple_of": 32,
83
+ "n_layers": 8,
84
+ "num_attention_heads": 8,
85
+ "optim": {
86
+ "beta1": 0.9,
87
+ "beta2": 0.999,
88
+ "eps": "1e-8",
89
+ "lr": "3e-4",
90
+ "weight_decay": 0
91
+ },
92
+ "rope_beta": 10000.0,
93
+ "rope_scaling_factor": 1.0,
94
+ "rope_scaling_type": "dynamic",
95
+ "sampling": {
96
+ "first_hitting": true,
97
+ "kv_cache": false,
98
+ "logdir": "./samples_${algo.name}_len${model.length}_blocksize${block_size}",
99
+ "noise_removal": false,
100
+ "nucleus_p": 1.0,
101
+ "num_sample_batches": 1,
102
+ "var_length": false
103
+ },
104
+ "seed": 42,
105
+ "tokenizer_cache": "./tokenizer/cache/FacebookAI/xlm-roberta-base",
106
+ "tokenizer_name": "FacebookAI/xlm-roberta-base",
107
+ "torch_dtype": "float32",
108
+ "training": {
109
+ "adam_beta1": 0.9,
110
+ "adam_beta2": 0.95,
111
+ "adam_epsilon": "1e-8",
112
+ "batch_size": 8,
113
+ "debug": false,
114
+ "epochs": 1,
115
+ "final_path": "./checkpoints/last.ckpt/final_model",
116
+ "fp16": false,
117
+ "fp16_opt_level": "O1",
118
+ "gradient_accumulation_steps": 1,
119
+ "gradient_checkpointing": false,
120
+ "gradient_checkpointing_kwargs": {},
121
+ "learning_rate": "1e-4",
122
+ "learning_rate_decay": 0.0,
123
+ "log_step": 100,
124
+ "max_grad_norm": 1.0,
125
+ "output_path": "./checkpoints/last.ckpt",
126
+ "seed": 42,
127
+ "steps_per_epoch": 100,
128
+ "warmup_steps": 1000,
129
+ "weight_decay": 0.0
130
+ },
131
+ "transformers_version": "4.51.3",
132
+ "vocab_size": 250002,
133
+ "wandb": {
134
+ "group": null,
135
+ "job_type": null,
136
+ "name": null,
137
+ "notes": "Block Denoising Discrete Diffusion Language Models",
138
+ "project": "autodiffusion"
139
+ }
140
+ }
model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4d29f4604ad902b9333b750cd541edd02b20f02dd795d924f9644a4b83b113b9
3
+ size 1032366384
random_states_0.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:398e28fa41dc4179a48c98efe15e0a19f453a864a2ddc4b4320a19969c99764e
3
+ size 15580
special_tokens_map.json ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token": "<s>",
3
+ "cls_token": "<s>",
4
+ "eos_token": "</s>",
5
+ "mask_token": {
6
+ "content": "<mask>",
7
+ "lstrip": true,
8
+ "normalized": false,
9
+ "rstrip": false,
10
+ "single_word": false
11
+ },
12
+ "pad_token": "<pad>",
13
+ "sep_token": "</s>",
14
+ "unk_token": "<unk>"
15
+ }
tokenizer.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3a56def25aa40facc030ea8b0b87f3688e4b3c39eb8b45d5702b3a1300fe2a20
3
+ size 17082734
tokenizer_config.json ADDED
@@ -0,0 +1,55 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "added_tokens_decoder": {
3
+ "0": {
4
+ "content": "<s>",
5
+ "lstrip": false,
6
+ "normalized": false,
7
+ "rstrip": false,
8
+ "single_word": false,
9
+ "special": true
10
+ },
11
+ "1": {
12
+ "content": "<pad>",
13
+ "lstrip": false,
14
+ "normalized": false,
15
+ "rstrip": false,
16
+ "single_word": false,
17
+ "special": true
18
+ },
19
+ "2": {
20
+ "content": "</s>",
21
+ "lstrip": false,
22
+ "normalized": false,
23
+ "rstrip": false,
24
+ "single_word": false,
25
+ "special": true
26
+ },
27
+ "3": {
28
+ "content": "<unk>",
29
+ "lstrip": false,
30
+ "normalized": false,
31
+ "rstrip": false,
32
+ "single_word": false,
33
+ "special": true
34
+ },
35
+ "250001": {
36
+ "content": "<mask>",
37
+ "lstrip": true,
38
+ "normalized": false,
39
+ "rstrip": false,
40
+ "single_word": false,
41
+ "special": true
42
+ }
43
+ },
44
+ "bos_token": "<s>",
45
+ "clean_up_tokenization_spaces": false,
46
+ "cls_token": "<s>",
47
+ "eos_token": "</s>",
48
+ "extra_special_tokens": {},
49
+ "mask_token": "<mask>",
50
+ "model_max_length": 512,
51
+ "pad_token": "<pad>",
52
+ "sep_token": "</s>",
53
+ "tokenizer_class": "XLMRobertaTokenizer",
54
+ "unk_token": "<unk>"
55
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3c1d2e42a2a0d9ba0909ff5128e887b29b6ecd591483df42b3d5058d40a325c7
3
+ size 5304