lixiangchun committed · verified
Commit c9e5de4 · 1 Parent(s): 71df0a9

initial upload

A very tiny BERT model trained on the top-gene rankings of more than 10 million cells.
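For orientation, here is a minimal usage sketch (not part of the commit) showing how the uploaded checkpoint and tokenizer might be loaded for masked-gene prediction. The local paths mirror the directories added below, and the gene symbols in the example are hypothetical placeholders.

import torch
from transformers import BertForMaskedLM, PreTrainedTokenizerFast

# Paths assume this repository has been cloned locally.
tokenizer = PreTrainedTokenizerFast.from_pretrained("tokenizer")
model = BertForMaskedLM.from_pretrained("checkpoint/checkpoint-154000")
model.eval()

# One "sentence" is a whitespace-separated ranking of gene symbols for a single cell.
genes = ["GENE_A", "GENE_B", "GENE_C"]  # hypothetical gene symbols
inputs = tokenizer(genes, is_split_into_words=True, return_tensors="pt")
inputs["input_ids"][0, 2] = tokenizer.mask_token_id  # mask the second gene (position 0 is [CLS])
with torch.no_grad():
    logits = model(**inputs).logits  # shape: (1, sequence_length, vocab_size)
top_ids = logits[0, 2].topk(5).indices
print(tokenizer.convert_ids_to_tokens(top_ids.tolist()))  # top-5 candidate genes for the masked slot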

checkpoint/checkpoint-154000/config.json ADDED
@@ -0,0 +1,29 @@
+ {
+   "_name_or_path": "config.json",
+   "architectures": [
+     "BertForMaskedLM"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "classifier_dropout": null,
+   "cls_token_id": 2,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 256,
+   "initializer_range": 0.02,
+   "intermediate_size": 1024,
+   "layer_norm_eps": 1e-12,
+   "mask_token_id": 4,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 8,
+   "num_hidden_layers": 6,
+   "pad_token_id": 0,
+   "position_embedding_type": "absolute",
+   "sep_token_id": 3,
+   "torch_dtype": "float32",
+   "transformers_version": "4.41.2",
+   "type_vocab_size": 2,
+   "unk_token_id": 1,
+   "use_cache": true,
+   "vocab_size": 21051
+ }
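As a rough consistency check (a sketch, not part of the commit), instantiating a BertForMaskedLM from these hyperparameters gives a model of roughly 10 million parameters, which matches the ~41 MB float32 model.safetensors in this checkpoint.

from transformers import BertConfig, BertForMaskedLM

# Mirror the key fields of checkpoint/checkpoint-154000/config.json.
config = BertConfig(
    vocab_size=21051,
    hidden_size=256,
    num_hidden_layers=6,
    num_attention_heads=8,
    intermediate_size=1024,
    max_position_embeddings=512,
    pad_token_id=0,
)
model = BertForMaskedLM(config)
n_params = sum(p.numel() for p in model.parameters())
print(f"{n_params / 1e6:.2f}M parameters")  # roughly 10M for this configuration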
checkpoint/checkpoint-154000/generation_config.json ADDED
@@ -0,0 +1,5 @@
+ {
+   "_from_model_config": true,
+   "pad_token_id": 0,
+   "transformers_version": "4.41.2"
+ }
checkpoint/checkpoint-154000/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:77938668f182079c4bec8df74124a9535a9a2fdd96fbf24233f1d5ec0832ef9f
+ size 41400444
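The weights file is a Git LFS pointer; its ~41 MB size is consistent with roughly 10 million float32 parameters. A quick way to inspect the stored tensors without building the model (a sketch, assuming the LFS file has been pulled locally):

from safetensors import safe_open

# List tensor names and shapes stored in the checkpoint.
with safe_open("checkpoint/checkpoint-154000/model.safetensors", framework="pt") as f:
    for name in f.keys():
        print(name, tuple(f.get_tensor(name).shape))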
checkpoint/checkpoint-154000/special_tokens_map.json ADDED
@@ -0,0 +1,37 @@
+ {
+   "cls_token": {
+     "content": "[CLS]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "mask_token": {
+     "content": "[MASK]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "[PAD]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "sep_token": {
+     "content": "[SEP]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "[UNK]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
checkpoint/checkpoint-154000/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint/checkpoint-154000/tokenizer_config.json ADDED
@@ -0,0 +1,52 @@
+ {
+   "added_tokens_decoder": {
+     "0": {
+       "content": "[PAD]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "[UNK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "[CLS]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "3": {
+       "content": "[SEP]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "4": {
+       "content": "[MASK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "clean_up_tokenization_spaces": true,
+   "cls_token": "[CLS]",
+   "mask_token": "[MASK]",
+   "model_max_length": 512,
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "tokenizer_class": "PreTrainedTokenizerFast",
+   "unk_token": "[UNK]"
+ }
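In practice this configuration means IDs 0-4 are reserved for the special tokens and every encoded gene ranking is wrapped in [CLS] ... [SEP]. A short check (a sketch, not part of the commit; the gene symbols are hypothetical):

from transformers import PreTrainedTokenizerFast

tok = PreTrainedTokenizerFast.from_pretrained("checkpoint/checkpoint-154000")
print(tok.pad_token_id, tok.unk_token_id, tok.cls_token_id, tok.sep_token_id, tok.mask_token_id)
# Expected 0 1 2 3 4, per added_tokens_decoder above.

enc = tok(["GENE_A", "GENE_B"], is_split_into_words=True)
print(tok.convert_ids_to_tokens(enc["input_ids"]))
# ['[CLS]', ..., '[SEP]']; out-of-vocabulary symbols map to '[UNK]'.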
checkpoint/checkpoint-154000/trainer_state.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint/runs/Oct22_09-35-03_localhost.tmu/events.out.tfevents.1729561471.localhost.tmu.57176.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e864b0ccb62ecc71ba965ce6b1bbd551930aeb7487716a794afa961986a2cfed
+ size 177822
checkpoint/runs/Oct22_10-54-47_localhost.tmu/events.out.tfevents.1729566170.localhost.tmu.17941.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:843de1fc19a3c982187336d88cdecab4642bac70e8124c942a1e54b0122d3cc1
+ size 178081
checkpoint/runs/Oct22_12-47-27_localhost.tmu/events.out.tfevents.1729572989.localhost.tmu.113215.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3ddb20078a677088da7ba8baa7938866fc481ecce7b469a873a17e357b15851a
+ size 193254
checkpoint/runs/Oct22_16-30-26_localhost.tmu/events.out.tfevents.1729586363.localhost.tmu.100709.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c047e480675bae4d8b28b59462fdef282763dc85ed34c045057a7490eb75aff6
+ size 2800710
config.json ADDED
@@ -0,0 +1,31 @@
+ {
+   "_name_or_path": "bert-1536-8-16-2023Feb25/config.json",
+   "_attn_implementation": "sdpa",
+   "architectures": [
+     "BertForMaskedLM"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "classifier_dropout": null,
+   "pad_token_id": 0,
+   "unk_token_id": 1,
+   "cls_token_id": 2,
+   "sep_token_id": 3,
+   "mask_token_id": 4,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 256,
+   "initializer_range": 0.02,
+   "intermediate_size": 1024,
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 8,
+   "num_hidden_layers": 6,
+   "position_embedding_type": "absolute",
+   "torch_dtype": "float32",
+   "transformers_version": "4.25.0.dev0",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 21051
+ }
+
tokenizer/bertbuildtokenizer.py ADDED
@@ -0,0 +1,79 @@
+ from tokenizers import Tokenizer
+ from tokenizers.models import WordLevel
+ from tokenizers.trainers import WordLevelTrainer
+ from tokenizers.pre_tokenizers import Whitespace
+ from transformers import PreTrainedTokenizerFast
+ from tokenizers.processors import TemplateProcessing
+ import os
+ import json
+
+ def build_tokenizer(files):
+     assert type(files) == list and len(files) > 0
+
+     # Build a word-level tokenizer, i.e. tokenize sentences by whitespace.
+     tokenizer = Tokenizer(WordLevel(unk_token="[UNK]"))
+     trainer = WordLevelTrainer(special_tokens=["[PAD]", "[UNK]", "[CLS]", "[SEP]", "[MASK]"])
+     tokenizer.pre_tokenizer = Whitespace()
+     tokenizer.train(files, trainer)
+
+     return tokenizer
+
+
+ def tokenizer_from_file(tokenizer_file):
+     tokenizer = Tokenizer.from_file(tokenizer_file)
+
+     #sentinel_tokens = [(f"<extra_id_{i}>", tokenizer.token_to_id(f"<extra_id_{i}>")) for i in range(100)]
+     # For BERT, we want the tokenizer to automatically add special tokens, like "[CLS]" or "[SEP]".
+     # GPT does not require [CLS] and [SEP] at pretraining, while BERT requires them.
+     # https://swethatanamala.github.io/2018/12/24/summary-of-bert-paper/
+     # GPT converges faster with [BOS] and [EOS] added than without them.
+     tokenizer.post_processor = TemplateProcessing(
+         single="[CLS] $A [SEP]",  # BERT
+         ##single="[BOS] $A [EOS]",  # GPT
+         ##single="$A </s>",
+         pair="[CLS] $A [SEP] $B:1 [SEP]:1",
+         special_tokens=[
+             ("[PAD]", tokenizer.token_to_id("[PAD]")),
+             ("[UNK]", tokenizer.token_to_id("[UNK]")),
+             ("[CLS]", tokenizer.token_to_id("[CLS]")),
+             ("[SEP]", tokenizer.token_to_id("[SEP]")),
+             ("[MASK]", tokenizer.token_to_id("[MASK]")),
+         ],
+     )
+
+     # Instantiate with a tokenizer object
+     tokenizer = PreTrainedTokenizerFast(
+         tokenizer_object=tokenizer, model_max_length=512,
+         pad_token='[PAD]', unk_token='[UNK]', cls_token='[CLS]',
+         sep_token='[SEP]', mask_token='[MASK]')
+
+     return tokenizer
+
+ if not os.path.exists("tmp.json"):
+     tokenizer = build_tokenizer(files=["gene_rank_merge_2021Aug25.txt", "../t5/t5finetune_data_flat.csv"])
+     tokenizer.save("tmp.json")
+
+ d = json.load(open("tmp.json"))
+
+ #for i in range(7, 107):
+ #    d['added_tokens'].append({'id': i, 'special': True, 'content': f"<extra_id_{i-7}>", 'single_word': False, 'lstrip': False, 'rstrip': False, 'normalized': False})
+
+ # Find the largest token id currently in the vocabulary.
+ vmax = 0
+ for k, v in d['model']['vocab'].items():
+     if v > vmax:
+         vmax = v
+
+ assert vmax + 1 == len(d['model']['vocab'])
+
+ # Append 100 reserved "unused" tokens after the existing vocabulary.
+ for i in range(0, 100):
+     ##d['model']['vocab'][f"extra_id_{i}"] = vmax + 1 + i
+     d['model']['vocab'][f"unused{i}"] = vmax + 1 + i
+
+ with open('bert.json', 'w') as f:
+     json.dump(d, f)
+
+
+ tk = tokenizer_from_file("bert.json")
+ tk.save_pretrained("berttokenizer")
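A brief check of the tokenizer produced by this script (a sketch; berttokenizer/ is the output directory used above): each whitespace-separated gene symbol maps to a single ID, the five special tokens occupy IDs 0-4, and 100 reserved unusedN tokens are appended at the end of the vocabulary.

from transformers import PreTrainedTokenizerFast

tk = PreTrainedTokenizerFast.from_pretrained("berttokenizer")
print(len(tk))  # total vocabulary size (21051 in this repository's config.json)
print(tk.convert_tokens_to_ids(["[PAD]", "[UNK]", "[CLS]", "[SEP]", "[MASK]"]))  # [0, 1, 2, 3, 4]
print(tk.convert_tokens_to_ids(["unused0", "unused99"]))  # reserved IDs appended after the gene vocabulary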
tokenizer/special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
tokenizer/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer/tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"model_max_length": 512, "pad_token": "[PAD]", "unk_token": "[UNK]", "cls_token": "[CLS]", "sep_token": "[SEP]", "mask_token": "[MASK]", "tokenizer_class": "PreTrainedTokenizerFast"}
train.py ADDED
@@ -0,0 +1,186 @@
+ # Copyright 2023 Rohan Taori, Ishaan Gulrajani, Tianyi Zhang, Yann Dubois, Xuechen Li
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import copy
+ import logging
+ from dataclasses import dataclass, field
+ import pathlib
+ from typing import Dict, Optional, Sequence
+
+ import torch
+ import transformers
+ from torch.utils.data import Dataset
+ from transformers import Trainer
+ import numpy as np
+ import json
+
+ IGNORE_INDEX = -100
+
+ @dataclass
+ class ModelArguments:
+     model_name_or_path: Optional[str] = field(default="facebook/opt-125m")
+
+
+ @dataclass
+ class DataArguments:
+     data_path: str = field(default=None, metadata={"help": "Path to the training data."})
+
+
+ @dataclass
+ class TrainingArguments(transformers.TrainingArguments):
+     cache_dir: Optional[str] = field(default=None)
+     optim: str = field(default="adamw_torch")
+     model_max_length: int = field(
+         default=8192,
+         metadata={"help": "Maximum sequence length. Sequences will be right padded (and possibly truncated)."},
+     )
+
+ local_rank = None
+
+ def rank0_print(*args):
+     if local_rank == 0:
+         print(*args)
+
+ def bert_masking(input_ids, random_tokens, mask_token_id, mask_prob=0.15):
+     assert len(input_ids) > 1
+     if isinstance(input_ids, list):
+         input_ids = np.array(input_ids)
+     elif isinstance(input_ids, torch.Tensor):
+         input_ids = input_ids.numpy()
+     elif isinstance(input_ids, np.ndarray):
+         pass
+
+     labels = np.full_like(input_ids, IGNORE_INDEX)  # Initialize labels with -100 (ignore index for loss calculation)
+
+     # We exclude the first and last tokens ([CLS] and [SEP]) from being masked
+     num_tokens = len(input_ids)
+     valid_indices = np.arange(1, num_tokens - 1)  # Ignore the first (index 0) and last (index -1) tokens
+
+     # Determine the number of tokens to mask (15% of the valid tokens by default)
+     num_mask = int(np.ceil(mask_prob * len(valid_indices)))
+
+     # Randomly choose indices to mask from the valid indices
+     mask_indices = np.random.choice(valid_indices, num_mask, replace=False)
+
+     for idx in mask_indices:
+         prob = np.random.rand()  # Generate a random number between 0 and 1
+
+         if prob < 0.8:
+             # 80% of the time, replace with the [MASK] token
+             labels[idx] = input_ids[idx]
+             input_ids[idx] = mask_token_id
+         elif prob < 0.9:
+             # 10% of the time, replace with a random token
+             labels[idx] = input_ids[idx]
+             input_ids[idx] = np.random.choice(random_tokens)
+         else:
+             # 10% of the time, keep the original token (but still predict it)
+             labels[idx] = input_ids[idx]
+
+     input_ids = torch.from_numpy(input_ids)
+     labels = torch.from_numpy(labels)
+     return dict(input_ids=input_ids, labels=labels)
+
+ def is_not_special_token(token_name):
+     # Note: "[SEP]" is not excluded here, so it can also be drawn as a random replacement token.
+     unused = token_name.startswith("unused")
+     is_special_token = (token_name in ["[CLS]", "[MASK]", "[PAD]", "[UNK]"])
+     flag = ((not unused) and (not is_special_token))
+     return flag
+
+ class SupervisedDataset(Dataset):
+     """Dataset of gene rankings for masked-language-model pretraining."""
+
+     def __init__(self, data_path: str, tokenizer: transformers.PreTrainedTokenizerFast):
+         super(SupervisedDataset, self).__init__()
+         logging.warning("Loading data...")
+         self.tokenizer = tokenizer
+         self.max_length = 64  # max number of genes
+         with open(data_path) as f:
+             self.list_data = [line.split()[0: self.max_length] for line in f if len(line.split()) >= self.max_length]
+
+         self.cached_input_ids = {}
+         self.random_tokens = [token_id for token_name, token_id in self.tokenizer.vocab.items() if is_not_special_token(token_name)]
+
+     def __len__(self):
+         return len(self.list_data)
+
+     def __getitem__(self, i) -> Dict[str, torch.Tensor]:
+         if i in self.cached_input_ids:
+             input_ids = self.cached_input_ids[i]
+         else:
+             input_ids = self.tokenizer(self.list_data[i], is_split_into_words=True)["input_ids"]
+             self.cached_input_ids[i] = input_ids
+
+         inputs = bert_masking(input_ids, self.random_tokens, self.tokenizer.mask_token_id)
+         return inputs
+
+ @dataclass
+ class DataCollatorForSupervisedDataset(object):
+     """Collate examples for supervised fine-tuning."""
+
+     tokenizer: transformers.PreTrainedTokenizerFast
+
+     def __call__(self, instances: Sequence[Dict]) -> Dict[str, torch.Tensor]:
+         input_ids, labels = tuple([instance[key] for instance in instances] for key in ("input_ids", "labels"))
+         input_ids = torch.nn.utils.rnn.pad_sequence(
+             input_ids, batch_first=True, padding_value=self.tokenizer.pad_token_id
+         )
+         labels = torch.nn.utils.rnn.pad_sequence(labels, batch_first=True, padding_value=IGNORE_INDEX)
+         return dict(
+             input_ids=input_ids,
+             labels=labels,
+             attention_mask=(input_ids.ne(self.tokenizer.pad_token_id)).long(),
+         )
+
+
+ def make_supervised_data_module(tokenizer: transformers.PreTrainedTokenizerFast, data_args) -> Dict:
+     """Make dataset and collator for supervised fine-tuning."""
+     train_dataset = SupervisedDataset(tokenizer=tokenizer, data_path=data_args.data_path)
+     data_collator = DataCollatorForSupervisedDataset(tokenizer=tokenizer)
+     return dict(train_dataset=train_dataset, eval_dataset=None, data_collator=data_collator)
+
+
+ def train():
+     global local_rank
+     parser = transformers.HfArgumentParser((ModelArguments, DataArguments, TrainingArguments))
+     model_args, data_args, training_args = parser.parse_args_into_dataclasses()
+     local_rank = training_args.local_rank
+
+     #model = transformers.AutoModelForCausalLM.from_pretrained(
+     #    model_args.model_name_or_path,
+     #    cache_dir=training_args.cache_dir,
+     #)
+     config = transformers.AutoConfig.from_pretrained('config.json')
+     #model = transformers.OPTForCausalLM(config)
+     model = transformers.BertForMaskedLM(config)
+
+     model_size = sum(p.numel() for p in model.parameters() if p.requires_grad) / 1e+6
+     rank0_print(model)
+     rank0_print(f"model_size: {model_size:.3f}M parameters")
+
+     tokenizer = transformers.PreTrainedTokenizerFast.from_pretrained("tokenizer")
+
+     data_module = make_supervised_data_module(tokenizer=tokenizer, data_args=data_args)
+     trainer = Trainer(model=model, tokenizer=tokenizer, args=training_args, **data_module)
+
+     # Resume from the latest checkpoint if one exists in the output directory.
+     if list(pathlib.Path(training_args.output_dir).glob("checkpoint-*")):
+         trainer.train(resume_from_checkpoint=True)
+     else:
+         trainer.train()
+
+     trainer.save_state()
+     trainer.save_model(output_dir=training_args.output_dir)
+
+
+ if __name__ == "__main__":
+     train()
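To make the 80/10/10 masking scheme in bert_masking concrete, here is a small standalone demo (a sketch; the token IDs are toy values, with [CLS]=2, [SEP]=3 and mask_token_id=4 as in this repository's config):

import numpy as np
from train import bert_masking

np.random.seed(0)
input_ids = [2, 100, 101, 102, 103, 104, 3]  # [CLS], five toy "gene" IDs, [SEP]
random_tokens = list(range(5, 21051))        # non-special vocabulary IDs

out = bert_masking(list(input_ids), random_tokens, mask_token_id=4)
print(out["input_ids"])  # ~15% of the inner positions replaced by [MASK], a random ID, or kept
print(out["labels"])     # original IDs at the selected positions, -100 (ignored by the loss) elsewhere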
train.sh ADDED
@@ -0,0 +1,26 @@
+ export CUDA_VISIBLE_DEVICES=4,5
+ ##--fsdp "full_shard auto_wrap" --fsdp_transformer_layer_cls_to_wrap 'OPTDecoderLayer' \
+ torchrun --nproc_per_node=2 --master_port=8060 train.py \
+     --data_path ../downstream_data/gene_ranking_20220803.txt \
+     --bf16 False \
+     --output_dir checkpoint \
+     --num_train_epochs 40 \
+     --per_device_train_batch_size 512 \
+     --per_device_eval_batch_size 4 \
+     --gradient_accumulation_steps 2 \
+     --evaluation_strategy "no" \
+     --save_strategy "steps" \
+     --save_steps 2000 \
+     --save_total_limit 1 \
+     --learning_rate 3e-4 \
+     --weight_decay 0.0 \
+     --warmup_ratio 0.03 \
+     --adam_beta1 0.90 \
+     --adam_beta2 0.95 \
+     --lr_scheduler_type "cosine" \
+     --logging_steps 10 \
+     --report_to tensorboard \
+     --tf32 True \
+     --dataloader_num_workers 2 \
+     --dataloader_persistent_workers True
+
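For orientation (a quick calculation, not part of the commit), these flags imply the following effective batch per optimizer step; the 66-token sequence length follows from train.py's 64 genes plus [CLS] and [SEP]:

gpus, per_device, grad_accum = 2, 512, 2    # CUDA_VISIBLE_DEVICES=4,5 and the flags above
sequences_per_step = gpus * per_device * grad_accum
tokens_per_step = sequences_per_step * 66   # 64 genes + [CLS] + [SEP]
print(sequences_per_step, tokens_per_step)  # 2048 cell rankings, 135168 tokens per optimizer step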