AGofficial committed
Commit c676a94 · verified · 1 Parent(s): 84d0337

Upload 8 files

Added these files from the zip and I did a bit of debugging.

Files changed (9)
  1. .gitattributes +1 -0
  2. README.md +15 -8
  3. ShaNet.png +3 -0
  4. chat.py +101 -0
  5. collect.py +40 -0
  6. configurator.py +47 -0
  7. model.py +330 -0
  8. test.py +7 -0
  9. train.py +358 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ ShaNet.png filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -1,8 +1,15 @@
- ---
- license: gpl-3.0
- datasets:
- - lmsys/lmsys-chat-1m
- language:
- - en
- pipeline_tag: text-generation
- ---
+
+ <img src="ShaNet.png" alt="ShaNet Banner" width="100%">
+
+ # ShaNet
+
+ ShaNet is a generative pre-trained transformer trained on conversational data. It is designed to understand and generate human-like text based on the input it receives, and can be used for applications such as chatbots and content generation.
+
+ ## Features
+ - **Conversational Understanding**: Trained on a diverse dataset to understand context and nuances in conversations.
+ - **Text Generation**: Capable of generating coherent and contextually relevant text.
+ - **Customizable**: Can be fine-tuned for specific applications or domains.
+ - **Open Source**: Available for use and modification.
+
+ ## Installation
+ To install ShaNet, download all of the files in this repository and run the chat.py script.
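For a rough idea of programmatic use, here is a minimal sketch along the lines of chat.py. It assumes a trained checkpoint saved as out/ckpt.pt (train.py actually writes ckpt_XXXXXX.pt files, so one may need to be renamed, per the note in train.py) and the cl100k_base tokenizer:

import torch
import tiktoken
from model import GPT, GPTConfig

device = 'cuda' if torch.cuda.is_available() else 'cpu'
checkpoint = torch.load('out/ckpt.pt', map_location=device)  # assumed checkpoint path
model = GPT(GPTConfig(**checkpoint['model_args']))
model.load_state_dict(checkpoint['model'])
model.to(device)
model.eval()

enc = tiktoken.get_encoding('cl100k_base')
idx = torch.tensor([enc.encode("Hello, how are you?")], dtype=torch.long, device=device)
out = model.generate(idx, max_new_tokens=50, temperature=0.8, top_k=100)
print(enc.decode(out[0].tolist()))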
ShaNet.png ADDED

Git LFS Details

  • SHA256: 8e3f2eb84a520f7526f42fdd923fe368b78fb0217f9de63c4db043d421852195
  • Pointer size: 132 Bytes
  • Size of remote file: 8.38 MB
chat.py ADDED
@@ -0,0 +1,101 @@
+ #!/usr/bin/env python3
+ import os
+ import torch
+ import pickle
+ from model import GPTConfig, GPT
+ import tiktoken
+ from rich.traceback import install
+
+ install()
+
+ # ----- CONFIG -----
+ ckpt_path = 'out/ckpt.pt'
+ meta_path = 'data/mydata/meta.pkl'
+ device = 'cuda' if torch.cuda.is_available() else 'cpu'
+ tokenizer_name = 'cl100k_base'
+ max_new_tokens = 1024
+ temperature = 0.8
+ top_k = 100
+ special_tokens = {"<|endoftext|>", "<|im_start|>", "<|im_stop|>"}
+
+ # ----- LOAD TOKENIZER -----
+ enc = tiktoken.get_encoding(tokenizer_name)
+ encode = enc.encode
+ decode = enc.decode
+
+ # ----- LOAD METADATA -----
+ with open(meta_path, 'rb') as f:
+     meta = pickle.load(f)
+ vocab_size = meta['vocab_size']
+
+ # ----- LOAD CHECKPOINT -----
+ checkpoint = torch.load(ckpt_path, map_location=device)
+ model_args = checkpoint['model_args']
+ model_args['vocab_size'] = vocab_size
+ block_size = model_args.get('block_size', 1024)
+
+ # ----- INITIALIZE MODEL -----
+ model = GPT(GPTConfig(**model_args))
+ model.load_state_dict(checkpoint['model'])
+ model.to(device)
+ model.eval()
+
+ @torch.no_grad()
+ def generate_stream(model, input_ids, max_new_tokens, temperature=1.0, top_k=None):
+     model.eval()
+     special_token_id = encode("<|endoftext|>", allowed_special=special_tokens)[0]
+
+     for _ in range(max_new_tokens):
+         # crop the running context to the model's block size
+         if input_ids.size(1) > block_size:
+             input_ids = input_ids[:, -block_size:]
+
+         logits, _ = model(input_ids)
+         logits = logits[:, -1, :] / temperature
+
+         if top_k is not None:
+             v, _ = torch.topk(logits, top_k)
+             logits[logits < v[:, [-1]]] = -float('Inf')
+
+         probs = torch.nn.functional.softmax(logits, dim=-1)
+         next_token = torch.multinomial(probs, num_samples=1)
+         next_token_id = next_token.item()
+
+         input_ids = torch.cat((input_ids, next_token), dim=1)
+
+         # stream the decoded token to stdout, skipping special tokens
+         decoded_token = decode([next_token_id])
+         if decoded_token not in special_tokens:
+             print(decoded_token, end='', flush=True)
+
+         if next_token_id == special_token_id:
+             break
+
+     print()  # ensure newline after generation
+     return input_ids
+
+ def main():
+     print("🤖 AI Assistant is ready. Type 'exit' or press Ctrl+C to quit.\n")
+     try:
+         while True:
+             user_input = input("You: ")
+             if user_input.lower() in {"exit", "quit"}:
+                 print("👋 Exiting assistant.")
+                 break
+
+             prompt = f"""
+ <|im_start|>user
+ {user_input}<|endoftext|>
+ <|im_stop|>
+
+ <|im_start|>assistant
+
+ """
+             input_ids = torch.tensor(encode(prompt, allowed_special=special_tokens), dtype=torch.long, device=device)[None, ...]
+
+             print("🤖 Assistant:", end=' ', flush=True)
+             generate_stream(model, input_ids, max_new_tokens, temperature, top_k)
+             print("-" * 50)
+
+     except KeyboardInterrupt:
+         print("\n👋 Exiting assistant.")
+
+ if __name__ == "__main__":
+     main()
collect.py ADDED
@@ -0,0 +1,40 @@
+ #!/usr/bin/env python3
+ """
+ Download and transform LMSYS-Chat-1M into plain text for LLM completion models,
+ in the format:
+ <|im_start|>role
+ message<|endoftext|>
+ <|im_stop|>
+
+ with 6 newlines between conversations.
+ """
+
+ from datasets import load_dataset
+ import sys
+
+ def main(output_path="lmsys_chat_1m.txt", split="train"):
+     ds = load_dataset("lmsys/lmsys-chat-1m", split=split)
+
+     with open(output_path, "w", encoding="utf-8") as out:
+         for i, sample in enumerate(ds):
+             conv = sample["conversation"]  # list of messages
+
+             for msg in conv:
+                 role = msg["role"]
+                 content = msg["content"].strip()
+                 out.write(f"<|im_start|>{role}\n{content}<|endoftext|>\n<|im_stop|>\n")
+
+             out.write("\n" * 6)  # 6 newlines between conversations
+
+             if (i + 1) % 10000 == 0:
+                 print(f"Processed {i + 1} conversations", file=sys.stderr)
+
+     print(f"✔ Saved plain-text to: {output_path}")
+
+ if __name__ == "__main__":
+     import argparse
+     p = argparse.ArgumentParser(description="Convert LMSYS-Chat-1M to LLM-friendly text format")
+     p.add_argument("--output", "-o", default="lmsys_chat_1m.txt", help="Output file path")
+     p.add_argument("--split", "-s", default="train", help="Dataset split (e.g. 'train')")
+     args = p.parse_args()
+     main(output_path=args.output, split=args.split)
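As an illustration of the format this script emits, a single two-turn conversation (content invented here purely as an example) would be written as:

<|im_start|>user
What is the capital of France?<|endoftext|>
<|im_stop|>
<|im_start|>assistant
The capital of France is Paris.<|endoftext|>
<|im_stop|>

followed by six newlines before the next conversation begins.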
configurator.py ADDED
@@ -0,0 +1,47 @@
+ """
+ Poor Man's Configurator. Probably a terrible idea. Example usage:
+ $ python train.py config/override_file.py --batch_size=32
+ this will first run config/override_file.py, then override batch_size to 32
+
+ The code in this file will be run as follows from e.g. train.py:
+ >>> exec(open('configurator.py').read())
+
+ So it's not a Python module, it's just shuttling this code away from train.py
+ The code in this script then overrides the globals()
+
+ I know people are not going to love this, I just really dislike configuration
+ complexity and having to prepend config. to every single variable. If someone
+ comes up with a better simple Python solution I am all ears.
+ """
+
+ import sys
+ from ast import literal_eval
+
+ for arg in sys.argv[1:]:
+     if '=' not in arg:
+         # assume it's the name of a config file
+         assert not arg.startswith('--')
+         config_file = arg
+         print(f"Overriding config with {config_file}:")
+         with open(config_file) as f:
+             print(f.read())
+         exec(open(config_file).read())
+     else:
+         # assume it's a --key=value argument
+         assert arg.startswith('--')
+         key, val = arg.split('=')
+         key = key[2:]
+         if key in globals():
+             try:
+                 # attempt to eval it (e.g. if bool, number, etc.)
+                 attempt = literal_eval(val)
+             except (SyntaxError, ValueError):
+                 # if that goes wrong, just use the string
+                 attempt = val
+             # ensure the types match ok
+             assert type(attempt) == type(globals()[key])
+             # cross fingers
+             print(f"Overriding: {key} = {attempt}")
+             globals()[key] = attempt
+         else:
+             raise ValueError(f"Unknown config key: {key}")
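For reference, a config file passed on the command line is just Python that reassigns globals defined in train.py. A hypothetical config/small.py (the file name and values are only an example; every name must already exist as a global in train.py) could look like:

# config/small.py -- hypothetical override file; names must match globals in train.py
out_dir = 'out-small'
n_layer = 2
n_head = 8
n_embd = 512
batch_size = 8
max_iters = 5000

It would be applied with: python train.py config/small.py --learning_rate=1e-4, following the usage pattern shown in the docstring above.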
model.py ADDED
@@ -0,0 +1,330 @@
+ """
+ Full definition of a GPT Language Model, all of it in this single file.
+ References:
+ 1) the official GPT-2 TensorFlow implementation released by OpenAI:
+ https://github.com/openai/gpt-2/blob/master/src/model.py
+ 2) huggingface/transformers PyTorch implementation:
+ https://github.com/huggingface/transformers/blob/main/src/transformers/models/gpt2/modeling_gpt2.py
+ """
+
+ import math
+ import inspect
+ from dataclasses import dataclass
+
+ import torch
+ import torch.nn as nn
+ from torch.nn import functional as F
+
+ class LayerNorm(nn.Module):
+     """ LayerNorm but with an optional bias. PyTorch doesn't support simply bias=False """
+
+     def __init__(self, ndim, bias):
+         super().__init__()
+         self.weight = nn.Parameter(torch.ones(ndim))
+         self.bias = nn.Parameter(torch.zeros(ndim)) if bias else None
+
+     def forward(self, input):
+         return F.layer_norm(input, self.weight.shape, self.weight, self.bias, 1e-5)
+
+ class CausalSelfAttention(nn.Module):
+
+     def __init__(self, config):
+         super().__init__()
+         assert config.n_embd % config.n_head == 0
+         # key, query, value projections for all heads, but in a batch
+         self.c_attn = nn.Linear(config.n_embd, 3 * config.n_embd, bias=config.bias)
+         # output projection
+         self.c_proj = nn.Linear(config.n_embd, config.n_embd, bias=config.bias)
+         # regularization
+         self.attn_dropout = nn.Dropout(config.dropout)
+         self.resid_dropout = nn.Dropout(config.dropout)
+         self.n_head = config.n_head
+         self.n_embd = config.n_embd
+         self.dropout = config.dropout
+         # flash attention make GPU go brrrrr but support is only in PyTorch >= 2.0
+         self.flash = hasattr(torch.nn.functional, 'scaled_dot_product_attention')
+         if not self.flash:
+             print("WARNING: using slow attention. Flash Attention requires PyTorch >= 2.0")
+             # causal mask to ensure that attention is only applied to the left in the input sequence
+             self.register_buffer("bias", torch.tril(torch.ones(config.block_size, config.block_size))
+                                         .view(1, 1, config.block_size, config.block_size))
+
+     def forward(self, x):
+         B, T, C = x.size() # batch size, sequence length, embedding dimensionality (n_embd)
+
+         # calculate query, key, values for all heads in batch and move head forward to be the batch dim
+         q, k, v = self.c_attn(x).split(self.n_embd, dim=2)
+         k = k.view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
+         q = q.view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
+         v = v.view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
+
+         # causal self-attention; Self-attend: (B, nh, T, hs) x (B, nh, hs, T) -> (B, nh, T, T)
+         if self.flash:
+             # efficient attention using Flash Attention CUDA kernels
+             y = torch.nn.functional.scaled_dot_product_attention(q, k, v, attn_mask=None, dropout_p=self.dropout if self.training else 0, is_causal=True)
+         else:
+             # manual implementation of attention
+             att = (q @ k.transpose(-2, -1)) * (1.0 / math.sqrt(k.size(-1)))
+             att = att.masked_fill(self.bias[:,:,:T,:T] == 0, float('-inf'))
+             att = F.softmax(att, dim=-1)
+             att = self.attn_dropout(att)
+             y = att @ v # (B, nh, T, T) x (B, nh, T, hs) -> (B, nh, T, hs)
+         y = y.transpose(1, 2).contiguous().view(B, T, C) # re-assemble all head outputs side by side
+
+         # output projection
+         y = self.resid_dropout(self.c_proj(y))
+         return y
+
+ class MLP(nn.Module):
+
+     def __init__(self, config):
+         super().__init__()
+         self.c_fc = nn.Linear(config.n_embd, 4 * config.n_embd, bias=config.bias)
+         self.gelu = nn.GELU()
+         self.c_proj = nn.Linear(4 * config.n_embd, config.n_embd, bias=config.bias)
+         self.dropout = nn.Dropout(config.dropout)
+
+     def forward(self, x):
+         x = self.c_fc(x)
+         x = self.gelu(x)
+         x = self.c_proj(x)
+         x = self.dropout(x)
+         return x
+
+ class Block(nn.Module):
+
+     def __init__(self, config):
+         super().__init__()
+         self.ln_1 = LayerNorm(config.n_embd, bias=config.bias)
+         self.attn = CausalSelfAttention(config)
+         self.ln_2 = LayerNorm(config.n_embd, bias=config.bias)
+         self.mlp = MLP(config)
+
+     def forward(self, x):
+         x = x + self.attn(self.ln_1(x))
+         x = x + self.mlp(self.ln_2(x))
+         return x
+
+ @dataclass
+ class GPTConfig:
+     block_size: int = 1024
+     vocab_size: int = 50304 # GPT-2 vocab_size of 50257, padded up to nearest multiple of 64 for efficiency
+     n_layer: int = 12
+     n_head: int = 12
+     n_embd: int = 768
+     dropout: float = 0.0
+     bias: bool = True # True: bias in Linears and LayerNorms, like GPT-2. False: a bit better and faster
+
+ class GPT(nn.Module):
+
+     def __init__(self, config):
+         super().__init__()
+         assert config.vocab_size is not None
+         assert config.block_size is not None
+         self.config = config
+
+         self.transformer = nn.ModuleDict(dict(
+             wte = nn.Embedding(config.vocab_size, config.n_embd),
+             wpe = nn.Embedding(config.block_size, config.n_embd),
+             drop = nn.Dropout(config.dropout),
+             h = nn.ModuleList([Block(config) for _ in range(config.n_layer)]),
+             ln_f = LayerNorm(config.n_embd, bias=config.bias),
+         ))
+         self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
+         # with weight tying when using torch.compile() some warnings get generated:
+         # "UserWarning: functional_call was passed multiple values for tied weights.
+         # This behavior is deprecated and will be an error in future versions"
+         # not 100% sure what this is, so far seems to be harmless. TODO investigate
+         self.transformer.wte.weight = self.lm_head.weight # https://paperswithcode.com/method/weight-tying
+
+         # init all weights
+         self.apply(self._init_weights)
+         # apply special scaled init to the residual projections, per GPT-2 paper
+         for pn, p in self.named_parameters():
+             if pn.endswith('c_proj.weight'):
+                 torch.nn.init.normal_(p, mean=0.0, std=0.02/math.sqrt(2 * config.n_layer))
+
+         # report number of parameters
+         print("number of parameters: %.2fM" % (self.get_num_params()/1e6,))
+
+     def get_num_params(self, non_embedding=True):
+         """
+         Return the number of parameters in the model.
+         For non-embedding count (default), the position embeddings get subtracted.
+         The token embeddings would too, except due to the parameter sharing these
+         params are actually used as weights in the final layer, so we include them.
+         """
+         n_params = sum(p.numel() for p in self.parameters())
+         if non_embedding:
+             n_params -= self.transformer.wpe.weight.numel()
+         return n_params
+
+     def _init_weights(self, module):
+         if isinstance(module, nn.Linear):
+             torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
+             if module.bias is not None:
+                 torch.nn.init.zeros_(module.bias)
+         elif isinstance(module, nn.Embedding):
+             torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
+
+     def forward(self, idx, targets=None):
+         device = idx.device
+         b, t = idx.size()
+         assert t <= self.config.block_size, f"Cannot forward sequence of length {t}, block size is only {self.config.block_size}"
+         pos = torch.arange(0, t, dtype=torch.long, device=device) # shape (t)
+
+         # forward the GPT model itself
+         tok_emb = self.transformer.wte(idx) # token embeddings of shape (b, t, n_embd)
+         pos_emb = self.transformer.wpe(pos) # position embeddings of shape (t, n_embd)
+         x = self.transformer.drop(tok_emb + pos_emb)
+         for block in self.transformer.h:
+             x = block(x)
+         x = self.transformer.ln_f(x)
+
+         if targets is not None:
+             # if we are given some desired targets also calculate the loss
+             logits = self.lm_head(x)
+             loss = F.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1), ignore_index=-1)
+         else:
+             # inference-time mini-optimization: only forward the lm_head on the very last position
+             logits = self.lm_head(x[:, [-1], :]) # note: using list [-1] to preserve the time dim
+             loss = None
+
+         return logits, loss
+
+     def crop_block_size(self, block_size):
+         # model surgery to decrease the block size if necessary
+         # e.g. we may load the GPT2 pretrained model checkpoint (block size 1024)
+         # but want to use a smaller block size for some smaller, simpler model
+         assert block_size <= self.config.block_size
+         self.config.block_size = block_size
+         self.transformer.wpe.weight = nn.Parameter(self.transformer.wpe.weight[:block_size])
+         for block in self.transformer.h:
+             if hasattr(block.attn, 'bias'):
+                 block.attn.bias = block.attn.bias[:,:,:block_size,:block_size]
+
+     @classmethod
+     def from_pretrained(cls, model_type, override_args=None):
+         assert model_type in {'gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'}
+         override_args = override_args or {} # default to empty dict
+         # only dropout can be overridden see more notes below
+         assert all(k == 'dropout' for k in override_args)
+         from transformers import GPT2LMHeadModel
+         print("loading weights from pretrained gpt: %s" % model_type)
+
+         # n_layer, n_head and n_embd are determined from model_type
+         config_args = {
+             'gpt2': dict(n_layer=12, n_head=12, n_embd=768), # 124M params
+             'gpt2-medium': dict(n_layer=24, n_head=16, n_embd=1024), # 350M params
+             'gpt2-large': dict(n_layer=36, n_head=20, n_embd=1280), # 774M params
+             'gpt2-xl': dict(n_layer=48, n_head=25, n_embd=1600), # 1558M params
+         }[model_type]
+         print("forcing vocab_size=50257, block_size=1024, bias=True")
+         config_args['vocab_size'] = 50257 # always 50257 for GPT model checkpoints
+         config_args['block_size'] = 1024 # always 1024 for GPT model checkpoints
+         config_args['bias'] = True # always True for GPT model checkpoints
+         # we can override the dropout rate, if desired
+         if 'dropout' in override_args:
+             print(f"overriding dropout rate to {override_args['dropout']}")
+             config_args['dropout'] = override_args['dropout']
+         # create a from-scratch initialized minGPT model
+         config = GPTConfig(**config_args)
+         model = GPT(config)
+         sd = model.state_dict()
+         sd_keys = sd.keys()
+         sd_keys = [k for k in sd_keys if not k.endswith('.attn.bias')] # discard this mask / buffer, not a param
+
+         # init a huggingface/transformers model
+         model_hf = GPT2LMHeadModel.from_pretrained(model_type)
+         sd_hf = model_hf.state_dict()
+
+         # copy while ensuring all of the parameters are aligned and match in names and shapes
+         sd_keys_hf = sd_hf.keys()
+         sd_keys_hf = [k for k in sd_keys_hf if not k.endswith('.attn.masked_bias')] # ignore these, just a buffer
+         sd_keys_hf = [k for k in sd_keys_hf if not k.endswith('.attn.bias')] # same, just the mask (buffer)
+         transposed = ['attn.c_attn.weight', 'attn.c_proj.weight', 'mlp.c_fc.weight', 'mlp.c_proj.weight']
+         # basically the openai checkpoints use a "Conv1D" module, but we only want to use a vanilla Linear
+         # this means that we have to transpose these weights when we import them
+         assert len(sd_keys_hf) == len(sd_keys), f"mismatched keys: {len(sd_keys_hf)} != {len(sd_keys)}"
+         for k in sd_keys_hf:
+             if any(k.endswith(w) for w in transposed):
+                 # special treatment for the Conv1D weights we need to transpose
+                 assert sd_hf[k].shape[::-1] == sd[k].shape
+                 with torch.no_grad():
+                     sd[k].copy_(sd_hf[k].t())
+             else:
+                 # vanilla copy over the other parameters
+                 assert sd_hf[k].shape == sd[k].shape
+                 with torch.no_grad():
+                     sd[k].copy_(sd_hf[k])
+
+         return model
+
+     def configure_optimizers(self, weight_decay, learning_rate, betas, device_type):
+         # start with all of the candidate parameters
+         param_dict = {pn: p for pn, p in self.named_parameters()}
+         # filter out those that do not require grad
+         param_dict = {pn: p for pn, p in param_dict.items() if p.requires_grad}
+         # create optim groups. Any parameters that is 2D will be weight decayed, otherwise no.
+         # i.e. all weight tensors in matmuls + embeddings decay, all biases and layernorms don't.
+         decay_params = [p for n, p in param_dict.items() if p.dim() >= 2]
+         nodecay_params = [p for n, p in param_dict.items() if p.dim() < 2]
+         optim_groups = [
+             {'params': decay_params, 'weight_decay': weight_decay},
+             {'params': nodecay_params, 'weight_decay': 0.0}
+         ]
+         num_decay_params = sum(p.numel() for p in decay_params)
+         num_nodecay_params = sum(p.numel() for p in nodecay_params)
+         print(f"num decayed parameter tensors: {len(decay_params)}, with {num_decay_params:,} parameters")
+         print(f"num non-decayed parameter tensors: {len(nodecay_params)}, with {num_nodecay_params:,} parameters")
+         # Create AdamW optimizer and use the fused version if it is available
+         fused_available = 'fused' in inspect.signature(torch.optim.AdamW).parameters
+         use_fused = fused_available and device_type == 'cuda'
+         extra_args = dict(fused=True) if use_fused else dict()
+         optimizer = torch.optim.AdamW(optim_groups, lr=learning_rate, betas=betas, **extra_args)
+         print(f"using fused AdamW: {use_fused}")
+
+         return optimizer
+
+     def estimate_mfu(self, fwdbwd_per_iter, dt):
+         """ estimate model flops utilization (MFU) in units of A100 bfloat16 peak FLOPS """
+         # first estimate the number of flops we do per iteration.
+         # see PaLM paper Appendix B as ref: https://arxiv.org/abs/2204.02311
+         N = self.get_num_params()
+         cfg = self.config
+         L, H, Q, T = cfg.n_layer, cfg.n_head, cfg.n_embd//cfg.n_head, cfg.block_size
+         flops_per_token = 6*N + 12*L*H*Q*T
+         flops_per_fwdbwd = flops_per_token * T
+         flops_per_iter = flops_per_fwdbwd * fwdbwd_per_iter
+         # express our flops throughput as ratio of A100 bfloat16 peak flops
+         flops_achieved = flops_per_iter * (1.0/dt) # per second
+         flops_promised = 312e12 # A100 GPU bfloat16 peak flops is 312 TFLOPS
+         mfu = flops_achieved / flops_promised
+         return mfu
+
+     @torch.no_grad()
+     def generate(self, idx, max_new_tokens, temperature=1.0, top_k=None):
+         """
+         Take a conditioning sequence of indices idx (LongTensor of shape (b,t)) and complete
+         the sequence max_new_tokens times, feeding the predictions back into the model each time.
+         Most likely you'll want to make sure to be in model.eval() mode of operation for this.
+         """
+         for _ in range(max_new_tokens):
+             # if the sequence context is growing too long we must crop it at block_size
+             idx_cond = idx if idx.size(1) <= self.config.block_size else idx[:, -self.config.block_size:]
+             # forward the model to get the logits for the index in the sequence
+             logits, _ = self(idx_cond)
+             # pluck the logits at the final step and scale by desired temperature
+             logits = logits[:, -1, :] / temperature
+             # optionally crop the logits to only the top k options
+             if top_k is not None:
+                 v, _ = torch.topk(logits, min(top_k, logits.size(-1)))
+                 logits[logits < v[:, [-1]]] = -float('Inf')
+             # apply softmax to convert logits to (normalized) probabilities
+             probs = F.softmax(logits, dim=-1)
+             # sample from the distribution
+             idx_next = torch.multinomial(probs, num_samples=1)
+             # append sampled index to the running sequence and continue
+             idx = torch.cat((idx, idx_next), dim=1)
+
+         return idx
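As a quick orientation, the module can also be exercised on its own with an untrained toy model; a minimal sketch (the sizes below are arbitrary and chosen only for the example):

import torch
from model import GPT, GPTConfig

config = GPTConfig(block_size=128, vocab_size=50304, n_layer=2, n_head=2,
                   n_embd=128, dropout=0.0, bias=True)
model = GPT(config)
model.eval()

idx = torch.randint(0, config.vocab_size, (1, 8))  # (batch, time) token ids
logits, loss = model(idx)        # loss is None when no targets are given
out = model.generate(idx, max_new_tokens=16, temperature=1.0, top_k=50)
print(out.shape)                 # expected: torch.Size([1, 24])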
test.py ADDED
@@ -0,0 +1,7 @@
+ import torch
+
+ try:
+     ckpt = torch.load("out/ckpt.pt", map_location="cpu")
+     print("✅ Checkpoint has been loaded successfully")
+ except Exception as e:
+     print("❌ Failed to load the checkpoint:", e)
train.py ADDED
@@ -0,0 +1,358 @@
+ #!/usr/bin/env python3
+ import os
+ import time
+ import math
+ import pickle
+ from contextlib import nullcontext
+ # note from ag: you may need to manually rename the saved checkpoint to match the name
+ # expected by test.py, chat.py and the other scripts. Also, really impressive work here.
+ import numpy as np
+ import torch
+ from torch.nn.parallel import DistributedDataParallel as DDP
+ from torch.distributed import init_process_group, destroy_process_group
+
+ import tiktoken
+ from rich.traceback import install
+ install()
+ from model import GPTConfig, GPT
+
+ # -------------------------------------------------------------------------------
+ # SPECIAL TOKENS for tokenizer (edit here as needed)
+ SPECIAL_TOKENS = {'<|im_start|>', '<|im_end|>', '<|system|>', '<|user|>', '<|assistant|>', '<|endoftext|>', '<|endofprompt|>'}
+ print(f"ℹ️ Using special tokens: {SPECIAL_TOKENS}")
+
+ # -------------------------------------------------------------------------------
+ # DEFAULT CONFIG — override via CLI or `configurator.py`
+ out_dir = 'out'
+ eval_interval = 95
+ log_interval = 1
+ eval_iters = 95
+ eval_only = False # if True, exit after first eval
+ always_save_checkpoint = True # forces save every eval
+
+ init_from = 'resume' # 'scratch' | 'resume' | 'gpt2*'
+
+ wandb_log = False
+ wandb_project = 'owt'
+ wandb_run_name = 'run' + str(time.time())
+
+ # Data / Tokenization
+ dataset = 'mydata' # subfolder under data/
+ data_file = 'lmsys_chat_1m.txt'
+ tokenizer_name = 'cl100k_base'
+ token_dtype = 'uint32' # must hold up to tokenizer.n_vocab
+
+ # Model architecture
+ n_layer = 1 # single transformer block (very shallow model)
+ n_head = 16 # keep heads high for representation capacity
+ n_embd = 1024 # reduced from 1280 to 1024 for stability and efficiency
+ dropout = 0.05 # lower dropout since underfitting may occur
+ bias = True
+
+ # Optimizer
+ learning_rate = 3e-4
+ max_iters = 20000
+ weight_decay = 0.05 # use 0.1 if batch size is large
+ beta1 = 0.9
+ beta2 = 0.98
+ grad_clip = 1.0
+
+ # LR schedule
+ decay_lr = True
+ warmup_iters = 100 # faster warmup for shallow models
+ lr_decay_iters = 10000 # align with max_iters for sharper decay
+ min_lr = 1e-5
+
+ # Batch & block sizes
+ batch_size = 4 # increase batch size if GPU RAM allows
+ gradient_accumulation_steps = 5 * 4 # adjust accordingly to match effective batch size
+ block_size = 1024 # keep same for compatibility
+
+ # DDP
+ backend = 'nccl'
+
+ # Precision / compilation
+ device = 'cuda'
+ dtype = 'bfloat16' if torch.cuda.is_available() and torch.cuda.is_bf16_supported() else 'float16'
+ compile = False # set to True on Linux with Triton installed
+
+ # Checkpointing
+ save_interval = 200 # also save every N steps
+ checkpoint_limit = None # keep only last N checkpoints (None == keep all)
+ # -------------------------------------------------------------------------------
+
+ # allow overrides via CLI / configurator.py
+ config_keys = [k for k, v in globals().items()
+                if not k.startswith('_') and isinstance(v, (int, float, bool, str, list))]
+ exec(open('configurator.py').read()) # override from CLI or config
+ config = {k: globals()[k] for k in config_keys}
+
+ # -----------------------------------------------------------------------------
+ # AUTO-PREPROCESSING: data.txt → train.bin / val.bin + meta.pkl
+ data_dir = os.path.join('data', dataset)
+ train_bin_path = os.path.join(data_dir, 'train.bin')
+ val_bin_path = os.path.join(data_dir, 'val.bin')
+ meta_path = os.path.join(data_dir, 'meta.pkl')
+ dtype_token = np.dtype(token_dtype)
+
+ if not (os.path.exists(train_bin_path) and os.path.exists(val_bin_path) and os.path.exists(meta_path)):
+     print(f"ℹ️ Preprocessing raw text from {data_file} ...")
+     raw_text = open(data_file, 'r', encoding='utf-8').read()
+     enc = tiktoken.get_encoding(tokenizer_name)
+     encode = enc.encode
+     vocab_size = enc.n_vocab
+
+     # ensure dtype can hold vocab_size
+     if np.issubdtype(dtype_token, np.integer):
+         info = np.iinfo(dtype_token)
+         if info.max < vocab_size:
+             raise ValueError(f"token_dtype={token_dtype} max={info.max} < vocab_size={vocab_size}")
+
+     tokens = np.array(encode(raw_text, allowed_special=SPECIAL_TOKENS), dtype=dtype_token)
+     n = tokens.shape[0]
+     split = int(0.9 * n)
+     train_tokens = tokens[:split]
+     val_tokens = tokens[split:]
+
+     os.makedirs(data_dir, exist_ok=True)
+     train_tokens.tofile(train_bin_path)
+     val_tokens.tofile(val_bin_path)
+     with open(meta_path, 'wb') as f:
+         pickle.dump({
+             'vocab_size': vocab_size,
+             'tokenizer': tokenizer_name,
+             'token_dtype': token_dtype,
+             'special_tokens': SPECIAL_TOKENS,
+         }, f)
+     print(f"✅ Wrote {train_bin_path} ({train_tokens.nbytes} bytes), "
+           f"{val_bin_path} ({val_tokens.nbytes} bytes), and {meta_path}")
+
+ # -----------------------------------------------------------------------------
+ # DDP or single-GPU
+ ddp = int(os.environ.get('RANK', -1)) != -1
+ if ddp:
+     init_process_group(backend=backend)
+     ddp_rank = int(os.environ['RANK'])
+     ddp_local_rank = int(os.environ['LOCAL_RANK'])
+     ddp_world_size = int(os.environ['WORLD_SIZE'])
+     device = f'cuda:{ddp_local_rank}'
+     torch.cuda.set_device(device)
+     master_process = (ddp_rank == 0)
+     seed_offset = ddp_rank
+     assert gradient_accumulation_steps % ddp_world_size == 0
+     gradient_accumulation_steps //= ddp_world_size
+ else:
+     master_process = True
+     seed_offset = 0
+     ddp_world_size = 1
+
+ tokens_per_iter = gradient_accumulation_steps * ddp_world_size * batch_size * block_size
+ print(f"ℹ️ tokens per iteration = {tokens_per_iter:,}")
+
+ if master_process:
+     os.makedirs(out_dir, exist_ok=True)
+ torch.manual_seed(1337 + seed_offset)
+ torch.backends.cuda.matmul.allow_tf32 = True
+ torch.backends.cudnn.allow_tf32 = True
+ device_type = 'cuda' if 'cuda' in device else 'cpu'
+ ptdtype = {'float32': torch.float32, 'bfloat16': torch.bfloat16, 'float16': torch.float16}[dtype]
+ ctx = nullcontext() if device_type == 'cpu' else torch.amp.autocast(device_type=device_type, dtype=ptdtype)
+
+ # -----------------------------------------------------------------------------
+ # BATCH LOADER
+ def get_batch(split):
+     data = np.memmap(os.path.join(data_dir, f'{split}.bin'),
+                      dtype=dtype_token, mode='r')
+     ix = torch.randint(len(data) - block_size, (batch_size,))
+     x = torch.stack([torch.from_numpy(data[i:i+block_size].astype(np.int64)) for i in ix])
+     y = torch.stack([torch.from_numpy(data[i+1:i+1+block_size].astype(np.int64)) for i in ix])
+     if device_type == 'cuda':
+         x, y = x.pin_memory().to(device, non_blocking=True), y.pin_memory().to(device, non_blocking=True)
+     else:
+         x, y = x.to(device), y.to(device)
+     return x, y
+
+ # -----------------------------------------------------------------------------
+ # MODEL INIT / RESUME
+ iter_num = 0
+ best_val_loss = 1e9
+
+ meta = pickle.load(open(meta_path, 'rb'))
+ vocab_size = meta['vocab_size']
+
+ model_args = dict(
+     n_layer = n_layer,
+     n_head = n_head,
+     n_embd = n_embd,
+     block_size = block_size,
+     bias = bias,
+     vocab_size = vocab_size,
+     dropout = dropout,
+ )
+
+ if init_from == 'scratch':
+     print("ℹ️ Initializing new model from scratch")
+     model = GPT(GPTConfig(**model_args))
+
+ elif init_from == 'resume':
+     print(f"ℹ️ Resuming from {out_dir}")
+     ckpt = torch.load(os.path.join(out_dir, 'ckpt.pt'), map_location=device)
+     for k in ['n_layer', 'n_head', 'n_embd', 'block_size', 'bias', 'vocab_size']:
+         model_args[k] = ckpt['model_args'][k]
+     model = GPT(GPTConfig(**model_args))
+     state = ckpt['model']
+     for key in list(state.keys()):
+         if key.startswith('_orig_mod.'):
+             state[key[len('_orig_mod.'):]] = state.pop(key)
+     model.load_state_dict(state)
+     iter_num = ckpt['iter_num']
+     best_val_loss = ckpt['best_val_loss']
+
+ elif init_from.startswith('gpt2'):
+     print(f"ℹ️ Initializing from OpenAI GPT-2 weights: {init_from}")
+     override = dict(dropout=dropout)
+     model = GPT.from_pretrained(init_from, override)
+     for k in ['n_layer', 'n_head', 'n_embd', 'block_size', 'bias', 'vocab_size']:
+         model_args[k] = getattr(model.config, k)
+
+ if block_size < model.config.block_size:
+     model.crop_block_size(block_size)
+     model_args['block_size'] = block_size
+
+ model.to(device)
+ scaler = torch.cuda.amp.GradScaler(enabled=(dtype == 'float16'))
+ optimizer = model.configure_optimizers(weight_decay, learning_rate, (beta1, beta2), device_type)
+ if init_from == 'resume':
+     optimizer.load_state_dict(ckpt['optimizer'])
+
+ # -----------------------------------------------------------------------------
+ # COMPILE & DDP WRAP
+ if compile:
+     print("ℹ️ Compiling the model...")
+     model = torch.compile(model)
+ if ddp:
+     model = DDP(model, device_ids=[ddp_local_rank])
+
+ raw_model = model.module if ddp else model
+
+ # -----------------------------------------------------------------------------
+ # INITIAL CHECKPOINT at step 0
+ if master_process:
+     ckpt = {
+         'model': raw_model.state_dict(),
+         'optimizer': optimizer.state_dict(),
+         'model_args': model_args,
+         'iter_num': iter_num,
+         'best_val_loss': best_val_loss,
+         'config': config,
+     }
+     ckpt_path = os.path.join(out_dir, f'ckpt_{iter_num:06d}.pt')
+     print(f"💾 Saving initial checkpoint to {ckpt_path}")
+     torch.save(ckpt, ckpt_path)
+
+ # -----------------------------------------------------------------------------
+ # LOSS ESTIMATE
+ @torch.no_grad()
+ def estimate_loss():
+     out = {}
+     model.eval()
+     for split in ('train', 'val'):
+         losses = torch.zeros(eval_iters)
+         for k in range(eval_iters):
+             X, Y = get_batch(split)
+             with ctx:
+                 _, loss = model(X, Y)
+             losses[k] = loss.item()
+         out[split] = losses.mean().item()
+     model.train()
+     return out
+
+ def get_lr(it):
+     if it < warmup_iters:
+         return learning_rate * (it + 1) / (warmup_iters + 1)
+     if it > lr_decay_iters:
+         return min_lr
+     decay_ratio = (it - warmup_iters) / (lr_decay_iters - warmup_iters)
+     coeff = 0.5 * (1 + math.cos(math.pi * decay_ratio))
+     return min_lr + coeff * (learning_rate - min_lr)
+
+ if wandb_log and master_process:
+     import wandb
+     wandb.init(project=wandb_project, name=wandb_run_name, config=config)
+
+ # -----------------------------------------------------------------------------
+ # TRAINING LOOP
+ X, Y = get_batch('train')
+ t0 = time.time()
+ local_iter = 0
+ while True:
+     lr = get_lr(iter_num) if decay_lr else learning_rate
+     for pg in optimizer.param_groups:
+         pg['lr'] = lr
+
+     if iter_num % eval_interval == 0 and master_process:
+         losses = estimate_loss()
+         print(f"step {iter_num}: train loss {losses['train']:.4f}, val loss {losses['val']:.4f}")
+         if wandb_log:
+             wandb.log({"iter": iter_num, "train/loss": losses['train'], "val/loss": losses['val'], "lr": lr})
+
+         should_save = (
+             losses['val'] < best_val_loss
+             or always_save_checkpoint
+             or (iter_num % save_interval == 0)
+         )
+         if should_save and iter_num > 0:
+             best_val_loss = min(best_val_loss, losses['val'])
+             ckpt = {
+                 'model': raw_model.state_dict(),
+                 'optimizer': optimizer.state_dict(),
+                 'model_args': model_args,
+                 'iter_num': iter_num,
+                 'best_val_loss': best_val_loss,
+                 'config': config,
+             }
+             ckpt_path = os.path.join(out_dir, f'ckpt_{iter_num:06d}.pt')
+             print(f"💾 Saving checkpoint to {ckpt_path}")
+             torch.save(ckpt, ckpt_path)
+             if checkpoint_limit is not None:
+                 all_ckpts = sorted(f for f in os.listdir(out_dir)
+                                    if f.startswith('ckpt_') and f.endswith('.pt'))
+                 for old in all_ckpts[:-checkpoint_limit]:
+                     os.remove(os.path.join(out_dir, old))
+
+     if iter_num == 0 and eval_only:
+         break
+
+     for micro in range(gradient_accumulation_steps):
+         if ddp:
+             model.require_backward_grad_sync = (micro == gradient_accumulation_steps - 1)
+         with ctx:
+             logits, loss = model(X, Y)
+             loss = loss / gradient_accumulation_steps
+         X, Y = get_batch('train')
+         scaler.scale(loss).backward()
+
+     if grad_clip != 0.0:
+         scaler.unscale_(optimizer)
+         torch.nn.utils.clip_grad_norm_(model.parameters(), grad_clip)
+     scaler.step(optimizer)
+     scaler.update()
+     optimizer.zero_grad(set_to_none=True)
+
+     dt = time.time() - t0
+     t0 = time.time()
+     if iter_num % log_interval == 0 and master_process:
+         lossf = loss.item() * gradient_accumulation_steps
+         if local_iter >= 5:
+             mfu = raw_model.estimate_mfu(batch_size * gradient_accumulation_steps, dt)
+             print(f"iter {iter_num}: loss {lossf:.4f}, time {dt*1000:.2f}ms, mfu {mfu*100:.2f}%")
+         else:
+             print(f"iter {iter_num}: loss {lossf:.4f}, time {dt*1000:.2f}ms")
+
+     iter_num += 1
+     local_iter += 1
+     if iter_num > max_iters:
+         break
+
+ if ddp:
+     destroy_process_group()