Text Generation · English · research
boyuia committed (verified)
Commit f4c3c19 · 1 parent: 79c1c07

Added model source.

Files changed (3)
  1. README.md +27 -3
  2. dataset.jsonl +0 -0
  3. model.py +267 -0
README.md CHANGED
@@ -1,3 +1,27 @@
- ---
- license: mit
- ---
+ # Leaf
+
+ An open-source prototype language model used for AI research.
+
+ ## About this project
+
+ Leaf is an experimental language model built with PyTorch.
+ ## Research
+
+ With Leaf, we have been testing what a small language model can learn.
+
+ Starting with a small dataset embedded directly in a Python script, Leaf trains for only 2700 steps (more training steps generally improve what it learns).
+
+ **Training Data:**
+
+ ```json
+ {"text": "this is a much longer text that will serve as a simple dataset for our tiny language model. The model will learn to predict the next character based on the previous characters in the sequence."}
+ {"text": "This demonstrates the core idea behind training an autoregressive language model. The quick brown fox jumps over the lazy dog."}
+ {"text": "A journey of a thousand miles begins with a single step. The early bird catches the worm. All that glitters is not gold. A stitch in time saves nine."}
+ {"text": "Where there's a will, there's a way. Look before you leap. You can't make an omelette without breaking a few eggs. Practice makes perfect. Don't count your chickens before they hatch."}
+ ```
+
+ Training on this embedded dataset produced the following sample output:
+
+ `text that will serve`
+
+ We then trained on community JSONL datasets, and unfortunately the output was:
+
+ `rimetricE7tich then`
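The README above describes a character-level setup: each JSONL line carries a `"text"` field, and the model learns to predict the next character. As a minimal sketch of that idea (the `"text"` key and the sample sentence are taken from the README; the rest is illustrative and not part of the commit), a single line can be parsed and turned into the integer sequence a character-level model trains on:

```python
import json

# One JSONL record in the format shown in the README (assumed "text" key).
line = '{"text": "The quick brown fox jumps over the lazy dog."}'
record = json.loads(line)

# Build a character-level vocabulary from the text, then encode it as integer ids.
chars = sorted(set(record["text"]))
stoi = {ch: i for i, ch in enumerate(chars)}
encoded = [stoi[ch] for ch in record["text"]]

print(len(chars), "unique characters")
print(encoded[:10])  # the first ten token ids the model would see
```

model.py below applies the same encoding to the whole corpus before training.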
dataset.jsonl ADDED
The diff for this file is too large to render.
 
model.py ADDED
@@ -0,0 +1,267 @@
+ import torch
+ import torch.nn as nn
+ from torch.nn import functional as F
+ import json
+ import os
+
+ # --- Hyperparameters ---
+ # These are the settings for our model. You can experiment with these values.
+ batch_size = 32 # How many sequences to process in parallel
+ block_size = 8 # Maximum context length for predictions
+ max_iters = 3000 # Number of training iterations
+ eval_interval = 300 # How often to evaluate the model
+ learning_rate = 1e-2 # The learning rate for the optimizer
+ device = 'cuda' if torch.cuda.is_available() else 'cpu' # Use GPU if available
+ eval_iters = 200 # Number of iterations for evaluation
+ n_embd = 32 # The dimension of the token embeddings
+ n_head = 4 # The number of attention heads in the Multi-Head Attention block
+ n_layer = 4 # The number of Transformer blocks
+ dropout = 0.0 # Dropout rate for regularization
+
+ # --- Data Preparation ---
+ # To use this code, you need to create a file named 'dataset.jsonl'
+ # in the same directory as this script. Each line of the file should be a JSON object
+ # with 'header' and 'formal_statement' keys.
+ file_path = 'dataset.jsonl'
+
+ # Process the JSONL data from the file.
+ corpus = ""
+ try:
+     with open(file_path, 'r') as f:
+         for line in f:
+             data_point = json.loads(line)
+             # Combine the 'header' and 'formal_statement' fields.
+             # We add a newline character to separate the two parts of the text.
+             corpus += data_point['header'] + '\n' + data_point['formal_statement'] + '\n'
+ except FileNotFoundError:
+     print(f"Error: The file '{file_path}' was not found. Please create it and add your data.")
+     exit()
+ except json.JSONDecodeError:
+     print(f"Error: There was a problem parsing a line in '{file_path}'. Make sure each line is a valid JSON object.")
+     exit()
+ except KeyError:
+     print(f"Error: A line in '{file_path}' does not have the 'header' or 'formal_statement' keys. Please check your JSONL file format.")
+     exit()
+
+ # Check if the corpus is empty after loading the file.
+ if not corpus:
+     print(f"Error: The corpus is empty. This could be because '{file_path}' is empty or contains no valid text.")
+     exit()
+
+ # Here we create a simple character-level tokenizer.
+ # The vocabulary consists of all unique characters in the text.
+ chars = sorted(list(set(corpus)))
+ vocab_size = len(chars)
+ stoi = {ch: i for i, ch in enumerate(chars)}
+ itos = {i: ch for i, ch in enumerate(chars)}
+ # encode maps a string to a list of integers; decode maps a list of integers back to a string.
+ encode = lambda s: [stoi[c] for c in s]
+ decode = lambda l: ''.join([itos[i] for i in l])
+
+ # Convert the entire text into a PyTorch tensor.
+ data = torch.tensor(encode(corpus), dtype=torch.long)
+
+ # Create a simple train/validation split.
+ n = int(0.9 * len(data))
+ train_data = data[:n]
+ val_data = data[n:]
+
+ # --- Helper Functions ---
+ # This function gets a random batch of data from either the training or validation set.
+ def get_batch(split):
+     data = train_data if split == 'train' else val_data
+     # Generate random starting indices for each sequence in the batch.
+     ix = torch.randint(len(data) - block_size, (batch_size,))
+     # Stack the sequences to create a batch.
+     x = torch.stack([data[i:i + block_size] for i in ix])
+     y = torch.stack([data[i + 1:i + block_size + 1] for i in ix])
+     x, y = x.to(device), y.to(device)
+     return x, y
+
+ # This function is used to estimate the model's loss on both the train and validation sets.
+ # It uses torch.no_grad() to make the process more efficient as we're not training.
+ @torch.no_grad()
+ def estimate_loss():
+     out = {}
+     model.eval() # Set the model to evaluation mode.
+     for split in ['train', 'val']:
+         losses = torch.zeros(eval_iters)
+         for k in range(eval_iters):
+             X, Y = get_batch(split)
+             logits, loss = model(X, Y)
+             losses[k] = loss.item()
+         out[split] = losses.mean()
+     model.train() # Set the model back to training mode.
+     return out
+
+ # --- The Self-Attention Mechanism ---
+ # This is a single attention head.
+ class Head(nn.Module):
+     def __init__(self, head_size):
+         super().__init__()
+         # Linear layers to project the input into key, query, and value vectors.
+         self.key = nn.Linear(n_embd, head_size, bias=False)
+         self.query = nn.Linear(n_embd, head_size, bias=False)
+         self.value = nn.Linear(n_embd, head_size, bias=False)
+         # A buffer holding a lower-triangular mask, which prevents each token from
+         # attending to future tokens (decoder-style, causal attention).
+         self.register_buffer('tril', torch.tril(torch.ones(block_size, block_size)))
+         # Dropout layer for regularization.
+         self.dropout = nn.Dropout(dropout)
+
+     def forward(self, x):
+         B, T, C = x.shape
+         k = self.key(x) # (B, T, head_size)
+         q = self.query(x) # (B, T, head_size)
+
+         # Compute the affinity scores (weights).
+         # q @ k.transpose(-2, -1) multiplies q with k transposed, scaled by the head dimension.
+         wei = q @ k.transpose(-2, -1) * k.shape[-1]**-0.5 # (B, T, head_size) @ (B, head_size, T) -> (B, T, T)
+         # Apply the lower-triangular mask to enforce causality.
+         wei = wei.masked_fill(self.tril[:T, :T] == 0, float('-inf'))
+         # Apply softmax to get the attention weights.
+         wei = F.softmax(wei, dim=-1)
+         wei = self.dropout(wei)
+
+         v = self.value(x) # (B, T, head_size)
+         out = wei @ v # (B, T, T) @ (B, T, head_size) -> (B, T, head_size)
+         return out
+
+ # This combines multiple attention heads in parallel.
+ class MultiHeadAttention(nn.Module):
+     def __init__(self, num_heads, head_size):
+         super().__init__()
+         # Create a list of `Head` modules.
+         self.heads = nn.ModuleList([Head(head_size) for _ in range(num_heads)])
+         # A final linear layer to project the concatenated output of all heads.
+         self.proj = nn.Linear(num_heads * head_size, n_embd)
+         self.dropout = nn.Dropout(dropout)
+
+     def forward(self, x):
+         # Concatenate the output from each head.
+         out = torch.cat([h(x) for h in self.heads], dim=-1)
+         out = self.dropout(self.proj(out))
+         return out
+
+ # This is a simple feed-forward network.
+ class FeedForward(nn.Module):
+     def __init__(self, n_embd):
+         super().__init__()
+         # A simple linear-ReLU-linear stack.
+         self.net = nn.Sequential(
+             nn.Linear(n_embd, 4 * n_embd),
+             nn.ReLU(),
+             nn.Linear(4 * n_embd, n_embd),
+             nn.Dropout(dropout),
+         )
+
+     def forward(self, x):
+         return self.net(x)
+
+ # This is a single Transformer block, composed of Multi-Head Attention and a Feed-Forward network.
+ class TransformerBlock(nn.Module):
+     def __init__(self, n_embd, n_head):
+         super().__init__()
+         head_size = n_embd // n_head
+         # The attention mechanism.
+         self.sa = MultiHeadAttention(n_head, head_size)
+         # The feed-forward network.
+         self.ffwd = FeedForward(n_embd)
+         # Layer normalization layers.
+         self.ln1 = nn.LayerNorm(n_embd)
+         self.ln2 = nn.LayerNorm(n_embd)
+
+     def forward(self, x):
+         # Apply self-attention with a residual connection and layer normalization.
+         x = x + self.sa(self.ln1(x))
+         # Apply feed-forward with another residual connection and layer normalization.
+         x = x + self.ffwd(self.ln2(x))
+         return x
+
+ # --- The Main Language Model ---
+ class LanguageModel(nn.Module):
+     def __init__(self):
+         super().__init__()
+         # A token embedding table: each integer token gets a vector representation.
+         self.token_embedding_table = nn.Embedding(vocab_size, n_embd)
+         # A positional embedding table: each position gets a vector representation.
+         self.position_embedding_table = nn.Embedding(block_size, n_embd)
+         # A sequence of Transformer blocks.
+         self.blocks = nn.Sequential(*[TransformerBlock(n_embd, n_head) for _ in range(n_layer)])
+         # A final layer normalization.
+         self.ln_f = nn.LayerNorm(n_embd)
+         # A linear layer to project the final embeddings to the vocabulary size.
+         self.lm_head = nn.Linear(n_embd, vocab_size)
+
+     def forward(self, idx, targets=None):
+         B, T = idx.shape
+
+         # Get token embeddings and positional embeddings.
+         tok_emb = self.token_embedding_table(idx) # (B, T, C)
+         pos_emb = self.position_embedding_table(torch.arange(T, device=device)) # (T, C)
+         # Add them together to get the final embeddings.
+         x = tok_emb + pos_emb # (B, T, C)
+         # Pass through the Transformer blocks.
+         x = self.blocks(x)
+         x = self.ln_f(x)
+         # Project to the vocabulary size.
+         logits = self.lm_head(x) # (B, T, vocab_size)
+
+         loss = None
+         if targets is not None:
+             # Reshape for cross-entropy loss calculation.
+             B, T, C = logits.shape
+             logits = logits.view(B * T, C)
+             targets = targets.view(B * T)
+             loss = F.cross_entropy(logits, targets)
+
+         return logits, loss
+
+     # A method to generate text.
+     def generate(self, idx, max_new_tokens):
+         # idx is a (B, T) tensor of indices in the current context.
+         for _ in range(max_new_tokens):
+             # Crop idx to block_size, as the model has a limited context.
+             idx_cond = idx[:, -block_size:]
+             # Get predictions.
+             logits, loss = self(idx_cond)
+             # Focus only on the last time step.
+             logits = logits[:, -1, :]
+             # Apply softmax to get probabilities.
+             probs = F.softmax(logits, dim=-1)
+             # Sample from the distribution.
+             idx_next = torch.multinomial(probs, num_samples=1)
+             # Append the new token to the sequence.
+             idx = torch.cat((idx, idx_next), dim=1)
+         return idx
+
+ # --- Training and Generation ---
+ model = LanguageModel()
+ m = model.to(device)
+
+ # Create a PyTorch optimizer.
+ optimizer = torch.optim.AdamW(model.parameters(), lr=learning_rate)
+
+ # Main training loop.
+ for iter in range(max_iters):
+     # Every few iterations, evaluate the loss on both splits.
+     if iter % eval_interval == 0:
+         losses = estimate_loss()
+         print(f"step {iter}: train loss {losses['train']:.4f}, val loss {losses['val']:.4f}")
+
+     # Sample a batch of data.
+     xb, yb = get_batch('train')
+
+     # Forward pass: compute loss.
+     logits, loss = model(xb, yb)
+     # Backward pass: compute gradients.
+     optimizer.zero_grad(set_to_none=True)
+     loss.backward()
+     # Update the model parameters.
+     optimizer.step()
+
+ # --- Generate new text from the trained model ---
+ context = torch.zeros((1, 1), dtype=torch.long, device=device)
+ generated_text_indices = m.generate(context, max_new_tokens=20)
+ print("\nGenerated text:")
+ print(decode(generated_text_indices[0].tolist()))
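Note that model.py expects each line of `dataset.jsonl` to carry `header` and `formal_statement` keys, while the README examples use a single `text` key. A minimal conversion sketch (the input filename `text_dataset.jsonl` and the choice to leave `header` empty are assumptions for illustration, not part of the commit) could bridge the two formats before running `python model.py`:

```python
import json

# Hypothetical helper: rewrite a {"text": ...} JSONL file into the
# {"header": ..., "formal_statement": ...} format that model.py reads.
with open('text_dataset.jsonl', 'r') as src, open('dataset.jsonl', 'w') as dst:
    for line in src:
        record = json.loads(line)
        converted = {"header": "", "formal_statement": record["text"]}
        dst.write(json.dumps(converted) + "\n")
```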