Upload folder using huggingface_hub
- prepare.py +68 -0
- train.bin +3 -0
- val.bin +3 -0
prepare.py
ADDED
@@ -0,0 +1,68 @@
+import os
+import numpy as np
+
+from tqdm import tqdm
+from datasets import load_dataset
+from transformers import LlamaTokenizerFast
+
+num_proc = 4
+block_size = 1024
+
+if __name__ == "__main__":
+
+    # features: ['text', 'id', 'dump', 'url', 'date', 'file_path', 'language', 'language_score', 'token_count']
+    ds = load_dataset("HuggingFaceFW/fineweb", name="sample-10BT", num_proc=num_proc)
+
+    split_ds = ds["train"].train_test_split(test_size=0.0005, seed=2357, shuffle=True)
+    split_ds['val'] = split_ds.pop('test')
+
+    tokenizer = LlamaTokenizerFast.from_pretrained(
+        "TinyLlama/TinyLlama-1.1B-Chat-v1.0",
+        pad_token="<s>",
+        model_max_length=None,
+        add_bos_token=False,
+        add_eos_token=True,
+    )
+
+    # tokenize the dataset. each document is encoded in full (no truncation
+    # or padding); add_eos_token=True appends an EOS token so documents stay
+    # separable after they are concatenated into one stream.
+    def process(example):
+        ids = tokenizer(
+            example['text'],
+            truncation=False,
+            return_length=True,
+            padding=False,
+            return_tensors='pt',
+        )['input_ids'][0]
+        out = {'ids': ids, 'len': len(ids)}
+        return out
+
+    tokenized = split_ds.map(
+        process,
+        remove_columns=ds['train'].column_names,
+        desc="tokenizing the splits",
+        num_proc=num_proc,
+    )
+
+    # concatenate all the ids in each dataset into one large file we can use for training
+    for split, dset in tokenized.items():
+        arr_len = np.sum(dset['len'], dtype=np.uint64)
+        filename = os.path.join(os.path.dirname(__file__), f'{split}.bin')
+        dtype = np.uint16  # (can do since vocab_size == 32000 is < 2**16)
+        arr = np.memmap(filename, dtype=dtype, mode='w+', shape=(arr_len,))
+        total_batches = 1024
+
+        idx = 0
+        for batch_idx in tqdm(range(total_batches), desc=f'writing {filename}'):
+            # Batch together samples for faster write
+            batch = dset.shard(num_shards=total_batches, index=batch_idx, contiguous=True).with_format('numpy')
+            arr_batch = np.concatenate(batch['ids'], axis=0)
+            # Write into mmap
+            arr[idx : idx + len(arr_batch)] = arr_batch
+            idx += len(arr_batch)
+        arr.flush()
+
+    # train.bin is ~22GB, val.bin ~11MB
+    # train has ~11B tokens (11,774,429,883)
+    # val has ~5.9M tokens (5,908,112)
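Note: the two .bin files written above are flat arrays of uint16 token ids with no header. Below is a minimal sketch, not part of this commit, of how they could be read back at training time with a nanoGPT-style random-offset sampler; get_batch, batch_size, and the relative file paths are illustrative assumptions.

import numpy as np
import torch

block_size = 1024  # context length; matches block_size in prepare.py
batch_size = 8     # illustrative

def get_batch(split):
    # memmap the flat uint16 token stream written by prepare.py (illustrative path)
    data = np.memmap(f"{split}.bin", dtype=np.uint16, mode="r")
    ix = torch.randint(len(data) - block_size, (batch_size,))
    x = torch.stack([torch.from_numpy(data[i:i + block_size].astype(np.int64)) for i in ix])
    y = torch.stack([torch.from_numpy(data[i + 1:i + 1 + block_size].astype(np.int64)) for i in ix])
    return x, y

x, y = get_batch("train")  # x, y: (batch_size, block_size) LongTensors, y shifted by one token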
train.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f3721c1e56bb6fe461982e65429a9c33c8af6307146c9673489da756c1df5c94
+size 23548859766
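Note: the pointer above records the blob's SHA-256 digest and byte size, so a downloaded train.bin can be checked against it. A minimal sketch, assuming the file has been fetched into the working directory (not part of the committed script):

import hashlib
import os

expected_oid = "f3721c1e56bb6fe461982e65429a9c33c8af6307146c9673489da756c1df5c94"
expected_size = 23548859766

h = hashlib.sha256()
with open("train.bin", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # stream in 1 MiB chunks
        h.update(chunk)
assert os.path.getsize("train.bin") == expected_size
assert h.hexdigest() == expected_oid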
val.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1c66e01f7bb7ba8465cb49e819ec7f3c409f1b9673ef33a4d12bb0711f85aec2
+size 11816224
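Note: both pointer sizes line up with the token counts quoted at the end of prepare.py, since each token id is stored as a 2-byte uint16:

# 2 bytes per uint16 token id
assert 11_774_429_883 * 2 == 23_548_859_766  # train.bin
assert 5_908_112 * 2 == 11_816_224           # val.bin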