Upload aux_files/train_mock_data_order.py with huggingface_hub
aux_files/train_mock_data_order.py
CHANGED
@@ -1 +1,436 @@
"""
This file mocks the startup of the lit-gpt-dev train.py file to materialize the data order.

It either iterates the data for inspection, or converts the files to the parquet format,
optionally consolidating each worker's data into a single file for better portability.
"""

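# The two modes are selected in main() via the iterate_k_steps and
# save_k_steps_to_parquet arguments passed to materialize_worker_dataset().
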
import os
import random
import shutil
from functools import partial
from multiprocessing import Pool
from pathlib import Path

from tqdm import tqdm
from jsonargparse import CLI

from torch.utils.data import DataLoader

from datasets import Dataset, load_dataset

from litgpt.settings import CLISettings, DataEntry
from litgpt.tokenizer import Tokenizer
from litgpt.packed_cycle_dataset import CombinedDataset, PackedDataset
from litgpt.data_scheduler_utils import DataSchedulerTracker, DataScheduler
from litgpt.data_loading_utils import generic_collate_fn

os.environ["OUTPUT_DIR"] = f"{os.path.dirname(__file__)}/output/data_order_test"
os.makedirs(os.environ["OUTPUT_DIR"], exist_ok=True)

# The settings class will look for these to set the topology and localization handles.
# For some we set defaults here, but the world will be configured to be larger and
# multiprocessing will be used to parallelize the data order materialization.

# defaults
os.environ["SLURM_JOB_ID"] = "0"
os.environ["SLURM_ARRAY_JOB_ID"] = "0"
os.environ["SLURM_ARRAY_TASK_ID"] = "0"
os.environ["SLURM_ARRAY_TASK_COUNT"] = "1"
os.environ["MASTER_ADDR"] = "computer0"
os.environ["MASTER_PORT"] = "12345"

# world size controls the number of shards of work that we will mock
# os.environ["SLURM_JOB_NUM_NODES"] = "1"  # 1 node
# os.environ["WORLD_SIZE"] = f"{1 * 1}"  # 1 node, 1 GPU each
# os.environ["SLURM_JOB_NUM_NODES"] = "4"  # 4 nodes
# os.environ["WORLD_SIZE"] = f"{4 * 8}"  # 4 nodes, 8 GPUs each
os.environ["SLURM_JOB_NUM_NODES"] = "32"  # 32 nodes
os.environ["WORLD_SIZE"] = f"{32 * 8}"  # 32 nodes, 8 GPUs each
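# With the block layout mocked here, global rank r would live on node r // 8 with local
# rank r % 8 (assuming ranks are assigned node-by-node); e.g. rank 19 -> node 2, GPU 3.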

os.environ["RANK"] = "0"
os.environ["SLURM_PROCID"] = "0"
# in case we want to mock a smaller number of tasks per node
os.environ["SLURM_NTASKS_PER_NODE"] = str(min(8, int(os.environ["WORLD_SIZE"])))

# we can run with a smaller real worker pool size
# but still do the same number of shards of work, just in batches of size pool
# NUM_PROC = int(os.environ["WORLD_SIZE"])
# NUM_PROC = 1
# NUM_PROC = 32
NUM_PROC = 64

assert NUM_PROC <= int(os.environ["WORLD_SIZE"]), (
    f"NUM_PROC ({NUM_PROC}) must be less than or equal to WORLD_SIZE ({os.environ['WORLD_SIZE']}). "
    "This is to ensure that we do not exceed the number of available ranks in the mock distributed setup."
)
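# pool.map below hands out one rank per task, so the 256 mock ranks are processed in
# waves of NUM_PROC; with NUM_PROC = 64 each pool worker handles ~4 ranks sequentially.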

# when mocking the parallel workers, this is not used
# when linearizing, this determines how many chunks to save the complete dataset into
PARQUET_SHARDS = None
# PARQUET_SHARDS = 32
# PARQUET_SHARDS = 256

assert PARQUET_SHARDS is None or (
    int(os.environ["WORLD_SIZE"]) == 1
), f"PARQUET_SHARDS ({PARQUET_SHARDS}) must be None when WORLD_SIZE ({os.environ['WORLD_SIZE']}) is greater than 1."

# we also mock a fabric-like class to hold the key fields like world_size and global_rank
# that we will use to configure the local view of the dataset for each "rank"
# when spinning up the multiprocessing workers.
class MockFabric:
    """Mock fabric to simulate distributed settings."""

    def __init__(self, global_rank: int, world_size: int):
        """Initialize the mock fabric with global rank and world size."""
        self.global_rank = global_rank
        self.world_size = world_size

    def print(self, *args, **kwargs):
        """Mock fabric.print function to simulate distributed printing."""
        print(*args, **kwargs)
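
# A real Lightning Fabric exposes these same two attributes (plus collectives we don't
# need here); a quick sanity check of the mock, with illustrative values:
#   fabric = MockFabric(global_rank=3, world_size=256)
#   assert 0 <= fabric.global_rank < fabric.world_size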

def create_dataloader(
    data_config: list[DataEntry],
    batch_size: int,
    block_size: int,
    n_chunks: int,
    data_dir: str,
    fabric: MockFabric,
    seed: int = 1337,
    *,
    cfg: CLISettings,
    tokenizer: Tokenizer,
    get_filenames_only: bool = False,
    filenames: list = None,
):
    global_data_dir = data_dir
    datasets = []
    for curr_config in data_config:

        # NOTE omitted other blocks here

        prefix = curr_config.prefix

        if curr_config.data_dir is not None:
            data_dir = curr_config.data_dir
        else:
            data_dir = global_data_dir

        if get_filenames_only:
            if fabric.global_rank == 0:
                filenames = [str(pth) for pth in sorted(Path(data_dir).glob(f"{prefix}*"))]
                if cfg.shuffle_filenames:
                    random.seed(seed)
                    random.shuffle(filenames)  # inplace
                if not filenames:
                    raise FileNotFoundError(f"No files found at {str(data_dir)} with prefix {prefix}.")
            else:
                filenames: list[str] = None  # type: ignore # hashtag believe

            # If we only want to get the filenames, return them directly.
            return filenames, None

        # filenames = fabric.broadcast(filenames, 0)  # this is a blocking op from rank 0 to all other ranks
        # we can't broadcast in the mock multiprocessing context, so the filenames are
        # constructed once and passed to all workers
        assert filenames is not None, "filenames must be passed in when they are not globbed on rank 0"

        # log after broadcast so we know we passed it.
        if fabric.global_rank == 0:
            num_processes = fabric.world_size
            process_rank = fabric.global_rank
            fabric.print(
                f"Rank ({process_rank}/{num_processes}) glob'd {len(filenames)} files"
                f" from {data_dir}{f' w/ prefix {prefix}' if prefix not in ['', '*'] else ''},"
                f" files[:3]: {filenames[:3]}"
            )

        dataset = PackedDataset(
            filenames,
            n_chunks=n_chunks,
            block_size=block_size,
            shuffle=cfg.shuffle_blocks,
            seed=seed,
            num_processes=fabric.world_size,
            process_rank=fabric.global_rank,
            data_id=prefix,
            return_data_id=curr_config.return_data_id or cfg.return_data_id,
        )
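
        # PackedDataset is what makes the per-rank order deterministic: given the same
        # filenames, seed, num_processes, and process_rank, each mock rank should draw
        # the same disjoint slice of chunks it would see in real distributed training.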

        # NOTE omitted other blocks here

        datasets.append(dataset)

    if not datasets:
        raise RuntimeError(
            f"No data found at {data_dir}. Make sure you ran prepare_redpajama.py to create the dataset."
        )

    weights = [curr_config.weight for curr_config in data_config]
    data_scheduler_tracker = DataSchedulerTracker(weights)

    combined_dataset = CombinedDataset(
        datasets=datasets, seed=seed, data_scheduler_tracker=data_scheduler_tracker, data_telemetry=cfg.data_telemetry
    )

    parametrized_collate_fn = partial(
        generic_collate_fn,
        tokenizer=tokenizer,
        block_size=cfg.loader_block_size,
        pad_to_block_size=cfg.pad_to_block_size,
        add_bos=cfg.add_bos,
        add_eos=cfg.add_eos,
        collate_checks_enabled=cfg.collate_checks_enabled,
        all_block_size_tensors=cfg.all_block_size_tensors,
        no_shift_ret_raw_tokens=True,  # we don't shift the tokens in this mock
    )

    return (
        DataLoader(
            combined_dataset,
            batch_size=batch_size,
            shuffle=False,
            pin_memory=False,
            collate_fn=parametrized_collate_fn,
            num_workers=cfg.dataloader_num_workers,
        ),
        data_scheduler_tracker,
    )


def create_dataloaders(
    batch_size: int,
    block_size: int,
    fabric: MockFabric,
    seed: int = 1337,
    *,
    cfg: CLISettings,
    tokenizer: Tokenizer,
    get_filenames_only: bool = False,
    train_filenames: list = None,
    val_filenames: list = None,
):
    cfg.train_dataset_prefixes = [ds.prefix for ds in cfg.data_config["train_data"]]
    cfg.val_dataset_prefixes = (
        [ds.prefix for ds in cfg.data_config["val_data"]] if "val_data" in cfg.data_config else []
    )

    fabric.print(f"Creating dataloaders with seed: {seed}")
    train_dataloader, data_scheduler_tracker = create_dataloader(
        cfg.data_config["train_data"],
        batch_size=batch_size,
        block_size=block_size,
        n_chunks=cfg.n_chunks,
        fabric=fabric,
        data_dir=cfg.train_data_dir,
        seed=seed,
        cfg=cfg,
        tokenizer=tokenizer,
        get_filenames_only=get_filenames_only,
        filenames=train_filenames,
    )
    val_dataloader, val_data_scheduler_tracker = (
        create_dataloader(
            cfg.data_config["val_data"],
            batch_size=batch_size,
            block_size=block_size,
            n_chunks=cfg.n_chunks,
            fabric=fabric,
            data_dir=cfg.val_data_dir,
            seed=seed,
            cfg=cfg,
            tokenizer=tokenizer,
            get_filenames_only=get_filenames_only,
            filenames=val_filenames,
        )
        if "val_data" in cfg.data_config
        else (None, None)
    )
    return train_dataloader, val_dataloader, data_scheduler_tracker, val_data_scheduler_tracker


def materialize_worker_dataset(
    worker_id: int,
    total_workers: int,
    cfg: CLISettings,
    tokenizer: Tokenizer,
    get_filenames_only: bool = False,
    train_filenames: list = None,
    val_filenames: list = None,
    ret_dataloaders: bool = True,
    iterate_k_steps: int = None,
    save_k_steps_to_parquet: int = None,
):
    """Builds a mock dataset for a worker based on its ID and total workers."""
    # Simulate some data order materialization logic
    fabric = MockFabric(global_rank=worker_id, world_size=total_workers)

    # For demonstration, we just print the worker info
    print(f"Worker ready ({fabric.global_rank}/{fabric.world_size})")

    train_dataloader, val_dataloader, data_scheduler_tracker, val_data_scheduler_tracker = create_dataloaders(
        batch_size=cfg.micro_batch_size,
        block_size=cfg.loader_block_size,
        fabric=fabric,
        seed=(cfg.seed + fabric.global_rank),
        cfg=cfg,
        tokenizer=tokenizer,
        get_filenames_only=get_filenames_only,
        train_filenames=train_filenames,
        val_filenames=val_filenames,
    )
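    # Note the per-rank seed offset (cfg.seed + fabric.global_rank): each mock worker
    # gets a distinct but reproducible shuffle, presumably mirroring the real train.py
    # startup this file mocks.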
    if get_filenames_only:
        # will have resulted in train_filenames, val_filenames, None, None
        return train_dataloader, val_dataloader, data_scheduler_tracker, val_data_scheduler_tracker

    print(f"Worker {fabric.global_rank} created dataloaders.")

    if ret_dataloaders:
        # Return the dataloaders and schedulers for further processing
        return (
            train_dataloader,
            val_dataloader,
            data_scheduler_tracker,
            val_data_scheduler_tracker,
        )

    if iterate_k_steps is not None:
        # simulate one step of the train dataloader draw
        train_iterator = iter(train_dataloader)

        for step in tqdm(range(iterate_k_steps), total=iterate_k_steps, desc=f"Worker {fabric.global_rank} iterating"):

            data_batch = next(train_iterator)
            input_ids, labels, metadata = data_batch

            if step < 3 and fabric.global_rank % int(os.environ["SLURM_NTASKS_PER_NODE"]) == 0:
                # Print the first few batches for inspection
                # and only from the "first worker on each node"
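                # (with SLURM_NTASKS_PER_NODE = 8 that is ranks 0, 8, 16, ...)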
                print(
                    f"Worker {fabric.global_rank} processed step {step + 1}/{iterate_k_steps} with input_ids shape: {input_ids.shape}, "
                    f"labels shape: {labels.shape}, metadata: {metadata}, input_ids[:,:10]: {input_ids[:,:10]}"
                )

    # the final op we'll allow is to construct a hf dataset from a generator wrapped around the dataloader
    # and then save it to parquet.
    # we'll point the ds cache directly into the output directory and also save the parquet version there
    if save_k_steps_to_parquet is not None:

        def k_step_generator(dataloader, k_steps):
            """Generator to yield k steps from the dataloader."""
            train_iterator = iter(dataloader)
            for step in tqdm(range(k_steps), total=k_steps, desc=f"Worker {fabric.global_rank} converting to dataset"):
                data_batch = next(train_iterator)
                input_ids, labels, metadata = data_batch
                for i in range(input_ids.shape[0]):
                    yield {
                        "input_ids": input_ids[i],
                    }

        # Create a Dataset from the generator
        gen_partial = partial(
            k_step_generator,
            dataloader=train_dataloader,
            k_steps=save_k_steps_to_parquet,
        )

        cache_dir = f"{os.environ['OUTPUT_DIR']}/cache_{fabric.global_rank}"
        os.makedirs(cache_dir, exist_ok=True)

        dataset = Dataset.from_generator(gen_partial, cache_dir=cache_dir, num_proc=1)

        # Save the dataset to parquet
        output_dir = f"{os.environ['OUTPUT_DIR']}/parquet"
        os.makedirs(output_dir, exist_ok=True)
        output_path = f"{output_dir}/worker_{fabric.global_rank:03d}-of-{fabric.world_size:03d}_ordered_dataset.parquet"
        print(f"Worker {fabric.global_rank} saving dataset to {output_path}...")

        # Optionally allow sharding the dataset into multiple files
        if PARQUET_SHARDS is None:
            dataset.to_parquet(output_path)
        else:
            for i in range(PARQUET_SHARDS):
                shard_output_path = f"{output_dir}/ordered_dataset_shard_{i:03d}-of-{PARQUET_SHARDS:03d}.parquet"
                print(f"Worker {fabric.global_rank} saving shard {i} to {shard_output_path}...")
                dataset.shard(num_shards=PARQUET_SHARDS, index=i).to_parquet(shard_output_path)
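
        # A saved order can later be inspected with the already-imported load_dataset,
        # e.g. (sketch; the data_files glob assumes the naming pattern above):
        #   ds = load_dataset("parquet", data_files=f"{output_dir}/*.parquet", split="train")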

        # clear the cache dir for this worker after saving
        shutil.rmtree(cache_dir)

    success = True  # Simulate success or failure of dataset preparation

    return success


def main():
    """Encapsulates main scope away from import calls."""

    # Configuration loader
    cfg: CLISettings = CLI(CLISettings)  # type: ignore

    if cfg.max_steps is None:
        cfg.max_tokens_per_device = cfg.max_tokens // cfg.WORLD_SIZE
        cfg.tokens_per_step = cfg.micro_batch_size * cfg.block_size
        cfg.max_steps = cfg.max_tokens_per_device // cfg.tokens_per_step
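        # Illustrative numbers (not from any real config): max_tokens=1e12, WORLD_SIZE=256,
        # micro_batch_size=4, block_size=4096 -> ~3.9e9 tokens/device at 16_384 tokens/step,
        # so max_steps ~= 238_418.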

        print(
            f"Computed max steps: {cfg.max_steps} based on max tokens {cfg.max_tokens} and micro batch size {cfg.micro_batch_size}."
        )
    else:
        print(f"Using provided max steps: {cfg.max_steps}.")

    tokenizer = Tokenizer(cfg.tokenizer_path)
    if tokenizer.pad_id is None:
        tokenizer.pad_id = -1

    # before we start the pool, we run one instance of the worker function just to get the filenames,
    # as we'll need to pass those to the workers instead of using a distributed broadcast.
    train_filenames, val_filenames, _, _ = materialize_worker_dataset(
        worker_id=0,
        total_workers=cfg.WORLD_SIZE,
        cfg=cfg,
        tokenizer=tokenizer,
        get_filenames_only=True,
    )
    print(f"Got {len(train_filenames)} total train files and {len(val_filenames)} total val files.")

    # Simulate distributed env using multiprocessing pool
    pool_size = min(NUM_PROC, cfg.WORLD_SIZE)
    with Pool(processes=pool_size) as pool:
        print(f"Starting a pool of {pool_size} workers to mock a dist world size of {cfg.WORLD_SIZE}.")
        # Prepare the worker function with the config
        worker_func = partial(
            materialize_worker_dataset,
            total_workers=cfg.WORLD_SIZE,
            cfg=cfg,
            tokenizer=tokenizer,
            train_filenames=train_filenames,
            val_filenames=val_filenames,
            get_filenames_only=False,  # we want to actually process the data
            ret_dataloaders=False,  # we don't need the dataloaders back
            iterate_k_steps=None,
            # iterate_k_steps=10_000,  # iterate over the dataloader for k steps
            # iterate_k_steps=cfg.max_steps,
            # save_k_steps_to_parquet=None,
            # save_k_steps_to_parquet=10_000,  # save the dataset to parquet after k steps
            save_k_steps_to_parquet=cfg.max_steps,
        )

        # Map the worker function to each worker ID and print results
        results = pool.map(worker_func, range(cfg.WORLD_SIZE))

    # Print results for each worker
    for worker_id, success in enumerate(results):
        if success:
            print(f"Worker {worker_id} completed successfully.")
        else:
            print(f"Worker {worker_id} failed to prepare dataset.")


if __name__ == "__main__":
    main()