sheep33333 committed (verified)
Commit 93e682d · 1 parent: a36b3a8

Upload dataset.py with huggingface_hub
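
The commit message indicates the file was pushed with the huggingface_hub client. A minimal, hypothetical sketch of such an upload (the repo id is a placeholder and repo_type is an assumption; neither is recorded on this page):

from huggingface_hub import HfApi

api = HfApi()
api.upload_file(
    path_or_fileobj="dataset.py",
    path_in_repo="dataset.py",
    repo_id="sheep33333/<repo-name>",  # placeholder: actual repo id not shown here
    repo_type="dataset",               # assumption; omit for a model repo
    commit_message="Upload dataset.py with huggingface_hub",
)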

Files changed (1)
  1. dataset.py +336 -0
dataset.py ADDED
import gzip
import io
import json
import os

import numpy as np
import pandas as pd
import torch
import zstandard as zstd
from torch.utils.data import Dataset
from tqdm import tqdm

# Register tqdm's pandas integration so that DataFrame.progress_apply is
# available in the fallback path when pandarallel is not installed.
tqdm.pandas()


class SemiNATForSingleRoundMaskInput(Dataset):
    '''
    Single-round SFT dataset: all input (user) tokens are masked to -100,
    so the loss is computed only on the output (assistant) tokens.
    '''

    def __init__(self, tokenizer, datas, max_length, proc):
        self.tokenizer = tokenizer
        self.max_length = max_length
        self.proc = proc
        # Speed up preprocessing with apply + (optional) parallelism.
        processed = self._vectorized_preprocess(datas)
        self.input_ids = processed["input_ids"]
        self.labels = processed["labels"]
        self.attention_mask = processed["attention_mask"]
        self.slice_indices = processed["slice_indices"]

    def _vectorized_preprocess(self, datas):
        # Pre-allocate the output arrays in one shot.
        input_ids = np.zeros((len(datas), self.max_length), dtype=np.int64)
        attention_mask = np.zeros((len(datas), self.max_length),
                                  dtype=np.int64)
        labels = np.full((len(datas), self.max_length), -100, dtype=np.int64)
        slice_indices = np.full((len(datas), self.max_length),
                                -1,
                                dtype=np.int64)

        # Process the messages of every row.
        def process_row(row):
            total_inputs = []
            total_labels = []
            sample_slice = []

            for msg in row['messages']:
                # Tokenize the message content (no padding or truncation here).
                inputs = self.tokenizer(msg['content'],
                                        padding=False,
                                        truncation=False,
                                        add_special_tokens=False).input_ids
                total_inputs.extend(inputs)

                if msg['role'] == 'user':
                    total_labels.extend(len(inputs) * [-100])
                elif msg['role'] == 'assistant':
                    total_labels.extend(inputs)
                    sample_slice.extend(msg['split_pos'])

            # Truncate or pad to max_length.
            seq_len = min(len(total_inputs), self.max_length)
            input_ids = total_inputs[:self.max_length] + [
                self.tokenizer.pad_token_id
            ] * (self.max_length - seq_len)
            labels = total_labels[:self.max_length] + [-100] * (
                self.max_length - seq_len)

            if all(l == -100 for l in labels):
                return None  # No valid label in this sample; drop it.

            # attention_mask
            attention_mask = [1] * seq_len + [0] * (self.max_length - seq_len)
            # slice_indices
            slice_arr = np.array(sample_slice[:self.max_length] + [-1] *
                                 (self.max_length - len(sample_slice)))
            # Drop positions beyond the sequence; with max_length = 1024 the
            # largest valid index is 1023, hence the "- 1".
            slice_arr[slice_arr >= self.max_length - 1] = -1

            return input_ids, labels, attention_mask, slice_arr

        # Process all rows in parallel (requires pandarallel); fall back to a
        # single process with a tqdm progress bar otherwise.
        try:
            from pandarallel import pandarallel
            pandarallel.initialize(nb_workers=self.proc, progress_bar=True)
            processed = datas.parallel_apply(process_row, axis=1)
        except ImportError:
            processed = datas.progress_apply(process_row, axis=1)

        # Drop the samples that were rejected above (returned None).
        processed = processed[processed.notnull()].reset_index(drop=True)

        # Merge the per-row results into the pre-allocated arrays.
        for idx, (i_ids, lbl, attn, slc) in enumerate(processed):
            input_ids[idx] = i_ids
            labels[idx] = lbl
            attention_mask[idx] = attn
            slice_indices[idx] = slc

        processed_len = len(processed)
        return {
            "input_ids": input_ids[:processed_len],
            "labels": labels[:processed_len],
            "attention_mask": attention_mask[:processed_len],
            "slice_indices": slice_indices[:processed_len]
        }

    def __len__(self):
        return len(self.input_ids)

    def __getitem__(self, index):
        # Return the pre-allocated arrays as tensors without extra copies.
        return (torch.as_tensor(self.input_ids[index]),
                torch.as_tensor(self.labels[index]),
                torch.as_tensor(self.attention_mask[index]),
                torch.as_tensor(self.slice_indices[index]))


class SemiNATForSingleRound(Dataset):
    '''
    Single-round SFT dataset without input masking: labels cover every token,
    so the loss is computed on user and assistant tokens alike.
    '''

    def __init__(self, tokenizer, datas, max_length, proc):
        self.tokenizer = tokenizer
        self.max_length = max_length
        self.proc = proc
        # Speed up preprocessing with apply + (optional) parallelism.
        processed = self._vectorized_preprocess(datas)
        self.input_ids = processed["input_ids"]
        self.labels = processed["labels"]
        self.attention_mask = processed["attention_mask"]
        self.slice_indices = processed["slice_indices"]

    def _vectorized_preprocess(self, datas):
        # Pre-allocate the output arrays in one shot.
        input_ids = np.zeros((len(datas), self.max_length), dtype=np.int64)
        attention_mask = np.zeros((len(datas), self.max_length),
                                  dtype=np.int64)
        labels = np.full((len(datas), self.max_length), -100, dtype=np.int64)
        slice_indices = np.full((len(datas), self.max_length),
                                -1,
                                dtype=np.int64)

        # Process the messages of every row.
        def process_row(row):
            total_inputs = []
            sample_slice = []

            for msg in row['messages']:
                # Tokenize the message content (no padding or truncation here).
                inputs = self.tokenizer(msg['content'],
                                        padding=False,
                                        truncation=False,
                                        add_special_tokens=False).input_ids
                total_inputs.extend(inputs)
                # Collect the split positions of every message.
                sample_slice.extend(msg['split_pos'])

            # Truncate or pad to max_length.
            seq_len = min(len(total_inputs), self.max_length)
            input_ids = total_inputs[:self.max_length] + [
                self.tokenizer.pad_token_id
            ] * (self.max_length - seq_len)
            labels = total_inputs[:self.max_length] + [-100] * (
                self.max_length - seq_len)
            # attention_mask
            attention_mask = [1] * seq_len + [0] * (self.max_length - seq_len)
            # slice_indices
            slice_arr = np.array(sample_slice[:self.max_length] + [-1] *
                                 (self.max_length - len(sample_slice)))
            slice_arr[slice_arr > self.max_length] = -1  # Drop out-of-range positions.

            return input_ids, labels, attention_mask, slice_arr

        # Process all rows in parallel (requires pandarallel); fall back to a
        # single process with a tqdm progress bar otherwise.
        try:
            from pandarallel import pandarallel
            pandarallel.initialize(nb_workers=self.proc, progress_bar=True)
            processed = datas.parallel_apply(process_row, axis=1)
        except ImportError:
            processed = datas.progress_apply(process_row, axis=1)

        # Merge the per-row results into the pre-allocated arrays.
        for idx, (i_ids, lbl, attn, slc) in enumerate(processed):
            input_ids[idx] = i_ids
            labels[idx] = lbl
            attention_mask[idx] = attn
            slice_indices[idx] = slc

        return {
            "input_ids": input_ids,
            "labels": labels,
            "attention_mask": attention_mask,
            "slice_indices": slice_indices
        }

    def __len__(self):
        return len(self.input_ids)

    def __getitem__(self, index):
        # Return the pre-allocated arrays as tensors without extra copies.
        return (torch.as_tensor(self.input_ids[index]),
                torch.as_tensor(self.labels[index]),
                torch.as_tensor(self.attention_mask[index]),
                torch.as_tensor(self.slice_indices[index]))


class SemiNATDatasetForPretrain(Dataset):
    # Each entry of data_files is a jsonl.zstd or json.gz file with one JSON
    # object per line.
    def __init__(self,
                 tokenizer,
                 data_files,
                 max_length,
                 proc,
                 cache_path=None):
        # If a preprocessed cache exists, load it and skip tokenization.
        # (The cache is only read here; it is expected to be written elsewhere.)
        if cache_path and os.path.exists(cache_path):
            print(f"[INFO] Loading cached data from {cache_path}")
            cached = torch.load(cache_path)
            self.input_ids = cached["input_ids"]
            self.labels = cached["labels"]
            self.attention_mask = cached["attention_mask"]
            self.slice_indices = cached["slice_indices"]
            return
        data = []
        for filename in data_files:
            if filename.endswith('.zstd'):
                data.append(
                    pd.DataFrame([
                        json.loads(line) for line in
                        self._decompress_zst_to_string(filename).splitlines()
                    ]))
            else:  # json.gz file, one JSON object per line
                with gzip.open(filename, 'rt', encoding='utf-8') as f:
                    data.append(pd.DataFrame([json.loads(line) for line in f]))
        data = pd.concat(data, ignore_index=True)

        self.tokenizer = tokenizer
        self.max_length = max_length
        self.proc = proc

        processed = self._vectorized_preprocess(data)
        self.input_ids = processed["input_ids"]
        self.labels = processed["labels"]
        self.attention_mask = processed["attention_mask"]
        self.slice_indices = processed["slice_indices"]

        if not isinstance(self.input_ids, torch.Tensor):
            self.input_ids = torch.tensor(self.input_ids, dtype=torch.long)
            self.labels = torch.tensor(self.labels, dtype=torch.long)
            self.attention_mask = torch.tensor(self.attention_mask,
                                               dtype=torch.long)
            self.slice_indices = torch.tensor(self.slice_indices,
                                              dtype=torch.long)

    def _decompress_zst_to_string(self, input_file):
        with open(input_file, 'rb') as f:
            dctx = zstd.ZstdDecompressor()
            with dctx.stream_reader(f) as reader:
                text_stream = io.TextIOWrapper(reader, encoding='utf-8')
                return text_stream.read()  # Read the whole file as one string.

    def _vectorized_preprocess(self, data):
        input_ids = np.zeros((len(data), self.max_length), dtype=np.int64)
        attention_mask = np.zeros((len(data), self.max_length), dtype=np.int64)
        labels = np.full((len(data), self.max_length), -100, dtype=np.int64)
        slice_indices = np.full((len(data), self.max_length),
                                -1,
                                dtype=np.int64)

        def process_row(row):
            inputs = self.tokenizer(row['text'],
                                    padding=False,
                                    truncation=False,
                                    add_special_tokens=False).input_ids
            # Slice into 8-token segments, i.e. sample_slice is [1, 9, 17, 25, ...].
            sample_slice = (np.arange(0, len(inputs), 8) + 1).tolist()
            # Add the end position as the final boundary.
            if len(inputs) % 8 != 1:
                sample_slice.append(len(inputs))

            # Truncate or pad to max_length.
            seq_len = min(len(inputs), self.max_length)
            input_ids = inputs[:self.max_length] + [
                self.tokenizer.pad_token_id
            ] * (self.max_length - seq_len)
            # Labels: the inputs shifted right by one position, with the
            # hard-coded token id 50279 (annotated <EOS>) prepended.
            labels = [
                50279  # <EOS>
            ] + inputs[:self.max_length -
                       1] + [-100] * (self.max_length - 1 - seq_len)
            # attention_mask
            attention_mask = [1] * seq_len + [0] * (self.max_length - seq_len)
            # slice_indices
            slice_arr = np.array(sample_slice[:self.max_length] + [-1] *
                                 (self.max_length - len(sample_slice)))
            slice_arr[slice_arr > self.max_length] = -1  # Drop out-of-range positions.

            return input_ids, labels, attention_mask, slice_arr

        # Process all rows in parallel (requires pandarallel); fall back to a
        # single process with a tqdm progress bar otherwise.
        try:
            from pandarallel import pandarallel
            pandarallel.initialize(nb_workers=self.proc, progress_bar=True)
            processed = data.parallel_apply(process_row, axis=1)
        except ImportError:
            processed = data.progress_apply(process_row, axis=1)

        # Merge the per-row results into the pre-allocated arrays.
        for idx, (i_ids, lbl, attn, slc) in enumerate(processed):
            input_ids[idx] = i_ids
            labels[idx] = lbl
            attention_mask[idx] = attn
            slice_indices[idx] = slc

        return {
            "input_ids": input_ids,
            "labels": labels,
            "attention_mask": attention_mask,
            "slice_indices": slice_indices
        }

    def __len__(self):
        return len(self.input_ids)

    def __getitem__(self, index):
        return (self.input_ids[index], self.labels[index],
                self.attention_mask[index], self.slice_indices[index])
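
For reference, here is a minimal, hypothetical driver for the classes above. The "gpt2" tokenizer, the toy conversation, the split_pos values, and the corpus file name are illustrative assumptions; the expected schemas (a "messages" column with role/content/split_pos for the SFT classes, and one {"text": ...} JSON object per line for pretraining) are inferred from process_row. Note that the pretraining labels prepend the hard-coded id 50279, which presumes the project's own tokenizer rather than gpt2; the sketch still runs because no model is involved.

# Usage sketch under the assumptions stated above; not part of the commit.
import gzip
import json

import pandas as pd
from torch.utils.data import DataLoader
from transformers import AutoTokenizer

from dataset import SemiNATDatasetForPretrain, SemiNATForSingleRoundMaskInput

tokenizer = AutoTokenizer.from_pretrained("gpt2")  # placeholder tokenizer
if tokenizer.pad_token_id is None:
    tokenizer.pad_token = tokenizer.eos_token  # ensure pad_token_id exists

# Single-round SFT: a DataFrame with a "messages" column.
datas = pd.DataFrame({
    "messages": [[
        {"role": "user", "content": "What is 2 + 2?", "split_pos": []},
        {"role": "assistant", "content": "2 + 2 equals 4.", "split_pos": [5, 9]},
    ]]
})
sft_ds = SemiNATForSingleRoundMaskInput(tokenizer, datas, max_length=64, proc=1)
loader = DataLoader(sft_ds, batch_size=1, shuffle=True)
# Items are (input_ids, labels, attention_mask, slice_indices) tuples, so the
# default collate batches them positionally.
input_ids, labels, attention_mask, slice_indices = next(iter(loader))
print(input_ids.shape, labels.shape)  # torch.Size([1, 64]) torch.Size([1, 64])

# Pretraining: a json.gz corpus with one {"text": ...} object per line.
with gzip.open("toy_corpus.json.gz", "wt", encoding="utf-8") as f:
    for text in ["hello world " * 50, "another tiny document " * 30]:
        f.write(json.dumps({"text": text}) + "\n")
pt_ds = SemiNATDatasetForPretrain(tokenizer, ["toy_corpus.json.gz"],
                                  max_length=128, proc=1)
print(len(pt_ds), pt_ds[0][0].shape)  # 2 torch.Size([128])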