# sft-v5 / dataset.py
import gzip
import io
import json
import os

import numpy as np
import pandas as pd
import torch
import zstandard as zstd
from torch.utils.data import Dataset
from tqdm import tqdm

# Register DataFrame.progress_apply for the tqdm fallback paths below.
tqdm.pandas()
class SemiNATForMultiRoundMaskInputStream(Dataset):
def __init__(self, tokenizer, data_path, max_length):
self.tokenizer = tokenizer
self.max_length = max_length
self.data_path = data_path
        # Record each line's byte offset up front so rows can be read lazily by seeking.
self.offsets = []
offset = 0
with open(data_path, 'r') as f:
for line in f:
self.offsets.append(offset)
offset += len(line.encode('utf-8'))
def __len__(self):
return len(self.offsets)
def __getitem__(self, idx):
with open(self.data_path, 'r') as f:
f.seek(self.offsets[idx])
line = f.readline()
row = json.loads(line)
messages = row['messages']
total_inputs = []
total_labels = []
sample_slice = []
slice_label = []
for msg in messages:
inputs = self.tokenizer(msg['content'],
padding=False,
truncation=False,
add_special_tokens=False).input_ids
total_inputs.extend(inputs)
if msg['role'] == 'system':
total_labels.extend(len(inputs) * [-100])
slice_label.extend(len(msg['split_pos']) * [-1])
elif msg['role'] == 'user':
total_labels.extend(len(inputs) * [-100])
slice_label.extend(len(msg['split_pos']) * [-1])
elif msg['role'] == 'assistant':
total_labels.extend(inputs)
slice_label.extend(len(msg['split_pos']) * [1])
sample_slice.extend(msg.get('split_pos', []))
seq_len = min(len(total_inputs), self.max_length)
input_ids = total_inputs[:self.max_length] + [self.tokenizer.pad_token_id] * (self.max_length - seq_len)
labels = total_labels[:self.max_length] + [-100] * (self.max_length - seq_len)
attention_mask = [1] * seq_len + [0] * (self.max_length - seq_len)
slice_arr = sample_slice[:self.max_length] + [-1] * (self.max_length - len(sample_slice))
slice_arr = [s if s < self.max_length - 1 else -1 for s in slice_arr]
slice_label = slice_label[:self.max_length] + [-1] * (self.max_length - len(slice_label))
        if all(l == -100 for l in labels):
            # No supervised tokens survive truncation; the collate_fn should drop this sample.
            return None
return (
torch.tensor(input_ids, dtype=torch.long),
torch.tensor(labels, dtype=torch.long),
torch.tensor(attention_mask, dtype=torch.long),
torch.tensor(slice_arr, dtype=torch.long),
torch.tensor(slice_label, dtype=torch.long)
)
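

# Because SemiNATForMultiRoundMaskInputStream.__getitem__ can return None for
# fully-masked samples, a DataLoader over it needs a collate_fn that filters
# those out. A minimal sketch; the name `collate_skip_none` is illustrative
# and not part of the original training pipeline.
def collate_skip_none(batch):
    """Drop None samples, then stack each field of the remaining tuples."""
    batch = [sample for sample in batch if sample is not None]
    if not batch:
        return None  # Caller should skip an entirely-empty batch.
    return tuple(torch.stack(field) for field in zip(*batch))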
class SemiNATMaskInput(Dataset):
    '''
    Masks out every input turn: loss is computed only on the assistant outputs.
    '''
def __init__(self, tokenizer, datas, max_length, proc):
self.tokenizer = tokenizer
self.max_length = max_length
self.proc = proc
        # Preprocess with apply, parallelized via pandarallel when available.
processed = self._vectorized_preprocess(datas)
self.input_ids = processed["input_ids"]
self.labels = processed["labels"]
self.attention_mask = processed["attention_mask"]
self.slice_indices = processed["slice_indices"]
def _vectorized_preprocess(self, datas):
        # Pre-allocate the output buffers for the whole batch.
input_ids = np.zeros((len(datas), self.max_length), dtype=np.int64)
attention_mask = np.zeros((len(datas), self.max_length),
dtype=np.int64)
labels = np.full((len(datas), self.max_length), -100, dtype=np.int64)
slice_indices = np.full((len(datas), self.max_length),
-1,
dtype=np.int64)
        # Process the messages of every row.
def process_row(row):
total_inputs = []
total_labels = []
sample_slice = []
for msg in row['messages']:
                # Batch tokenization (assumes msg['content'] is a list of texts).
inputs = self.tokenizer(msg['content'],
padding=False,
truncation=False,
add_special_tokens=False).input_ids
total_inputs.extend(inputs)
if msg['role'] == 'user':
total_labels.extend(len(inputs) * [-100])
elif msg['role'] == 'assistant':
total_labels.extend(inputs)
sample_slice.extend(msg['split_pos'])
            # Truncate or pad to max_length.
seq_len = min(len(total_inputs), self.max_length)
            # Inputs and labels.
input_ids = total_inputs[:self.max_length] + [
self.tokenizer.pad_token_id
] * (self.max_length - seq_len)
labels = total_labels[:self.max_length] + [-100] * (
self.max_length - seq_len)
            if all(l == -100 for l in labels):
                return None  # This row has no valid labels; drop it.
# attention_mask
attention_mask = [1] * seq_len + [0] * (self.max_length - seq_len)
# slice_indices
slice_arr = np.array(sample_slice[:self.max_length] + [-1] *
(self.max_length - len(sample_slice)))
            # Drop out-of-range positions; the -1 is needed because with max_length 1024 the largest valid index is 1023.
            slice_arr[slice_arr >= self.max_length - 1] = -1
return input_ids, labels, attention_mask, slice_arr
        # Process all rows in parallel (requires pandarallel); fall back to a tqdm progress bar otherwise.
try:
from pandarallel import pandarallel
pandarallel.initialize(nb_workers=self.proc, progress_bar=False)
processed = datas.parallel_apply(process_row, axis=1)
except ImportError:
            processed = datas.progress_apply(process_row, axis=1)  # tqdm progress bar
processed = processed[processed.notnull()].reset_index(drop=True)
        # Merge the per-row results into the pre-allocated arrays.
for idx, (i_ids, lbl, attn, slc) in enumerate(processed):
input_ids[idx] = i_ids
labels[idx] = lbl
attention_mask[idx] = attn
slice_indices[idx] = slc
processed_len = len(processed)
return {
"input_ids": input_ids[:processed_len],
"labels": labels[:processed_len],
"attention_mask": attention_mask[:processed_len],
"slice_indices": slice_indices[:processed_len]
}
def __len__(self):
return len(self.input_ids)
def __getitem__(self, index):
        # Return the pre-allocated arrays directly to avoid repeated conversion.
return (torch.as_tensor(self.input_ids[index]),
torch.as_tensor(self.labels[index]),
torch.as_tensor(self.attention_mask[index]),
torch.as_tensor(self.slice_indices[index]))
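

# Illustrative sketch of the record layout SemiNATMaskInput (and the other
# message-based datasets in this file) expects: each row carries a 'messages'
# list whose turns provide 'role', 'content', and token-level 'split_pos'
# fields. The helper name and the example text are assumptions made for
# illustration, not part of the original pipeline.
def _example_mask_input_record():
    return {
        "messages": [
            {"role": "user", "content": "Hi", "split_pos": [1]},
            {"role": "assistant", "content": "Hello there!", "split_pos": [2, 4]},
        ]
    }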
class SemiNATForSingleRound(Dataset):
def __init__(self, tokenizer, datas, max_length, proc):
self.tokenizer = tokenizer
self.max_length = max_length
self.proc = proc
        # Preprocess with apply, parallelized via pandarallel when available.
processed = self._vectorized_preprocess(datas)
self.input_ids = processed["input_ids"]
self.labels = processed["labels"]
self.attention_mask = processed["attention_mask"]
self.slice_indices = processed["slice_indices"]
def _vectorized_preprocess(self, datas):
        # Pre-allocate the output buffers for the whole batch.
input_ids = np.zeros((len(datas), self.max_length), dtype=np.int64)
attention_mask = np.zeros((len(datas), self.max_length),
dtype=np.int64)
labels = np.full((len(datas), self.max_length), -100, dtype=np.int64)
slice_indices = np.full((len(datas), self.max_length),
-1,
dtype=np.int64)
        # Process the messages of every row.
def process_row(row):
total_inputs = []
sample_slice = []
for msg in row['messages']:
                # Batch tokenization (assumes msg['content'] is a list of texts).
inputs = self.tokenizer(msg['content'],
padding=False,
truncation=False,
add_special_tokens=False).input_ids
total_inputs.extend(inputs)
                # Extend the slice positions directly from split_pos.
sample_slice.extend(msg['split_pos'])
            # Truncate or pad to max_length.
seq_len = min(len(total_inputs), self.max_length)
            # Inputs and labels.
input_ids = total_inputs[:self.max_length] + [
self.tokenizer.pad_token_id
] * (self.max_length - seq_len)
labels = total_inputs[:self.max_length] + [-100] * (
self.max_length - seq_len)
# attention_mask
attention_mask = [1] * seq_len + [0] * (self.max_length - seq_len)
# slice_indices
slice_arr = np.array(sample_slice[:self.max_length] + [-1] *
(self.max_length - len(sample_slice)))
            slice_arr[slice_arr > self.max_length] = -1  # Drop out-of-range positions.
return input_ids, labels, attention_mask, slice_arr
        # Process all rows in parallel (requires pandarallel); fall back to a tqdm progress bar otherwise.
try:
from pandarallel import pandarallel
pandarallel.initialize(nb_workers=self.proc, progress_bar=False)
processed = datas.parallel_apply(process_row, axis=1)
        except ImportError:
            processed = datas.progress_apply(process_row, axis=1)  # tqdm progress bar
        # Merge the per-row results into the pre-allocated arrays.
for idx, (i_ids, lbl, attn, slc) in enumerate(processed):
input_ids[idx] = i_ids
labels[idx] = lbl
attention_mask[idx] = attn
slice_indices[idx] = slc
return {
"input_ids": input_ids,
"labels": labels,
"attention_mask": attention_mask,
"slice_indices": slice_indices
}
def __len__(self):
return len(self.input_ids)
def __getitem__(self, index):
        # Return the pre-allocated arrays directly to avoid repeated conversion.
return (torch.as_tensor(self.input_ids[index]),
torch.as_tensor(self.labels[index]),
torch.as_tensor(self.attention_mask[index]),
torch.as_tensor(self.slice_indices[index]))
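

# Hedged illustration of the slice bookkeeping shared by the classes above:
# split_pos values index positions in the concatenated token stream, and
# positions that fall outside the max_length window are replaced with -1 so
# downstream code can ignore them. The helper below is a self-contained
# sketch of that intent (the exact boundary check varies slightly between
# the classes); the name is illustrative only.
def _clip_split_positions(split_pos, max_length):
    padded = split_pos[:max_length] + [-1] * (max_length - len(split_pos))
    return [p if 0 <= p < max_length else -1 for p in padded]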
class SemiNATDatasetForPretrain(Dataset):
    # Each data file is a .jsonl.zstd or .jsonl.gz file with one JSON object per line.
def __init__(self,
tokenizer,
data_files,
max_length,
proc,
cache_path=None):
if cache_path and os.path.exists(cache_path):
print(f"[INFO] Loading cached data from {cache_path}")
cached = torch.load(cache_path)
self.input_ids = cached["input_ids"]
self.labels = cached["labels"]
self.attention_mask = cached["attention_mask"]
self.slice_indices = cached["slice_indices"]
return
data = []
for filename in data_files:
if filename.endswith('.zstd'):
data.append(
pd.DataFrame([
json.loads(line) for line in
self._decompress_zst_to_string(filename).splitlines()
]))
            else:  # gzip-compressed file, one JSON object per line
with gzip.open(filename, 'rt', encoding='utf-8') as f:
data.append(pd.DataFrame([json.loads(line) for line in f]))
data = pd.concat(data, ignore_index=True)
self.tokenizer = tokenizer
self.max_length = max_length
self.proc = proc
processed = self._vectorized_preprocess(data)
self.input_ids = processed["input_ids"]
self.labels = processed["labels"]
self.attention_mask = processed["attention_mask"]
self.slice_indices = processed["slice_indices"]
        if not isinstance(self.input_ids, torch.Tensor):
self.input_ids = torch.tensor(self.input_ids, dtype=torch.long)
self.labels = torch.tensor(self.labels, dtype=torch.long)
self.attention_mask = torch.tensor(self.attention_mask,
dtype=torch.long)
self.slice_indices = torch.tensor(self.slice_indices,
dtype=torch.long)
def _decompress_zst_to_string(self, input_file):
with open(input_file, 'rb') as f:
dctx = zstd.ZstdDecompressor()
with dctx.stream_reader(f) as reader:
text_stream = io.TextIOWrapper(reader, encoding='utf-8')
                return text_stream.read()  # Read the whole stream into a string.
def _vectorized_preprocess(self, data):
input_ids = np.zeros((len(data), self.max_length), dtype=np.int64)
attention_mask = np.zeros((len(data), self.max_length), dtype=np.int64)
labels = np.full((len(data), self.max_length), -100, dtype=np.int64)
slice_indices = np.full((len(data), self.max_length),
-1,
dtype=np.int64)
def process_row(row):
inputs = self.tokenizer(row['text'],
padding=False,
truncation=False,
add_special_tokens=False).input_ids
            # Slice into 8-token segments, i.e. sample_slice is [1, 9, 17, 25, ...].
            sample_slice = (np.arange(0, len(inputs), 8) + 1).tolist()
            # Append the sequence end if it does not already land on a segment boundary.
            if len(inputs) % 8 != 1:
                sample_slice.append(len(inputs))
            # Truncate or pad to max_length.
seq_len = min(len(inputs), self.max_length)
            # Inputs and labels.
input_ids = inputs[:self.max_length] + [
self.tokenizer.pad_token_id
] * (self.max_length - seq_len)
labels = [
50279 # <EOS>
] + inputs[:self.max_length -
1] + [-100] * (self.max_length - 1 - seq_len)
# attention_mask
attention_mask = [1] * seq_len + [0] * (self.max_length - seq_len)
# slice_indices
slice_arr = np.array(sample_slice[:self.max_length] + [-1] *
(self.max_length - len(sample_slice)))
            slice_arr[slice_arr > self.max_length] = -1  # Drop out-of-range positions.
return input_ids, labels, attention_mask, slice_arr
try:
from pandarallel import pandarallel
pandarallel.initialize(nb_workers=self.proc, progress_bar=False)
processed = data.parallel_apply(process_row, axis=1)
except ImportError:
            processed = data.progress_apply(process_row, axis=1)  # tqdm progress bar
        # Merge the per-row results into the pre-allocated arrays.
for idx, (i_ids, lbl, attn, slc) in enumerate(processed):
input_ids[idx] = i_ids
labels[idx] = lbl
attention_mask[idx] = attn
slice_indices[idx] = slc
return {
"input_ids": input_ids,
"labels": labels,
"attention_mask": attention_mask,
"slice_indices": slice_indices
}
def __len__(self):
return len(self.input_ids)
def __getitem__(self, index):
return (self.input_ids[index], self.labels[index],
self.attention_mask[index], self.slice_indices[index])
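

# A minimal end-to-end sketch of wiring the pretrain dataset into a DataLoader.
# The tokenizer checkpoint, shard name, and batch size below are illustrative
# assumptions, not values taken from this repository (note that the hardcoded
# label id 50279 above implies a specific tokenizer vocabulary).
if __name__ == "__main__":
    from torch.utils.data import DataLoader
    from transformers import AutoTokenizer

    tok = AutoTokenizer.from_pretrained("gpt2")  # hypothetical checkpoint
    if tok.pad_token is None:
        tok.pad_token = tok.eos_token
    ds = SemiNATDatasetForPretrain(
        tokenizer=tok,
        data_files=["pretrain_shard_0.jsonl.gz"],  # hypothetical shard name
        max_length=1024,
        proc=4,
    )
    loader = DataLoader(ds, batch_size=8, shuffle=True)
    input_ids, labels, attention_mask, slice_indices = next(iter(loader))
    print(input_ids.shape, labels.shape, attention_mask.shape, slice_indices.shape)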