import gzip
import io
import json
import os

import numpy as np
import pandas as pd
import torch
import zstandard as zstd
from torch.utils.data import Dataset
from tqdm import tqdm
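

# The SFT-style datasets below expect each record to carry a `messages` list in
# which every message has `role`, `content`, and `split_pos`. A sketch of one
# record, inferred from the fields accessed in the preprocessing code below
# (the authoritative schema lives in the upstream data pipeline):
#
#   {"messages": [
#       {"role": "user", "content": "...", "split_pos": [3, 7]},
#       {"role": "assistant", "content": "...", "split_pos": [12, 20]}
#   ]}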


class SemiNATForMultiRoundMaskInputStream(Dataset):
    '''
    Streaming multi-round SFT dataset: only byte offsets are kept in memory and
    each sample is read from the JSONL file on demand. System and user turns
    are masked with -100 so that only assistant tokens contribute to the loss.
    '''

    def __init__(self, tokenizer, data_path, max_length):
        self.tokenizer = tokenizer
        self.max_length = max_length
        self.data_path = data_path

        # Record the byte offset of every line so __getitem__ can seek directly
        # to the requested sample without loading the whole file.
        self.offsets = []
        offset = 0
        with open(data_path, 'r') as f:
            for line in f:
                self.offsets.append(offset)
                offset += len(line.encode('utf-8'))

    def __len__(self):
        return len(self.offsets)

    def __getitem__(self, idx):
        with open(self.data_path, 'r') as f:
            f.seek(self.offsets[idx])
            line = f.readline()
        row = json.loads(line)
        messages = row['messages']

        total_inputs = []
        total_labels = []
        sample_slice = []
        slice_label = []

        for msg in messages:
            inputs = self.tokenizer(msg['content'],
                                    padding=False,
                                    truncation=False,
                                    add_special_tokens=False).input_ids
            total_inputs.extend(inputs)
            if msg['role'] == 'system':
                total_labels.extend(len(inputs) * [-100])
                slice_label.extend(len(msg['split_pos']) * [-1])
            elif msg['role'] == 'user':
                total_labels.extend(len(inputs) * [-100])
                slice_label.extend(len(msg['split_pos']) * [-1])
            elif msg['role'] == 'assistant':
                total_labels.extend(inputs)
                slice_label.extend(len(msg['split_pos']) * [1])
            sample_slice.extend(msg.get('split_pos', []))

        # Pad / truncate everything to max_length.
        seq_len = min(len(total_inputs), self.max_length)
        input_ids = total_inputs[:self.max_length] + [self.tokenizer.pad_token_id] * (self.max_length - seq_len)
        labels = total_labels[:self.max_length] + [-100] * (self.max_length - seq_len)
        attention_mask = [1] * seq_len + [0] * (self.max_length - seq_len)
        slice_arr = sample_slice[:self.max_length] + [-1] * (self.max_length - len(sample_slice))
        # Invalidate split positions that fall beyond the truncated sequence.
        slice_arr = [s if s < self.max_length - 1 else -1 for s in slice_arr]
        slice_label = slice_label[:self.max_length] + [-1] * (self.max_length - len(slice_label))

        # If every label is masked, the assistant content was truncated away;
        # the DataLoader's collate_fn must filter these None samples out
        # (see the collate sketch below).
        if all(l == -100 for l in labels):
            return None

        return (
            torch.tensor(input_ids, dtype=torch.long),
            torch.tensor(labels, dtype=torch.long),
            torch.tensor(attention_mask, dtype=torch.long),
            torch.tensor(slice_arr, dtype=torch.long),
            torch.tensor(slice_label, dtype=torch.long)
        )
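

# __getitem__ above can return None for fully-masked samples, which the default
# collate cannot handle. A minimal collate sketch (the name `collate_skip_none`
# is ours, not part of the original API; assumes PyTorch >= 1.11, where
# torch.utils.data.default_collate is available):
def collate_skip_none(batch):
    # Drop None samples, then stack the remaining tuples with the default collate.
    batch = [sample for sample in batch if sample is not None]
    if not batch:
        return None  # the training loop should skip an entirely empty batch
    return torch.utils.data.default_collate(batch)


# Usage sketch (tokenizer and path are placeholders):
#   ds = SemiNATForMultiRoundMaskInputStream(tokenizer, "train.jsonl", max_length=2048)
#   loader = torch.utils.data.DataLoader(ds, batch_size=8, collate_fn=collate_skip_none)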


class SemiNATMaskInput(Dataset):
    '''
    Masks all input tokens; only the output (assistant) tokens contribute to
    the loss.
    '''

    def __init__(self, tokenizer, datas, max_length, proc):
        self.tokenizer = tokenizer
        self.max_length = max_length
        self.proc = proc

        processed = self._vectorized_preprocess(datas)
        self.input_ids = processed["input_ids"]
        self.labels = processed["labels"]
        self.attention_mask = processed["attention_mask"]
        self.slice_indices = processed["slice_indices"]

    def _vectorized_preprocess(self, datas):
        # Pre-allocate output arrays; rows that end up fully masked are dropped
        # below, so the arrays are truncated to the surviving length at the end.
        input_ids = np.zeros((len(datas), self.max_length), dtype=np.int64)
        attention_mask = np.zeros((len(datas), self.max_length),
                                  dtype=np.int64)
        labels = np.full((len(datas), self.max_length), -100, dtype=np.int64)
        slice_indices = np.full((len(datas), self.max_length),
                                -1,
                                dtype=np.int64)

        def process_row(row):
            total_inputs = []
            total_labels = []
            sample_slice = []

            for msg in row['messages']:
                inputs = self.tokenizer(msg['content'],
                                        padding=False,
                                        truncation=False,
                                        add_special_tokens=False).input_ids
                total_inputs.extend(inputs)

                if msg['role'] == 'user':
                    # User turns are masked out of the loss.
                    total_labels.extend(len(inputs) * [-100])
                elif msg['role'] == 'assistant':
                    total_labels.extend(inputs)
                    sample_slice.extend(msg['split_pos'])

            seq_len = min(len(total_inputs), self.max_length)

            input_ids = total_inputs[:self.max_length] + [
                self.tokenizer.pad_token_id
            ] * (self.max_length - seq_len)
            labels = total_labels[:self.max_length] + [-100] * (
                self.max_length - seq_len)

            # Drop samples whose assistant content was truncated away entirely.
            if all(l == -100 for l in labels):
                return None

            attention_mask = [1] * seq_len + [0] * (self.max_length - seq_len)

            slice_arr = np.array(sample_slice[:self.max_length] + [-1] *
                                 (self.max_length - len(sample_slice)))
            # Invalidate split positions beyond the truncated sequence.
            slice_arr[slice_arr >= self.max_length - 1] = -1

            return input_ids, labels, attention_mask, slice_arr

        try:
            from pandarallel import pandarallel
            pandarallel.initialize(nb_workers=self.proc, progress_bar=False)
            processed = datas.parallel_apply(process_row, axis=1)
        except ImportError:
            # Single-process fallback; tqdm.pandas() registers progress_apply
            # on pandas objects.
            tqdm.pandas()
            processed = datas.progress_apply(process_row, axis=1)

        # Keep only the rows that survived preprocessing.
        processed = processed[processed.notnull()].reset_index(drop=True)

        for idx, (i_ids, lbl, attn, slc) in enumerate(processed):
            input_ids[idx] = i_ids
            labels[idx] = lbl
            attention_mask[idx] = attn
            slice_indices[idx] = slc

        processed_len = len(processed)
        return {
            "input_ids": input_ids[:processed_len],
            "labels": labels[:processed_len],
            "attention_mask": attention_mask[:processed_len],
            "slice_indices": slice_indices[:processed_len]
        }

    def __len__(self):
        return len(self.input_ids)

    def __getitem__(self, index):
        return (torch.as_tensor(self.input_ids[index]),
                torch.as_tensor(self.labels[index]),
                torch.as_tensor(self.attention_mask[index]),
                torch.as_tensor(self.slice_indices[index]))
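

# SemiNATMaskInput and SemiNATForSingleRound apply their preprocessing with
# DataFrame.apply(axis=1), so `datas` is expected to be a pandas DataFrame with
# one record per row. A minimal loader sketch (our helper, not part of the
# original API):
def load_jsonl_as_dataframe(path):
    # One JSON object per line -> one DataFrame row per record.
    with open(path, 'r', encoding='utf-8') as f:
        return pd.DataFrame([json.loads(line) for line in f])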


class SemiNATForSingleRound(Dataset):
    '''
    Single-round variant: labels are a copy of the full input sequence, so
    nothing is masked apart from padding.
    '''

    def __init__(self, tokenizer, datas, max_length, proc):
        self.tokenizer = tokenizer
        self.max_length = max_length
        self.proc = proc

        processed = self._vectorized_preprocess(datas)
        self.input_ids = processed["input_ids"]
        self.labels = processed["labels"]
        self.attention_mask = processed["attention_mask"]
        self.slice_indices = processed["slice_indices"]

    def _vectorized_preprocess(self, datas):
        input_ids = np.zeros((len(datas), self.max_length), dtype=np.int64)
        attention_mask = np.zeros((len(datas), self.max_length),
                                  dtype=np.int64)
        labels = np.full((len(datas), self.max_length), -100, dtype=np.int64)
        slice_indices = np.full((len(datas), self.max_length),
                                -1,
                                dtype=np.int64)

        def process_row(row):
            total_inputs = []
            sample_slice = []

            for msg in row['messages']:
                inputs = self.tokenizer(msg['content'],
                                        padding=False,
                                        truncation=False,
                                        add_special_tokens=False).input_ids
                total_inputs.extend(inputs)
                sample_slice.extend(msg['split_pos'])

            seq_len = min(len(total_inputs), self.max_length)

            input_ids = total_inputs[:self.max_length] + [
                self.tokenizer.pad_token_id
            ] * (self.max_length - seq_len)
            # No masking here: every non-padding token is a label.
            labels = total_inputs[:self.max_length] + [-100] * (
                self.max_length - seq_len)

            attention_mask = [1] * seq_len + [0] * (self.max_length - seq_len)

            slice_arr = np.array(sample_slice[:self.max_length] + [-1] *
                                 (self.max_length - len(sample_slice)))
            slice_arr[slice_arr > self.max_length] = -1

            return input_ids, labels, attention_mask, slice_arr

        try:
            from pandarallel import pandarallel
            pandarallel.initialize(nb_workers=self.proc, progress_bar=False)
            processed = datas.parallel_apply(process_row, axis=1)
        except ImportError:
            # Single-process fallback; tqdm.pandas() registers progress_apply.
            tqdm.pandas()
            processed = datas.progress_apply(process_row, axis=1)

        for idx, (i_ids, lbl, attn, slc) in enumerate(processed):
            input_ids[idx] = i_ids
            labels[idx] = lbl
            attention_mask[idx] = attn
            slice_indices[idx] = slc

        return {
            "input_ids": input_ids,
            "labels": labels,
            "attention_mask": attention_mask,
            "slice_indices": slice_indices
        }

    def __len__(self):
        return len(self.input_ids)

    def __getitem__(self, index):
        return (torch.as_tensor(self.input_ids[index]),
                torch.as_tensor(self.labels[index]),
                torch.as_tensor(self.attention_mask[index]),
                torch.as_tensor(self.slice_indices[index]))
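

# Usage sketch for the DataFrame-backed datasets (tokenizer and path are
# placeholders; `load_jsonl_as_dataframe` is the helper sketched above):
#   df = load_jsonl_as_dataframe("single_round.jsonl")
#   ds = SemiNATForSingleRound(tokenizer, df, max_length=2048, proc=8)
#   loader = torch.utils.data.DataLoader(ds, batch_size=8, shuffle=True)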


class SemiNATDatasetForPretrain(Dataset):

    def __init__(self,
                 tokenizer,
                 data_files,
                 max_length,
                 proc,
                 cache_path=None):
        # cache_path is only read here; writing the cache is left to the
        # caller (see the save sketch at the end of this file).
        if cache_path and os.path.exists(cache_path):
            print(f"[INFO] Loading cached data from {cache_path}")
            cached = torch.load(cache_path)
            self.input_ids = cached["input_ids"]
            self.labels = cached["labels"]
            self.attention_mask = cached["attention_mask"]
            self.slice_indices = cached["slice_indices"]
            return

        # Load every shard into a single DataFrame; .zstd shards are
        # decompressed in memory, everything else is assumed to be gzip JSONL.
        data = []
        for filename in data_files:
            if filename.endswith('.zstd'):
                data.append(
                    pd.DataFrame([
                        json.loads(line) for line in
                        self._decompress_zst_to_string(filename).splitlines()
                    ]))
            else:
                with gzip.open(filename, 'rt', encoding='utf-8') as f:
                    data.append(pd.DataFrame([json.loads(line) for line in f]))
        data = pd.concat(data, ignore_index=True)

        self.tokenizer = tokenizer
        self.max_length = max_length
        self.proc = proc

        processed = self._vectorized_preprocess(data)
        self.input_ids = processed["input_ids"]
        self.labels = processed["labels"]
        self.attention_mask = processed["attention_mask"]
        self.slice_indices = processed["slice_indices"]

        if not isinstance(self.input_ids, torch.Tensor):
            self.input_ids = torch.tensor(self.input_ids, dtype=torch.long)
            self.labels = torch.tensor(self.labels, dtype=torch.long)
            self.attention_mask = torch.tensor(self.attention_mask,
                                               dtype=torch.long)
            self.slice_indices = torch.tensor(self.slice_indices,
                                              dtype=torch.long)

    def _decompress_zst_to_string(self, input_file):
        # Stream-decompress a zstd-compressed shard into a single string.
        with open(input_file, 'rb') as f:
            dctx = zstd.ZstdDecompressor()
            with dctx.stream_reader(f) as reader:
                text_stream = io.TextIOWrapper(reader, encoding='utf-8')
                return text_stream.read()

    def _vectorized_preprocess(self, data):
        input_ids = np.zeros((len(data), self.max_length), dtype=np.int64)
        attention_mask = np.zeros((len(data), self.max_length), dtype=np.int64)
        labels = np.full((len(data), self.max_length), -100, dtype=np.int64)
        slice_indices = np.full((len(data), self.max_length),
                                -1,
                                dtype=np.int64)

        def process_row(row):
            inputs = self.tokenizer(row['text'],
                                    padding=False,
                                    truncation=False,
                                    add_special_tokens=False).input_ids

            # Pretraining text has no annotated split positions, so split at a
            # fixed stride of 8 tokens (positions 1, 9, 17, ...), plus the
            # final position when it is not already covered.
            sample_slice = (np.arange(0, len(inputs), 8) + 1).tolist()
            if len(inputs) % 8 != 1:
                sample_slice.append(len(inputs))

            seq_len = min(len(inputs), self.max_length)

            input_ids = inputs[:self.max_length] + [
                self.tokenizer.pad_token_id
            ] * (self.max_length - seq_len)
            # 50279 is a hard-coded, tokenizer-specific special-token id used
            # as the first (shifted) label.
            labels = [
                50279
            ] + inputs[:self.max_length -
                       1] + [-100] * (self.max_length - 1 - seq_len)

            attention_mask = [1] * seq_len + [0] * (self.max_length - seq_len)

            slice_arr = np.array(sample_slice[:self.max_length] + [-1] *
                                 (self.max_length - len(sample_slice)))
            slice_arr[slice_arr > self.max_length] = -1

            return input_ids, labels, attention_mask, slice_arr

        try:
            from pandarallel import pandarallel
            pandarallel.initialize(nb_workers=self.proc, progress_bar=False)
            processed = data.parallel_apply(process_row, axis=1)
        except ImportError:
            # Single-process fallback; tqdm.pandas() registers progress_apply.
            tqdm.pandas()
            processed = data.progress_apply(process_row, axis=1)

        for idx, (i_ids, lbl, attn, slc) in enumerate(processed):
            input_ids[idx] = i_ids
            labels[idx] = lbl
            attention_mask[idx] = attn
            slice_indices[idx] = slc

        return {
            "input_ids": input_ids,
            "labels": labels,
            "attention_mask": attention_mask,
            "slice_indices": slice_indices
        }

    def __len__(self):
        return len(self.input_ids)

    def __getitem__(self, index):
        return (self.input_ids[index], self.labels[index],
                self.attention_mask[index], self.slice_indices[index])
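

# SemiNATDatasetForPretrain.__init__ only loads cache_path; a matching save
# step is sketched here (our helper, not part of the original code), using the
# same dict layout that the constructor expects:
def save_pretrain_cache(dataset, cache_path):
    # Persist the processed tensors so a later run can skip preprocessing.
    torch.save(
        {
            "input_ids": dataset.input_ids,
            "labels": dataset.labels,
            "attention_mask": dataset.attention_mask,
            "slice_indices": dataset.slice_indices,
        },
        cache_path,
    )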