# IR_Project/Retriever/gen_embedding.py
import os
import pickle
import torch
from torch.utils.data import DataLoader, Dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizer
from typing import List, Tuple
import pandas as pd
import argparse
from tqdm import tqdm  # progress bar for batch processing
class ContextDataset(Dataset):
    """
    Simple Dataset wrapper that serves (id, title, text) tuples for batched loading.
    """
    def __init__(self, contexts: List[Tuple[str, str, str]]):
        """
        Args:
            contexts (List[Tuple[str, str, str]]): one (id, title, text) tuple per passage.
        """
self.contexts = contexts
def __len__(self):
return len(self.contexts)
def __getitem__(self, idx):
        # Return the (id, title, text) tuple at this index
return self.contexts[idx]
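# Note: with the default collate_fn, a DataLoader over this dataset transposes each
# batch of (id, title, text) tuples into three per-field sequences: numeric ids become
# a tensor, string fields become tuples of strings. process_contexts below relies on
# this behaviour when it unpacks ids, titles and texts.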
class DPRContextEncoderBatchProcessor:
def __init__(self, model_name: str = "facebook/dpr-ctx_encoder-multiset-base", batch_size: int = 16, device: str = None):
self.device = device if device else ("cuda" if torch.cuda.is_available() else "cpu")
self.tokenizer = DPRContextEncoderTokenizer.from_pretrained(model_name)
self.model = DPRContextEncoder.from_pretrained(model_name).to(self.device)
self.batch_size = batch_size
    def process_contexts(self, contexts: List[Tuple[str, str, str]], insert_title: bool = True) -> List[Tuple[str, List[float]]]:
        """
        Encode contexts in batches and return their embeddings.
        Args:
            contexts (List[Tuple[str, str, str]]): one (id, title, text) tuple per passage.
            insert_title (bool): whether to prepend the title to the text before encoding.
        Returns:
            List[Tuple[str, List[float]]]: (id, embedding) pairs, one per context.
        """
dataset = ContextDataset(contexts)
dataloader = DataLoader(dataset, batch_size=self.batch_size, shuffle=False)
embeddings = []
for batch_contexts in tqdm(dataloader, desc="Processing contexts", unit="batch"):
ids, titles, texts = batch_contexts
            # When insert_title is True, prepend the title with a [SEP] separator
            # (the format DPR context encoders expect); otherwise encode the text alone
if insert_title:
combined_texts = [f"{title} [SEP] {text}" for title, text in zip(titles, texts)]
else:
                combined_texts = list(texts)
inputs = self.tokenizer(
combined_texts,
return_tensors="pt",
padding=True,
truncation=True,
max_length=512
).to(self.device)
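            # pooler_output is the DPR passage embedding (768-dimensional for the -base checkpoints)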
with torch.no_grad():
outputs = self.model(**inputs).pooler_output
batch_embeddings = outputs.cpu().numpy()
            # The default collate_fn turns numeric ids into a tensor and string ids into a
            # tuple, so unwrap tensor entries back into plain Python values before storing
            for idx, embedding in enumerate(batch_embeddings):
                passage_id = ids[idx].item() if torch.is_tensor(ids) else ids[idx]
                embeddings.append((passage_id, embedding))
return embeddings
    def save_embeddings(self, contexts: List[Tuple[str, str, str]], output_file: str, insert_title: bool = True):
        """
        Generate embeddings and save them to the given file as a pickled dict.
        Args:
            contexts (List[Tuple[str, str, str]]): one (id, title, text) tuple per passage.
            output_file (str): path of the output pickle file.
            insert_title (bool): whether to prepend the title to the text before encoding.
        """
print("Generating embeddings...")
embeddings = self.process_contexts(contexts, insert_title=insert_title)
        result = {passage_id: vector for passage_id, vector in embeddings}
        output_dir = os.path.dirname(output_file)
        if output_dir:  # only create a directory when the output path actually contains one
            os.makedirs(output_dir, exist_ok=True)
        print("Saving embeddings to file...")
        with open(output_file, "wb") as f:
            pickle.dump(result, f)
print(f"Total passages processed: {len(embeddings)}. Embeddings saved to {output_file}")
    def load_and_process_tsv(self, tsv_file: str, output_file: str, insert_title: bool = True):
        """
        Load passages from a TSV file and generate embeddings for them.
        Args:
            tsv_file (str): path to the input TSV file.
            output_file (str): path of the output pickle file.
            insert_title (bool): whether to prepend the title to the text before encoding.
        """
print(f"Loading data from {tsv_file}...")
df = pd.read_csv(tsv_file, sep='\t')
        # Extract the id, title and text columns; missing titles/texts become empty strings
ids = df['id'].tolist()
titles = df['title'].fillna("").tolist()
texts = df['text'].fillna("").tolist()
contexts = list(zip(ids, titles, texts))
self.save_embeddings(contexts, output_file, insert_title=insert_title)
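# Expected input format: a tab-separated file whose header names the columns
# 'id', 'title' and 'text'. A hypothetical two-row example:
#
#   id    title            text
#   0     Alan Turing      Alan Turing was an English mathematician ...
#   1     Enigma machine   The Enigma machine is a cipher device ...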
def main():
parser = argparse.ArgumentParser(description="DPR Context Encoder Batch Processor")
parser.add_argument("--tsv_file", type=str, required=True, help="Path to the input TSV file with 'id', 'title', and 'text' columns.")
parser.add_argument("--output_file", type=str, required=True, help="Path to save the generated embeddings.")
parser.add_argument("--batch_size", type=int, default=8, help="Batch size for processing contexts.")
parser.add_argument("--model_name", type=str, default="facebook/dpr-ctx_encoder-multiset-base", help="Name of the DPR context encoder model.")
parser.add_argument("--insert_title", action="store_true", help="Whether to insert the title before the text for embedding generation.")
parser.add_argument("--device", type=str, default=None, help="Device to run the model on (e.g., 'cuda' or 'cpu').")
args = parser.parse_args()
processor = DPRContextEncoderBatchProcessor(
model_name=args.model_name,
batch_size=args.batch_size,
device=args.device
)
processor.load_and_process_tsv(args.tsv_file, args.output_file, insert_title=args.insert_title)
if __name__ == "__main__":
main()
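# Example invocation (paths and values are placeholders, not files shipped with this repo):
#
#   python gen_embedding.py \
#       --tsv_file data/passages.tsv \
#       --output_file embeddings/ctx_embeddings.pkl \
#       --batch_size 16 \
#       --insert_title
#
# The output pickle maps passage id -> numpy embedding vector and can be reloaded with:
#
#   with open("embeddings/ctx_embeddings.pkl", "rb") as f:
#       id_to_embedding = pickle.load(f)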