DANGDOCAO committed · verified
Commit d07bfe7 · 1 Parent(s): b10bb6d
Files changed (2):
  1. HVU_QA/fine_tune_qg.py +104 -0
  2. HVU_QA/generate_question.py +139 -0
HVU_QA/fine_tune_qg.py ADDED
import json
from datasets import Dataset
from sklearn.model_selection import train_test_split
from transformers import (
    T5Tokenizer,
    T5ForConditionalGeneration,
    TrainingArguments,
    Trainer
)

def load_squad_data(file_path):
    """Read a SQuAD-format JSON file into (input, target) pairs for question generation."""
    with open(file_path, "r", encoding="utf-8") as f:
        squad_data = json.load(f)

    data = []
    for article in squad_data["data"]:
        for paragraph in article["paragraphs"]:
            context = paragraph.get("context", "")
            for qa in paragraph["qas"]:
                if not qa.get("is_impossible", False) and qa.get("answers"):
                    answer = qa["answers"][0]["text"]
                    question = qa["question"]
                    input_text = f"answer: {answer} context: {context}"
                    data.append({"input": input_text, "target": question})
    return data

def preprocess_function(example, tokenizer, max_input_length=512, max_target_length=64):
    """Tokenize inputs and targets, padding both to fixed lengths."""
    model_inputs = tokenizer(
        example["input"],
        max_length=max_input_length,
        padding="max_length",
        truncation=True,
    )
    labels = tokenizer(
        text_target=example["target"],
        max_length=max_target_length,
        padding="max_length",
        truncation=True,
    )
    # Mask padding tokens with -100 so they are ignored by the cross-entropy loss.
    labels["input_ids"] = [
        [(tok if tok != tokenizer.pad_token_id else -100) for tok in seq]
        for seq in labels["input_ids"]
    ]
    model_inputs["labels"] = labels["input_ids"]
    return model_inputs

def main():
    data_path = "30ktrain.json"
    output_dir = "t5-viet-qg-finetuned"
    logs_dir = "logs"
    model_name = "VietAI/vit5-base"

    print("Loading model and tokenizer...")
    tokenizer = T5Tokenizer.from_pretrained(model_name)
    model = T5ForConditionalGeneration.from_pretrained(model_name)

    print("Reading and splitting data...")
    raw_data = load_squad_data(data_path)
    train_data, val_data = train_test_split(raw_data, test_size=0.2, random_state=42)

    train_dataset = Dataset.from_list(train_data)
    val_dataset = Dataset.from_list(val_data)

    tokenized_train = train_dataset.map(
        lambda x: preprocess_function(x, tokenizer),
        batched=True,
        remove_columns=["input", "target"]
    )
    tokenized_val = val_dataset.map(
        lambda x: preprocess_function(x, tokenizer),
        batched=True,
        remove_columns=["input", "target"]
    )

    print("Configuring training...")
    training_args = TrainingArguments(
        output_dir=output_dir,
        overwrite_output_dir=True,
        per_device_train_batch_size=1,
        gradient_accumulation_steps=1,
        num_train_epochs=3,
        learning_rate=2e-4,
        weight_decay=0.01,
        warmup_steps=0,
        logging_dir=logs_dir,
        logging_steps=10,
        fp16=False
    )

    print("Training model...")
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_train,
        eval_dataset=tokenized_val,
        tokenizer=tokenizer,
    )
    trainer.train()

    print("Saving model...")
    model.save_pretrained(output_dir)
    tokenizer.save_pretrained(output_dir)
    print("Training complete!")

if __name__ == "__main__":
    main()
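
The training file 30ktrain.json is not part of this commit, so only the keys that load_squad_data actually reads ("data", "paragraphs", "context", "qas", "question", "is_impossible", "answers", "text") are known from the code. The following is a minimal sketch of that SQuAD-style layout with hypothetical content, runnable in the same module or session as fine_tune_qg.py:

# Hypothetical minimal SQuAD-style record matching what load_squad_data reads.
# The title, context, and answer below are invented for illustration;
# answer_start follows SQuAD convention but is never read by the parser above.
sample = {
    "data": [{
        "title": "Hung Vuong University",
        "paragraphs": [{
            "context": "Hung Vuong University is located in Phu Tho province.",
            "qas": [{
                "question": "Where is Hung Vuong University located?",
                "is_impossible": False,
                "answers": [{"text": "Phu Tho province", "answer_start": 36}]
            }]
        }]
    }]
}

import json
with open("sample.json", "w", encoding="utf-8") as f:
    json.dump(sample, f, ensure_ascii=False)

pairs = load_squad_data("sample.json")
# pairs[0]["input"]  -> "answer: Phu Tho province context: Hung Vuong University is located in Phu Tho province."
# pairs[0]["target"] -> "Where is Hung Vuong University located?"
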
HVU_QA/generate_question.py ADDED
import json
from difflib import SequenceMatcher
from transformers import T5Tokenizer, T5ForConditionalGeneration
from transformers.utils import logging as hf_logging

hf_logging.set_verbosity_error()

MODEL_DIR = "t5-viet-qg-finetuned"
DATA_PATH = "30ktrain.json"

tokenizer = T5Tokenizer.from_pretrained(MODEL_DIR)
model = T5ForConditionalGeneration.from_pretrained(MODEL_DIR)

def find_best_match_from_context(user_context, squad_data):
    """Return the (context, answer, question) entry that best matches the user's text."""
    best_score, best_entry = 0.0, None
    ui = user_context.lower()

    for article in squad_data.get("data", []):
        context_title = article.get("title", "")
        score_title = SequenceMatcher(None, ui, context_title.lower()).ratio()

        for paragraph in article.get("paragraphs", []):
            context = paragraph.get("context", "")
            # Compare against the paragraph text as well, not just the article title.
            score_context = SequenceMatcher(None, ui, context.lower()).ratio()
            for qa in paragraph.get("qas", []):
                answers = qa.get("answers", [])
                if not answers:
                    continue
                answer_text = answers[0].get("text", "").strip()
                question_text = qa.get("question", "").strip()

                score = max(score_title, score_context)
                if score > best_score:
                    best_score = score
                    best_entry = (context, answer_text, question_text)

    return best_entry

def _near_duplicate(q, seen, thr=0.90):
    """Return True if q is at least thr-similar to a question already kept."""
    for s in seen:
        if SequenceMatcher(None, q, s).ratio() >= thr:
            return True
    return False

def generate_questions(user_context,
                       total_questions=20,
                       batch_size=10,
                       top_k=60,
                       top_p=0.95,
                       temperature=0.9,  # higher temperature yields more varied questions
                       max_input_len=512,
                       max_new_tokens=64):
    with open(DATA_PATH, "r", encoding="utf-8") as f:
        squad_data = json.load(f)

    best_entry = find_best_match_from_context(user_context, squad_data)
    if best_entry is None:
        print("No matching data found in the JSON file.")
        return

    context, answer, _ = best_entry

    input_text = f"answer: {answer} context: {context}"
    inputs = tokenizer(
        input_text,
        return_tensors="pt",
        truncation=True,
        max_length=max_input_len
    )

    unique_questions = []
    remaining = total_questions
    max_rounds = 10  # safety cap: stop if sampling keeps returning near-duplicates

    while remaining > 0 and max_rounds > 0:
        max_rounds -= 1
        n = min(batch_size, remaining)
        outputs = model.generate(
            **inputs,
            do_sample=True,
            top_k=top_k,
            top_p=top_p,
            temperature=temperature,
            max_new_tokens=max_new_tokens,
            num_return_sequences=n,
            no_repeat_ngram_size=3,
            repetition_penalty=1.12
        )

        for out in outputs:
            q = tokenizer.decode(out, skip_special_tokens=True).strip()
            if len(q) < 5:
                continue
            if not _near_duplicate(q, unique_questions, thr=0.90):
                unique_questions.append(q)

        remaining = total_questions - len(unique_questions)
        if remaining <= 0:
            break

    unique_questions = unique_questions[:total_questions]

    print("Newly generated questions:")
    for i, q in enumerate(unique_questions, 1):
        if not q.endswith("?"):
            q += "?"
        print(f"{i}. {q}")

if __name__ == "__main__":
    user_context = input("\nEnter a passage of text:\n ").strip()

    raw_n = input("\nEnter the number of questions you need: ").strip()
    if raw_n == "":
        total_questions = 20
    else:
        try:
            total_questions = int(raw_n)
        except ValueError:
            print("Invalid value. Using the default of 20.")
            total_questions = 20

    if total_questions < 1:
        total_questions = 1
    if total_questions > 200:
        total_questions = 200

    batch_size = 20 if total_questions >= 30 else min(20, total_questions)

    print("\nAnalyzing data...\n")

    generate_questions(
        user_context=user_context,
        total_questions=total_questions,
        batch_size=batch_size,
        top_k=60,
        top_p=0.95,
        temperature=0.9,
        max_input_len=512,
        max_new_tokens=64
    )
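
For scripted use (for example from a notebook or another module), generate_questions can be called directly, bypassing the input() prompts. A minimal sketch, assuming the fine-tuned checkpoint in t5-viet-qg-finetuned and 30ktrain.json are present in the working directory; the query text is hypothetical:

# Importing the module loads the tokenizer and model at module level;
# the interactive __main__ block does not run on import.
from generate_question import generate_questions

generate_questions(
    user_context="Hung Vuong University",  # hypothetical query text
    total_questions=5,
    batch_size=5,
)
# Prints up to 5 de-duplicated questions generated from the best-matching
# context found in 30ktrain.json.
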