DANGDOCAO committed
Commit 2785a2e · verified · 1 Parent(s): 9fa1773
Files changed (1)
  1. HVU_QA/generate_question.py +139 -139
HVU_QA/generate_question.py CHANGED
@@ -1,139 +1,139 @@
 import json
 from difflib import SequenceMatcher
 from transformers import T5Tokenizer, T5ForConditionalGeneration
 from transformers.utils import logging as hf_logging
 
 hf_logging.set_verbosity_error()
 
 MODEL_DIR = "t5-viet-qg-finetuned"
 DATA_PATH = "30ktrain.json"
 
 tokenizer = T5Tokenizer.from_pretrained(MODEL_DIR)
 model = T5ForConditionalGeneration.from_pretrained(MODEL_DIR)
 
 def find_best_match_from_context(user_context, squad_data):
     best_score, best_entry = 0.0, None
     ui = user_context.lower()
 
     for article in squad_data.get("data", []):
         context_title = article.get("title", "")
         score_title = SequenceMatcher(None, ui, context_title.lower()).ratio()
 
         for paragraph in article.get("paragraphs", []):
             context = paragraph.get("context", "")
             for qa in paragraph.get("qas", []):
                 answers = qa.get("answers", [])
                 if not answers:
                     continue
                 answer_text = answers[0].get("text", "").strip()
                 question_text = qa.get("question", "").strip()
 
                 score = score_title
                 if score > best_score:
                     best_score = score
                     best_entry = (context, answer_text, question_text)
 
     return best_entry
 
 def _near_duplicate(q, seen, thr=0.90):
     for s in seen:
         if SequenceMatcher(None, q, s).ratio() >= thr:
             return True
     return False
 
 def generate_questions(user_context,
                        total_questions=20,
                        batch_size=10,
                        top_k=60,
                        top_p=0.95,
-                       temperature=0.9,  # raise the temperature to generate more creative questions
+                       temperature=0.9,
                        max_input_len=512,
                        max_new_tokens=64):
     with open(DATA_PATH, "r", encoding="utf-8") as f:
         squad_data = json.load(f)
 
     best_entry = find_best_match_from_context(user_context, squad_data)
     if best_entry is None:
         print("No matching data found in the JSON file.")
         return
 
     context, answer, _ = best_entry
 
 
-    input_text = f"answer: {answer} context: {context}"
+    input_text = f"answer: {answer}\ncontext: {context}\nquestion:"
     inputs = tokenizer(
         input_text,
         return_tensors="pt",
         truncation=True,
         max_length=max_input_len
     )
 
     unique_questions = []
     remaining = total_questions
 
     while remaining > 0:
         n = min(batch_size, remaining)
         outputs = model.generate(
             **inputs,
             do_sample=True,
             top_k=top_k,
             top_p=top_p,
             temperature=temperature,
             max_new_tokens=max_new_tokens,
             num_return_sequences=n,
             no_repeat_ngram_size=3,
             repetition_penalty=1.12
         )
 
         for out in outputs:
             q = tokenizer.decode(out, skip_special_tokens=True).strip()
             if len(q) < 5:
                 continue
             if not _near_duplicate(q, unique_questions, thr=0.90):
                 unique_questions.append(q)
 
         remaining = total_questions - len(unique_questions)
         if remaining <= 0:
             break
 
     unique_questions = unique_questions[:total_questions]
 
 
     print("Newly generated questions:")
     for i, q in enumerate(unique_questions, 1):
         if not q.endswith("?"):
             q += "?"
         print(f"{i}. {q}")
 
 if __name__ == "__main__":
     user_context = input("\nEnter a passage of text:\n ").strip()
 
     raw_n = input("\nEnter the number of questions you need: ").strip()
     if raw_n == "":
         total_questions = 20
     else:
         try:
             total_questions = int(raw_n)
         except ValueError:
             print("Invalid value. Using the default of 20.")
             total_questions = 20
 
     if total_questions < 1:
         total_questions = 1
     if total_questions > 200:
         total_questions = 200
 
     batch_size = 20 if total_questions >= 30 else min(20, total_questions)
 
     print("\nAnalyzing data...\n")
 
     generate_questions(
         user_context=user_context,
         total_questions=total_questions,
         batch_size=batch_size,
         top_k=60,
         top_p=0.95,
         temperature=0.9,
         max_input_len=512,
         max_new_tokens=64
     )
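The only functional change in this commit is the prompt string handed to the tokenizer. A minimal before/after sketch, using hypothetical answer and context values (not taken from 30ktrain.json):

    answer = "Hà Nội"                           # hypothetical answer span
    context = "Hà Nội là thủ đô của Việt Nam."  # hypothetical context passage

    # old format: all fields run together on a single line
    old_prompt = f"answer: {answer} context: {context}"

    # new format: one field per line, closing with an explicit "question:" cue
    new_prompt = f"answer: {answer}\ncontext: {context}\nquestion:"

Separating the fields and ending with a "question:" marker presumably matches the prompt layout the t5-viet-qg-finetuned checkpoint saw during fine-tuning; the generation parameters themselves are unchanged.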
 
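For context, find_best_match_from_context expects DATA_PATH to point at SQuAD-style JSON. A minimal sketch of the shape the code reads, written as a Python literal with hypothetical values (the real contents of 30ktrain.json are not part of this commit):

    squad_data = {
        "data": [
            {
                "title": "Hà Nội",  # compared against the user's text
                "paragraphs": [
                    {
                        "context": "Hà Nội là thủ đô của Việt Nam.",
                        "qas": [
                            {
                                "question": "Thủ đô của Việt Nam là gì?",
                                "answers": [{"text": "Hà Nội"}],
                            }
                        ],
                    }
                ],
            }
        ]
    }

Note that the similarity score is computed from the article title alone (score = score_title), so the paragraph context and question text determine what gets returned for the best-titled article but play no part in the ranking itself.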
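And a minimal sketch of driving the generator non-interactively, assuming HVU_QA is importable as a package and that t5-viet-qg-finetuned and 30ktrain.json sit where the module constants expect them (the import path is an assumption; note that the model and tokenizer load at import time):

    from HVU_QA.generate_question import generate_questions

    # prints up to five deduplicated questions for the best-matching article
    generate_questions(
        user_context="Hà Nội",  # hypothetical topic, matched against article titles
        total_questions=5,
        batch_size=5,
    )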