Update app.py
app.py
CHANGED
@@ -23,7 +23,7 @@ import json
 import os
 from sentence_transformers import SentenceTransformer, util
 import textstat
-import
+from spellchecker import SpellChecker
 from transformers import pipeline

 print("***************************************************************")
@@ -40,7 +40,7 @@ wiki_wiki = wikipediaapi.Wikipedia(user_agent= user_agent,language='en')

 @st.cache_resource
 def load_model():
-    model_name = "DevBM/t5-
+    model_name = "DevBM/t5-small-squad"
     model = T5ForConditionalGeneration.from_pretrained(model_name)
     tokenizer = T5Tokenizer.from_pretrained(model_name)
     return model, tokenizer
@@ -58,14 +58,12 @@ def load_qa_models():
     # Initialize BERT model for sentence similarity
     similarity_model = SentenceTransformer('all-MiniLM-L6-v2')

-
-
-
-    return similarity_model, language_tool
+    spell = SpellChecker()
+    return similarity_model, spell

 nlp, s2v = load_nlp_models()
 model, tokenizer = load_model()
-similarity_model,
+similarity_model, spell = load_qa_models()

 def save_feedback(question, answer,rating):
     feedback_file = 'question_feedback.json'
@@ -251,18 +249,18 @@ def assess_question_quality(context, question, answer):
     # Assess complexity using token length (as a simple metric)
     complexity_score = min(len(question_doc) / 20, 1) # Normalize to 0-1

-    # Assess
-
-
+    # Assess Spelling correctness
+    misspelled = spell.unknown(question.split())
+    spelling_correctness = 1 - (len(misspelled) / len(question.split())) # Normalize to 0-1

     # Calculate overall score (you can adjust weights as needed)
     overall_score = (
         0.4 * relevance_score +
-        0.
-        0.
+        0.4 * complexity_score +
+        0.2 * spelling_correctness
     )

-    return overall_score, relevance_score, complexity_score,
+    return overall_score, relevance_score, complexity_score, spelling_correctness

 def main():
     # Streamlit interface
@@ -303,7 +301,7 @@ def main():
                 break
             question = generate_question(context, keyword, num_beams=num_beams)
             options = generate_options(keyword,context)
-            overall_score, relevance_score, complexity_score,
+            overall_score, relevance_score, complexity_score, spelling_correctness = assess_question_quality(context,question,keyword)
            tpl = {
                 "question" : question,
                 "context" : context,
@@ -312,7 +310,7 @@ def main():
                 "overall_score" : overall_score,
                 "relevance_score" : relevance_score,
                 "complexity_score" : complexity_score,
-                "
+                "spelling_correctness" : spelling_correctness,
             }
             st.session_state.generated_questions.append(tpl)

@@ -339,7 +337,7 @@ def main():
             st.write(f"**Overall Quality Score:** {q['overall_score']:.2f}")
             st.write(f"**Relevance Score:** {q['relevance_score']:.2f}")
             st.write(f"**Complexity Score:** {q['complexity_score']:.2f}")
-            st.write(f"**
+            st.write(f"**Spelling Correctness:** {q['spelling_correctness']:.2f}")

             # q['context'] = st.text_area(f"Edit Context {i+1}:", value=q['context'], key=f"context_{i}")
             if enable_feedback_mode:
@@ -389,7 +387,6 @@ def main():
         st.write("No feedback data available yet.")

     print("********************************************************************************")
-

 if __name__ == '__main__':
     main()
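For context, here is a minimal standalone sketch of the spelling-correctness metric this commit introduces, assuming the pyspellchecker package (which provides spellchecker.SpellChecker) is installed; the score_spelling helper name and the sample sentence are illustrative only and are not part of app.py:

    from spellchecker import SpellChecker

    spell = SpellChecker()

    def score_spelling(question: str) -> float:
        """Fraction of whitespace-separated tokens found in the dictionary, in [0, 1]."""
        words = question.split()
        if not words:                      # guard against an empty question
            return 0.0
        misspelled = spell.unknown(words)  # tokens the dictionary does not recognise
        return 1 - (len(misspelled) / len(words))

    print(score_spelling("Waht is the capital of France"))  # roughly 0.83 if only 'Waht' is flagged

Note that, as in the commit, the text is split on whitespace only, so a correctly spelled word with trailing punctuation (for example "France?") may still be counted as misspelled and pull the score below 1, and the division assumes the question is non-empty.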