Commit: using session_id for all different users
app.py CHANGED
@@ -15,7 +15,6 @@ from nltk.tokenize import sent_tokenize
 nltk.download('wordnet')
 from nltk.corpus import wordnet
 import random
-from sense2vec import Sense2Vec
 import sense2vec
 from wordcloud import WordCloud
 import matplotlib.pyplot as plt
@@ -27,6 +26,7 @@ from spellchecker import SpellChecker
 from transformers import pipeline
 import re
 import pymupdf
+import uuid
 print("***************************************************************")
 
 st.set_page_config(
@@ -41,6 +41,27 @@ st.set_page_config(
 user_agent = 'QGen/1.0 ([email protected])'
 wiki_wiki = wikipediaapi.Wikipedia(user_agent= user_agent,language='en')
 
+def get_session_id():
+    if 'session_id' not in st.session_state:
+        st.session_state.session_id = str(uuid.uuid4())
+    return st.session_state.session_id
+
+def initialize_state(session_id):
+    if 'session_states' not in st.session_state:
+        st.session_state.session_states = {}
+
+    if session_id not in st.session_state.session_states:
+        st.session_state.session_states[session_id] = {
+            'generated_questions': [],
+            # add other state variables as needed
+        }
+    return st.session_state.session_states[session_id]
+
+def get_state(session_id):
+    return st.session_state.session_states[session_id]
+
+def set_state(session_id, key, value):
+    st.session_state.session_states[session_id][key] = value
 
 @st.cache_resource
 def load_model():
@@ -314,7 +335,8 @@ def assess_question_quality(context, question, answer):
 def main():
     # Streamlit interface
     st.title(":blue[Question Generator System]")
-
+    session_id = get_session_id()
+    state = initialize_state(session_id)
     # Initialize session state
     if 'generated_questions' not in st.session_state:
         st.session_state.generated_questions = []
@@ -349,7 +371,8 @@ def main():
     segments = segment_text(text)
     generate_questions_button = st.button("Generate Questions")
     if generate_questions_button and text:
-
+        state['generated_questions'] = []
+        # st.session_state.generated_questions = []
         for text in segments:
             keywords = extract_keywords(text, extract_all_keywords)
             print(f"\n\nFinal Keywords in Main Function: {keywords}\n\n")
@@ -372,12 +395,17 @@ def main():
                     "complexity_score" : complexity_score,
                     "spelling_correctness" : spelling_correctness,
                 }
-                st.session_state.generated_questions.append(tpl)
+                # st.session_state.generated_questions.append(tpl)
+                state['generated_questions'].append(tpl)
+
+        set_state(session_id, 'generated_questions', state['generated_questions'])
 
         # sort question based on their quality score
-        st.session_state.generated_questions = sorted(st.session_state.generated_questions,key = lambda x: x['overall_score'], reverse=True)
+        # st.session_state.generated_questions = sorted(st.session_state.generated_questions,key = lambda x: x['overall_score'], reverse=True)
+        state['generated_questions'] = sorted(state['generated_questions'],key = lambda x: x['overall_score'], reverse=True)
     # Display generated questions
-    if st.session_state.generated_questions:
+    # if st.session_state.generated_questions:
+    if state['generated_questions']:
         st.header("Generated Questions:",divider='blue')
         for i, q in enumerate(st.session_state.generated_questions):
             # with st.expander(f"Question {i+1}"):
@@ -396,10 +424,11 @@ def main():
             if linked_entity:
                 st.write(f"**Entity Link:** {linked_entity}")
             if show_qa_scores is True:
-                st.
-
-
-
+                m1,m2,m3,m4 = st.columns([1.7,1,1,1])
+                m1.metric("Overall Quality Score", value=f"{q['overall_score']:,.2f}")
+                m2.metric("Relevance Score", value=f"{q['relevance_score']:,.2f}")
+                m3.metric("Complexity Score", value=f"{q['complexity_score']:,.2f}")
+                m4.metric("Spelling Correctness", value=f"{q['spelling_correctness']:,.2f}")
 
             # q['context'] = st.text_area(f"Edit Context {i+1}:", value=q['context'], key=f"context_{i}")
             if enable_feedback_mode:
@@ -411,7 +440,8 @@ def main():
                 st.write("---")
 
     # Export buttons
-    if st.session_state.generated_questions:
+    # if st.session_state.generated_questions:
+    if state['generated_questions']:
         with st.sidebar:
             csv_data = export_to_csv(st.session_state.generated_questions)
             st.download_button(label="Download CSV", data=csv_data, file_name='questions.csv', mime='text/csv')
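
For reference, below is a minimal, self-contained sketch of the per-session pattern this commit introduces: one uuid per browser session kept in st.session_state, keying a dictionary of per-user state. It is only an illustration, assuming the script is launched with streamlit run; the helper names mirror the ones added above, and the sample question entry is invented.

import uuid

import streamlit as st

def get_session_id():
    # One uuid per browser session, stored in Streamlit's per-session state.
    if 'session_id' not in st.session_state:
        st.session_state.session_id = str(uuid.uuid4())
    return st.session_state.session_id

def initialize_state(session_id):
    # A dict of per-session dicts, so each user only ever touches their own entry.
    if 'session_states' not in st.session_state:
        st.session_state.session_states = {}
    if session_id not in st.session_state.session_states:
        st.session_state.session_states[session_id] = {'generated_questions': []}
    return st.session_state.session_states[session_id]

session_id = get_session_id()
state = initialize_state(session_id)

if st.button("Generate"):
    # Invented placeholder entry; the real app appends scored question dicts here.
    state['generated_questions'].append({'question': 'example?', 'overall_score': 1.0})

st.write(f"Session {session_id}: {len(state['generated_questions'])} question(s) stored.")

Since st.session_state is already scoped to a single browser session in Streamlit, the session_id layer mainly gives the app an explicit key it can pass to get_state/set_state. Separately, the display loop (for i, q in enumerate(st.session_state.generated_questions)) and the CSV export still read st.session_state.generated_questions rather than state['generated_questions'], so under the new scheme they would see the old, now-empty shared list; presumably those reads were meant to be switched over as well.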