Spaces:
Build error
import streamlit as st
from transformers import pipeline, AutoModelForSeq2SeqLM, AutoTokenizer
from TTS.api import TTS
from tempfile import NamedTemporaryFile
import os
# Initialize models (cached so they are loaded only once per Streamlit session)
@st.cache_resource
def load_models():
    summarizer_model = AutoModelForSeq2SeqLM.from_pretrained("google/flan-t5-base")
    summarizer_tokenizer = AutoTokenizer.from_pretrained("google/flan-t5-base")
    question_generator = pipeline("text2text-generation", model="valhalla/t5-small-qg-hl")
    qa_model = pipeline("question-answering", model="deepset/roberta-base-squad2")
    tts = TTS(model_name="tts_models/en/ljspeech/tacotron2-DDC", progress_bar=False, gpu=False)
    return summarizer_model, summarizer_tokenizer, question_generator, qa_model, tts

summarizer_model, summarizer_tokenizer, question_generator, qa_model, tts = load_models()
def summarize_document(text):
    # Truncate long documents to the model's 512-token input limit before summarizing
    input_ids = summarizer_tokenizer.encode(text, return_tensors="pt", max_length=512, truncation=True)
    summary_ids = summarizer_model.generate(input_ids, max_length=150, min_length=30, length_penalty=2.0, num_beams=4, early_stopping=True)
    return summarizer_tokenizer.decode(summary_ids[0], skip_special_tokens=True)
def generate_questions(summary):
    # The qg-hl model is answer-aware (it expects <hl>-marked answer spans); a raw
    # summary still works but typically yields a single generic question.
    questions = question_generator(summary)
    return [q["generated_text"] for q in questions]
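# A minimal sketch of answer-aware input for valhalla/t5-small-qg-hl, assuming the
# highlight format from its model card ("generate question: " prefix and <hl> markers
# around the answer span); this helper and its name are illustrative assumptions,
# not part of the current app flow.
def generate_question_for_answer(context, answer):
    highlighted = context.replace(answer, f"<hl> {answer} <hl>", 1)
    result = question_generator(f"generate question: {highlighted}")
    return result[0]["generated_text"]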
def generate_audio(text, voice_gender):
    # LJSpeech is a single-speaker model, so the requested voice gender is ignored
    audio_file = NamedTemporaryFile(delete=False, suffix=".wav")
    tts.tts_to_file(text=text, file_path=audio_file.name)
    return audio_file.name
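# The question-answering pipeline (qa_model) is loaded above but never used in app().
# A hypothetical helper showing how it could answer each generated question against
# the original document; answer_questions and its return shape are assumptions, not
# something the current app calls.
def answer_questions(questions, context):
    answers = []
    for question in questions:
        result = qa_model(question=question, context=context)
        answers.append({"question": question, "answer": result["answer"], "score": result["score"]})
    return answers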
def app():
    st.title("Interactive Document Summarizer")
    uploaded_file = st.file_uploader("Upload a document", type=["txt", "pdf", "docx"])
    if uploaded_file:
        # Note: decoding as UTF-8 only works for plain-text uploads; PDF and DOCX
        # files would need a dedicated text-extraction step before this point.
        raw_text = uploaded_file.read().decode("utf-8")
        st.write("Processing document...")

        # Summarize
        summary = summarize_document(raw_text)
        st.write("Summary Generated:")
        st.write(summary)

        # Generate dialogue, alternating personas between consecutive questions
        questions = generate_questions(summary)
        dialogue = []
        for idx, question in enumerate(questions):
            dialogue.append({"persona": "male" if idx % 2 == 0 else "female", "text": question})

        # Interactive simulation
        st.write("Simulating Conversation:")
        for item in dialogue:
            st.write(f"{item['persona'].capitalize()} says: {item['text']}")
            audio_path = generate_audio(item["text"], item["persona"])
            st.audio(audio_path, format="audio/wav")
if __name__ == "__main__":
    app()
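# Note on the Spaces build error: the imports above imply that the Space's
# requirements.txt needs at least streamlit, transformers, torch, and TTS (the
# Coqui TTS package), and possibly sentencepiece for the T5 tokenizer. Which
# exact version pins resolve the build on a CPU Space is an assumption to
# verify, not something confirmed here.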