Update backup15.app.py

backup15.app.py CHANGED (+126 -70)
@@ -50,11 +50,12 @@ EDGE_TTS_VOICES = [
 
 # Initialize session state variables
 if 'marquee_settings' not in st.session_state:
+    # Default to 20s animationDuration instead of 10s:
     st.session_state['marquee_settings'] = {
         "background": "#1E1E1E",
         "color": "#FFFFFF",
         "font-size": "14px",
-        "animationDuration": "10s",
+        "animationDuration": "20s",  # <- changed to 20s
         "width": "100%",
         "lineHeight": "35px"
     }
@@ -100,7 +101,6 @@ if 'ANTHROPIC_API_KEY' in st.secrets:
     anthropic_key = st.secrets["ANTHROPIC_API_KEY"]
 
 openai.api_key = openai_api_key
-claude_client = anthropic.Anthropic(api_key=anthropic_key)
 openai_client = OpenAI(api_key=openai.api_key, organization=os.getenv('OPENAI_ORG_ID'))
 HF_KEY = os.getenv('HF_KEY')
 API_URL = os.getenv('API_URL')
@@ -129,7 +129,7 @@ def initialize_marquee_settings():
         "background": "#1E1E1E",
         "color": "#FFFFFF",
         "font-size": "14px",
-        "animationDuration": "10s",
+        "animationDuration": "20s",  # ensure 20s stays
         "width": "100%",
         "lineHeight": "35px"
     }
@@ -153,7 +153,8 @@ def update_marquee_settings_ui():
                                   key="text_color_picker")
     with cols[1]:
         font_size = st.slider("📏 Size", 10, 24, 14, key="font_size_slider")
-        duration = st.slider("⏱️ Speed", 1, 20, 10, key="duration_slider")
+        # The default is now 20, not 10
+        duration = st.slider("⏱️ Speed", 1, 20, 20, key="duration_slider")
 
     st.session_state['marquee_settings'].update({
         "background": bg_color,
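For context, a standalone sketch of how the picker and slider values are presumably folded back into the CSS-style strings kept in marquee_settings. The update() call is cut off by this hunk, so the exact mapping, the f-string formatting, and the variable names below are assumptions, not code from this commit:

# Hedged sketch (assumed mapping, not shown in this hunk): widget values -> CSS strings.
bg_color, text_color, font_size, duration = "#1E1E1E", "#FFFFFF", 14, 20
marquee_settings = {}
marquee_settings.update({
    "background": bg_color,
    "color": text_color,
    "font-size": f"{font_size}px",
    "animationDuration": f"{duration}s",  # 20 -> "20s", matching the new default above
})
print(marquee_settings)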
@@ -189,7 +190,46 @@ def clean_text_for_filename(text: str) -> str:
     filtered = [w for w in words if len(w) > 3 and w not in stop_short]
     return '_'.join(filtered)[:200]
 
-def generate_filename(prompt, response, file_type="md"):
+
+def generate_filename(prompt, response, file_type="md", max_length=200):
+    """
+    Generate a shortened filename by:
+    1. Extracting high-info terms
+    2. Creating a smaller snippet
+    3. Cleaning & joining them
+    4. Truncating if needed
+    """
+    prefix = format_timestamp_prefix() + "_"
+    combined_text = (prompt + " " + response)[:200]  # limit huge text input
+    info_terms = get_high_info_terms(combined_text, top_n=5)
+    snippet = (prompt[:40] + " " + response[:40]).strip()
+    snippet_cleaned = clean_text_for_filename(snippet)
+    name_parts = info_terms + [snippet_cleaned]
+    full_name = '_'.join(name_parts).strip('_')
+    leftover_chars = max_length - len(prefix) - len(file_type) - 1
+    if len(full_name) > leftover_chars:
+        full_name = full_name[:leftover_chars]
+
+    return f"{prefix}{full_name}.{file_type}"
+
+
+def create_file(prompt, response, file_type="md"):
+    """
+    Create a file using the shortened filename from generate_filename().
+    """
+    filename = generate_filename(prompt.strip(), response.strip(), file_type)
+    with open(filename, 'w', encoding='utf-8') as f:
+        f.write(prompt + "\n\n" + response)
+    return filename
+
+
+def generate_filename_old(prompt, response, file_type="md"):
     prefix = format_timestamp_prefix() + "_"
     combined = (prompt + " " + response).strip()
     info_terms = get_high_info_terms(combined, top_n=10)
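For reference, a minimal standalone sketch of the length budget used by the new generate_filename(): the timestamp prefix, the dot, and the extension are subtracted from max_length before the name is truncated, so the final filename never exceeds max_length. The prefix string below is only an assumed example of what format_timestamp_prefix() returns:

# Hedged sketch: the truncation budget in isolation.
prefix = "2025_01_11_120000_"      # assumed shape of format_timestamp_prefix() + "_"
file_type = "md"
max_length = 200
full_name = "marquee_settings_streamlit_arxiv_claude_" * 8   # deliberately oversized
leftover_chars = max_length - len(prefix) - len(file_type) - 1
if len(full_name) > leftover_chars:
    full_name = full_name[:leftover_chars]
filename = f"{prefix}{full_name}.{file_type}"
assert len(filename) <= max_length
print(len(filename), filename[:60] + "...")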
@@ -201,7 +241,7 @@ def generate_filename(prompt, response, file_type="md"):
     full_name = full_name[:150]
     return f"{prefix}{full_name}.{file_type}"
 
-def create_file(prompt, response, file_type="md"):
+def create_file_old(prompt, response, file_type="md"):
     filename = generate_filename(prompt.strip(), response.strip(), file_type)
     with open(filename, 'w', encoding='utf-8') as f:
         f.write(prompt + "\n\n" + response)
@@ -259,7 +299,7 @@ def save_qa_with_audio(question, answer, voice=None):
     md_file = create_file(question, answer, "md")
 
     # Generate audio file
-    audio_text = f"
+    audio_text = f"{question}\n\nAnswer: {answer}"
     audio_file = speak_with_edge_tts(
         audio_text,
         voice=voice,
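speak_with_edge_tts() is defined elsewhere in this file; as a point of reference, a typical edge-tts synthesis call looks roughly like the sketch below. This is an assumption about what such a helper usually wraps, not code from this commit, and the voice name and output path are illustrative:

# Hedged sketch: minimal edge-tts synthesis (assumed, not from the diff).
import asyncio
import edge_tts

async def synthesize(text: str, voice: str = "en-US-AriaNeural", out_file: str = "answer.mp3"):
    communicate = edge_tts.Communicate(text, voice)
    await communicate.save(out_file)   # writes the synthesized audio to disk
    return out_file

if __name__ == "__main__":
    asyncio.run(synthesize("What is attention?\n\nAnswer: a weighting over inputs."))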
@@ -269,7 +309,7 @@ def save_qa_with_audio(question, answer, voice=None):
     return md_file, audio_file
 
 def process_paper_content(paper):
-    marquee_text = f"📄 {paper['title']} | 👤 {paper['authors'][:100]} | 📝 {paper['summary'][:
+    marquee_text = f"📄 {paper['title']} | 👤 {paper['authors'][:100]} | 📝 {paper['summary'][:500]}"
     audio_text = f"{paper['title']} by {paper['authors']}. {paper['summary']}"
     return marquee_text, audio_text
 
@@ -367,17 +407,12 @@ def parse_arxiv_refs(ref_text: str):
     return results[:20]
 
 
-
 # ---------------------------- Edit 1/11/2025 - add a constitution to my arxiv system templating to build configurable character and personality of IO.
 
 def perform_ai_lookup(q, vocal_summary=True, extended_refs=False,
                       titles_summary=True, full_audio=False):
     start = time.time()
 
-    #SCIENCE_PROBLEM = "Solving visual acuity of UI screens using gradio and streamlit apps that run reactive style components using html components and apis across gradio and streamlit partner apps - a cloud of contiguous org supporting ai agents"
-    #SONG_STYLE = "techno, trance, industrial"
-
-
     ai_constitution = """
     You are a talented AI coder and songwriter with a unique ability to explain scientific concepts through music with code easter eggs.. Your task is to create a song that not only entertains but also educates listeners about a specific science problem and its potential solutions.
@@ -424,29 +459,54 @@ def perform_ai_lookup(q, vocal_summary=True, extended_refs=False,
     - Ensure catchy and memorable
     - Verify maintains the requested style throughout
     """
+
+    # Claude then Arxiv..
+
+    # Claude:
+    client = anthropic.Anthropic(api_key=anthropic_key)
+    user_input = q
+
+    response = client.messages.create(
+        model="claude-3-sonnet-20240229",
+        max_tokens=1000,
+        messages=[
+            {"role": "user", "content": user_input}
+        ])
+
+    st.write("Claude's reply 🧠:")
+    st.markdown(response.content[0].text)
+
+    # Render audio track for Claude Response
+    #filename = generate_filename(q, response.content[0].text)
+    result = response.content[0].text
+    create_file(q, result)
+    # Save and produce audio for Claude response
+    md_file, audio_file = save_qa_with_audio(q, result)
+    st.subheader("📝 Main Response Audio")
+    play_and_download_audio(audio_file, st.session_state['audio_format'])
 
+    # Arxiv:
+    st.write("Arxiv's AI this Evening is Mixtral 8x7B MoE Instruct with 9 English Voices 🧠:")
+
     client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
     refs = client.predict(q, 20, "Semantic Search",
                           "mistralai/Mixtral-8x7B-Instruct-v0.1",
                           api_name="/update_with_rag_md")[0]
 
-    #st.code(refs)
-
     r2 = client.predict(q, "mistralai/Mixtral-8x7B-Instruct-v0.1",
                         True, api_name="/ask_llm")
 
-    # mistralai/Mistral-Nemo-Instruct-2407
-    # mistralai/Mistral-7B-Instruct-v0.3
-
-    #st.code(r2)
-
     result = f"### 🔎 {q}\n\n{r2}\n\n{refs}"
-
-    #
-
+
+    # Save and produce audio
     md_file, audio_file = save_qa_with_audio(q, result)
 
     st.subheader("📝 Main Response Audio")
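The Gradio calls above can also be exercised outside Streamlit, which is handy for checking the Space before wiring it into perform_ai_lookup(). The sketch below reuses the same Space name, arguments, and api_name values shown in the hunk; only the query string is an invented example:

# Hedged sketch: calling the same arXiv RAG Space directly with gradio_client.
from gradio_client import Client

client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
refs = client.predict("mixture of experts routing", 20, "Semantic Search",
                      "mistralai/Mixtral-8x7B-Instruct-v0.1",
                      api_name="/update_with_rag_md")[0]
answer = client.predict("mixture of experts routing",
                        "mistralai/Mixtral-8x7B-Instruct-v0.1",
                        True, api_name="/ask_llm")
print(answer[:300])
print(refs[:300])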
@@ -462,11 +522,6 @@ def perform_ai_lookup(q, vocal_summary=True, extended_refs=False,
     elapsed = time.time()-start
     st.write(f"**Total Elapsed:** {elapsed:.2f} s")
 
-
-
-
-
-
     return result
 
 def process_voice_input(text):
@@ -528,24 +583,18 @@ def display_file_manager_sidebar(groups_sorted):
         elif f.endswith(".wav"):
             all_wav.append(f)
 
-    col1, col2, col3, col4 = st.sidebar.columns(4)
+    col1, col4 = st.sidebar.columns(2)
     with col1:
-        if st.button("🗑 DelMD"):
+        if st.button("🗑 Delete All"):
             for f in all_md:
                 os.remove(f)
-            st.session_state.should_rerun = True
-    with col2:
-        if st.button("🗑 DelMP3"):
             for f in all_mp3:
                 os.remove(f)
-            st.session_state.should_rerun = True
-    with col3:
-        if st.button("🗑 DelWAV"):
             for f in all_wav:
                 os.remove(f)
             st.session_state.should_rerun = True
     with col4:
-        if st.button("⬇️
+        if st.button("⬇️ Zip All"):
             zip_name = create_zip_of_files(all_md, all_mp3, all_wav, st.session_state.get('last_query', ''))
             if zip_name:
                 st.sidebar.markdown(get_download_link(zip_name, "zip"), unsafe_allow_html=True)
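create_zip_of_files() is defined elsewhere in the app; for reference, a minimal standalone sketch of bundling the collected files into one archive. The function name, archive name, and the commented usage line are illustrative assumptions, not the app's actual implementation:

# Hedged sketch: bundle markdown and audio files into one zip.
import zipfile

def zip_files(paths, zip_name="results.zip"):
    with zipfile.ZipFile(zip_name, "w", zipfile.ZIP_DEFLATED) as zf:
        for p in paths:
            zf.write(p)           # store each file under its own name
    return zip_name

# zip_files(all_md + all_mp3 + all_wav)  # illustrative call inside the sidebar handler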
@@ -632,28 +681,8 @@ def main():
         with open(f, 'r', encoding='utf-8') as file:
             st.session_state['marquee_content'] = file.read()[:280]
 
-    #
-
-    selected_voice = st.sidebar.selectbox(
-        "Select TTS Voice:",
-        options=EDGE_TTS_VOICES,
-        index=EDGE_TTS_VOICES.index(st.session_state['tts_voice'])
-    )
-
-    # Audio Format Settings
-    st.sidebar.markdown("### 🔊 Audio Format")
-    selected_format = st.sidebar.radio(
-        "Choose Audio Format:",
-        options=["MP3", "WAV"],
-        index=0
-    )
-
-    if selected_voice != st.session_state['tts_voice']:
-        st.session_state['tts_voice'] = selected_voice
-        st.rerun()
-    if selected_format.lower() != st.session_state['audio_format']:
-        st.session_state['audio_format'] = selected_format.lower()
-        st.rerun()
+    # Instead of putting voice settings in the sidebar,
+    # we will handle them in the "🎤 Voice" tab below.
 
     # Main Interface
     tab_main = st.radio("Action:", ["🎤 Voice", "📸 Media", "🔍 ArXiv", "📝 Editor"],
@@ -679,15 +708,15 @@ def main():
                 st.session_state.old_val = val
                 st.session_state.last_query = edited_input
                 result = perform_ai_lookup(edited_input, vocal_summary=True, extended_refs=False,
-
+                                           titles_summary=True, full_audio=full_audio)
         else:
             if st.button("▶ Run"):
                 st.session_state.old_val = val
                 st.session_state.last_query = edited_input
                 result = perform_ai_lookup(edited_input, vocal_summary=True, extended_refs=False,
-
+                                           titles_summary=True, full_audio=full_audio)
 
-
+    # --- Tab: ArXiv
     if tab_main == "🔍 ArXiv":
         st.subheader("🔍 Query ArXiv")
         q = st.text_input("🔍 Query:", key="arxiv_query")
@@ -699,27 +728,53 @@ def main():
             full_audio = st.checkbox("📚FullAudio", value=False, key="option_full_audio")
             full_transcript = st.checkbox("🧾FullTranscript", value=False, key="option_full_transcript")
 
-
         if q and st.button("🔍Run"):
             st.session_state.last_query = q
             result = perform_ai_lookup(q, vocal_summary=vocal_summary, extended_refs=extended_refs,
-
+                                       titles_summary=titles_summary, full_audio=full_audio)
             if full_transcript:
                 create_file(q, result, "md")
 
+    # --- Tab: Voice
     elif tab_main == "🎤 Voice":
         st.subheader("🎤 Voice Input")
+
+        # Move voice selection here:
+        st.markdown("### 🎤 Voice Settings")
+        selected_voice = st.selectbox(
+            "Select TTS Voice:",
+            options=EDGE_TTS_VOICES,
+            index=EDGE_TTS_VOICES.index(st.session_state['tts_voice'])
+        )
+
+        # Audio Format Settings below the voice selection
+        st.markdown("### 🔊 Audio Format")
+        selected_format = st.radio(
+            "Choose Audio Format:",
+            options=["MP3", "WAV"],
+            index=0
+        )
+
+        if selected_voice != st.session_state['tts_voice']:
+            st.session_state['tts_voice'] = selected_voice
+            st.rerun()
+        if selected_format.lower() != st.session_state['audio_format']:
+            st.session_state['audio_format'] = selected_format.lower()
+            st.rerun()
+
+        # Now the text area to enter your message
         user_text = st.text_area("💬 Message:", height=100)
         user_text = user_text.strip().replace('\n', ' ')
 
         if st.button("📨 Send"):
             process_voice_input(user_text)
 
         st.subheader("📜 Chat History")
         for c in st.session_state.chat_history:
             st.write("**You:**", c["user"])
             st.write("**Response:**", c["claude"])
 
+    # --- Tab: Media
     elif tab_main == "📸 Media":
         st.header("📸 Images & 🎥 Videos")
         tabs = st.tabs(["🖼 Images", "🎥 Video"])
@@ -769,6 +824,7 @@ def main():
             else:
                 st.write("No videos found.")
 
+    # --- Tab: Editor
     elif tab_main == "📝 Editor":
         if st.session_state.editing_file:
             st.subheader(f"Editing: {st.session_state.editing_file}")
@@ -805,7 +861,7 @@ def main():
                 break
         if st.button("❌ Close"):
             st.session_state.viewing_prefix = None
-            st.session_state['marquee_content'] = "🚀 Welcome to
+            st.session_state['marquee_content'] = "🚀 Welcome to Deep Research Evaluator | 🤖 Your Talking Research Assistant"
 
     st.markdown("""
     <style>
@@ -820,4 +876,4 @@ def main():
         st.rerun()
 
 if __name__ == "__main__":
-    main()
+    main()