Spaces:
Running
Running
Commit
·
599f736
0
Parent(s):
initial commit
Browse files- .gitignore +15 -0
- .gradio/certificate.pem +31 -0
- .python-version +1 -0
- README.md +9 -0
- app.py +176 -0
- flashcard.py +128 -0
- pyproject.toml +16 -0
- requirements.txt +134 -0
- uv.lock +0 -0
.gitignore
ADDED
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Python-generated files
|
2 |
+
__pycache__/
|
3 |
+
*.py[oc]
|
4 |
+
build/
|
5 |
+
dist/
|
6 |
+
wheels/
|
7 |
+
*.egg-info
|
8 |
+
|
9 |
+
# Virtual environments
|
10 |
+
.venv
|
11 |
+
|
12 |
+
# Added by me
|
13 |
+
.env
|
14 |
+
.DS_Store
|
15 |
+
data/
|
.gradio/certificate.pem
ADDED
@@ -0,0 +1,31 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
-----BEGIN CERTIFICATE-----
|
2 |
+
MIIFazCCA1OgAwIBAgIRAIIQz7DSQONZRGPgu2OCiwAwDQYJKoZIhvcNAQELBQAw
|
3 |
+
TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh
|
4 |
+
cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMTUwNjA0MTEwNDM4
|
5 |
+
WhcNMzUwNjA0MTEwNDM4WjBPMQswCQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJu
|
6 |
+
ZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBY
|
7 |
+
MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK3oJHP0FDfzm54rVygc
|
8 |
+
h77ct984kIxuPOZXoHj3dcKi/vVqbvYATyjb3miGbESTtrFj/RQSa78f0uoxmyF+
|
9 |
+
0TM8ukj13Xnfs7j/EvEhmkvBioZxaUpmZmyPfjxwv60pIgbz5MDmgK7iS4+3mX6U
|
10 |
+
A5/TR5d8mUgjU+g4rk8Kb4Mu0UlXjIB0ttov0DiNewNwIRt18jA8+o+u3dpjq+sW
|
11 |
+
T8KOEUt+zwvo/7V3LvSye0rgTBIlDHCNAymg4VMk7BPZ7hm/ELNKjD+Jo2FR3qyH
|
12 |
+
B5T0Y3HsLuJvW5iB4YlcNHlsdu87kGJ55tukmi8mxdAQ4Q7e2RCOFvu396j3x+UC
|
13 |
+
B5iPNgiV5+I3lg02dZ77DnKxHZu8A/lJBdiB3QW0KtZB6awBdpUKD9jf1b0SHzUv
|
14 |
+
KBds0pjBqAlkd25HN7rOrFleaJ1/ctaJxQZBKT5ZPt0m9STJEadao0xAH0ahmbWn
|
15 |
+
OlFuhjuefXKnEgV4We0+UXgVCwOPjdAvBbI+e0ocS3MFEvzG6uBQE3xDk3SzynTn
|
16 |
+
jh8BCNAw1FtxNrQHusEwMFxIt4I7mKZ9YIqioymCzLq9gwQbooMDQaHWBfEbwrbw
|
17 |
+
qHyGO0aoSCqI3Haadr8faqU9GY/rOPNk3sgrDQoo//fb4hVC1CLQJ13hef4Y53CI
|
18 |
+
rU7m2Ys6xt0nUW7/vGT1M0NPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV
|
19 |
+
HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR5tFnme7bl5AFzgAiIyBpY9umbbjANBgkq
|
20 |
+
hkiG9w0BAQsFAAOCAgEAVR9YqbyyqFDQDLHYGmkgJykIrGF1XIpu+ILlaS/V9lZL
|
21 |
+
ubhzEFnTIZd+50xx+7LSYK05qAvqFyFWhfFQDlnrzuBZ6brJFe+GnY+EgPbk6ZGQ
|
22 |
+
3BebYhtF8GaV0nxvwuo77x/Py9auJ/GpsMiu/X1+mvoiBOv/2X/qkSsisRcOj/KK
|
23 |
+
NFtY2PwByVS5uCbMiogziUwthDyC3+6WVwW6LLv3xLfHTjuCvjHIInNzktHCgKQ5
|
24 |
+
ORAzI4JMPJ+GslWYHb4phowim57iaztXOoJwTdwJx4nLCgdNbOhdjsnvzqvHu7Ur
|
25 |
+
TkXWStAmzOVyyghqpZXjFaH3pO3JLF+l+/+sKAIuvtd7u+Nxe5AW0wdeRlN8NwdC
|
26 |
+
jNPElpzVmbUq4JUagEiuTDkHzsxHpFKVK7q4+63SM1N95R1NbdWhscdCb+ZAJzVc
|
27 |
+
oyi3B43njTOQ5yOf+1CceWxG1bQVs5ZufpsMljq4Ui0/1lvh+wjChP4kqKOJ2qxq
|
28 |
+
4RgqsahDYVvTH9w7jXbyLeiNdd8XM2w9U/t7y0Ff/9yi0GE44Za4rF2LN9d11TPA
|
29 |
+
mRGunUHBcnWEvgJBQl9nJEiU0Zsnvgc/ubhPgXRR4Xq37Z0j4r7g1SgEEzwxA57d
|
30 |
+
emyPxgcYxn/eR44/KJ4EBs+lVDR3veyJm+kXQ99b21/+jh5Xos1AnX5iItreGCc=
|
31 |
+
-----END CERTIFICATE-----
|
.python-version
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
3.12
|
README.md
ADDED
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
---
|
2 |
+
title: "obszean"
|
3 |
+
language:
|
4 |
+
- en
|
5 |
+
- de
|
6 |
+
sdk: gradio
|
7 |
+
app_file: app.py
|
8 |
+
suggested_hardware: "cpu-basic"
|
9 |
+
---
|
app.py
ADDED
@@ -0,0 +1,176 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import gradio as gr
|
2 |
+
from pathlib import Path
|
3 |
+
import asyncio
|
4 |
+
import google.generativeai as genai
|
5 |
+
from flashcard import (
|
6 |
+
generate_flashcards_from_pdf,
|
7 |
+
FlashcardSet
|
8 |
+
)
|
9 |
+
import os
|
10 |
+
from dotenv import load_dotenv
|
11 |
+
import tempfile
|
12 |
+
|
13 |
+
# Load environment variables (expects GEMINI_API_KEY in .env or the process
# environment; os.environ[...] raises KeyError at import time if it is missing).
load_dotenv()
genai.configure(api_key=os.environ["GEMINI_API_KEY"])

# Store the current flashcard set in memory.
# NOTE(review): module-level global — shared across all concurrent sessions of
# the Gradio app; per-session state (gr.State) would isolate users. TODO confirm.
current_flashcards = None
|
19 |
+
|
20 |
+
def create_flashcard_text(flashcards: FlashcardSet) -> str:
    """Render a FlashcardSet as a single human-readable chat message.

    Lists every card with its difficulty shown as stars, then appends a
    fixed menu of follow-up actions the user can request.
    """
    lines = [f"📚 Generated {flashcards.total_cards} flashcards about: {flashcards.topic}\n"]

    for num, card in enumerate(flashcards.cards, 1):
        lines += [
            f"\n--- Flashcard {num} (Difficulty: {'⭐' * card.difficulty}) ---",
            f"Q: {card.question}",
            f"A: {card.answer}",
        ]

    lines += [
        "\n\nYou can ask me to:",
        "• Modify specific flashcards",
        "• Generate more flashcards",
        "• Change difficulty levels",
        "• Export to Anki",
    ]

    return "\n".join(lines)
|
36 |
+
|
37 |
+
async def handle_modification_request(text: str, flashcards: FlashcardSet) -> str:
    """Handle user requests to modify flashcards.

    Args:
        text: The user's free-form modification request.
        flashcards: The currently cached flashcard set.

    Returns:
        Gemini's free-text suggestions. NOTE(review): the cards themselves
        are never mutated — this only returns advice text.
    """
    # NOTE(review): 'gemini-pro' is a legacy model name; the rest of the app
    # uses gemini-1.5-* — confirm this alias is still served.
    model = genai.GenerativeModel('gemini-pro')

    # Create a context-aware prompt: embed the rendered current deck so the
    # model can reference specific cards by number.
    prompt = f"""Given the following flashcards and user request, suggest how to modify the flashcards.
Current flashcards:
{create_flashcard_text(flashcards)}

User request: {text}

Please provide specific suggestions for modifications."""

    response = await model.generate_content_async(prompt)
    return response.text
|
52 |
+
|
53 |
+
async def process_message(message: dict, history: list) -> tuple[str, list]:
    """Process uploaded files and chat messages.

    Args:
        message: MultimodalTextbox payload: {"files": [paths...], "text": str}.
        history: Chatbot history as {"role", "content"} message dicts.

    Returns:
        ("", updated_history) — the empty string clears the input box.
    """
    global current_flashcards  # module-level cache of the last generated set

    # Handle file uploads
    if message.get("files"):
        # NOTE(review): every branch below returns, so only the FIRST
        # uploaded file in a message is processed; extras are ignored.
        for file_path in message["files"]:
            if file_path.endswith('.pdf'):
                try:
                    current_flashcards = await async_process_pdf(file_path)
                    response = create_flashcard_text(current_flashcards)
                    return "", history + [
                        {"role": "user", "content": f"Uploaded: {Path(file_path).name}"},
                        {"role": "assistant", "content": response}
                    ]
                except Exception as e:
                    # Surface the failure in-chat instead of crashing the app.
                    error_msg = f"Error processing PDF: {str(e)}"
                    return "", history + [
                        {"role": "user", "content": f"Uploaded: {Path(file_path).name}"},
                        {"role": "assistant", "content": error_msg}
                    ]
            else:
                return "", history + [
                    {"role": "user", "content": f"Uploaded: {Path(file_path).name}"},
                    {"role": "assistant", "content": "Please upload a PDF file."}
                ]

    # Handle text messages
    if message.get("text"):
        user_message = message["text"].strip()

        # If we have flashcards and user is asking for modifications
        if current_flashcards:
            try:
                modification_response = await handle_modification_request(user_message, current_flashcards)
                return "", history + [
                    {"role": "user", "content": user_message},
                    {"role": "assistant", "content": modification_response}
                ]
            except Exception as e:
                error_msg = f"Error processing request: {str(e)}"
                return "", history + [
                    {"role": "user", "content": user_message},
                    {"role": "assistant", "content": error_msg}
                ]
        else:
            return "", history + [
                {"role": "user", "content": user_message},
                {"role": "assistant", "content": "Please upload a PDF file first to generate flashcards."}
            ]

    # Neither files nor text were present in the payload.
    return "", history + [
        {"role": "assistant", "content": "Please upload a PDF file or send a message."}
    ]
|
107 |
+
|
108 |
+
def export_to_anki(flashcards: FlashcardSet) -> str | None:
    """Convert flashcards to Anki-compatible tab-separated format and save to file.

    Writes Anki's "#separator"/"#html"/"#columns" header lines followed by
    one card per row (question, answer, tags).

    Args:
        flashcards: The set to export; a falsy value yields None.

    Returns:
        Path of the written temp file, or None when there is nothing to
        export. NOTE: delete=False means the caller owns cleanup.
    """
    if not flashcards:
        return None

    # encoding='utf-8' is essential: card text may contain non-ASCII
    # characters (this app renders ⭐ elsewhere), and the platform default
    # encoding on Windows would raise UnicodeEncodeError. Anki expects UTF-8.
    with tempfile.NamedTemporaryFile(
        mode='w', suffix='.txt', delete=False, encoding='utf-8'
    ) as f:
        f.write("#separator:tab\n")
        f.write("#html:true\n")
        f.write("#columns:Question\tAnswer\tTags\n")

        for card in flashcards.cards:
            # Newlines inside a field would break the TSV row; Anki renders
            # <br> because #html:true is set above.
            question = card.question.replace('\n', '<br>')
            answer = card.answer.replace('\n', '<br>')
            tags = f"difficulty_{card.difficulty} {flashcards.topic.replace(' ', '_')}"
            f.write(f"{question}\t{answer}\t{tags}\n")

        return f.name
|
126 |
+
|
127 |
+
async def async_process_pdf(pdf_path: str) -> FlashcardSet:
    """Asynchronously process the PDF file.

    Thin wrapper so the Gradio handler can await flashcard generation.

    Args:
        pdf_path: Local filesystem path to the uploaded PDF.
    """
    return await generate_flashcards_from_pdf(pdf_path=pdf_path)
|
130 |
+
|
131 |
+
# Create Gradio interface
with gr.Blocks(title="PDF Flashcard Generator") as demo:
    gr.Markdown("""
    # 📚 PDF Flashcard Generator
    Upload a PDF document and get AI-generated flashcards to help you study!

    Powered by Google's Gemini AI
    """)

    # type="messages" is required: process_message builds history entries as
    # {"role": ..., "content": ...} dicts, which the legacy tuple format
    # rejects in Gradio 5.
    chatbot = gr.Chatbot(
        label="Flashcard Generation Chat",
        type="messages",
        bubble_full_width=False,
        show_copy_button=True,
        height=600
    )

    chat_input = gr.MultimodalTextbox(
        label="Upload PDF or type a message",
        placeholder="Drop a PDF file here or type a message to modify flashcards...",
        file_types=["pdf", "application/pdf"],
        show_label=False,
        sources=["upload", "microphone"]
    )

    # Add clear button for better UX
    clear_button = gr.Button("Clear Chat")

    # .submit fires once when the user presses Enter / the send button.
    # The original .change handler fired on every modification — i.e. one
    # Gemini request per keystroke while typing.
    chat_input.submit(
        fn=process_message,
        inputs=[chat_input, chatbot],
        outputs=[chat_input, chatbot]
    )

    # Add clear functionality: reset both the input box and the chat history.
    clear_button.click(
        lambda: (None, None),
        outputs=[chat_input, chatbot]
    )
|
169 |
+
|
170 |
+
if __name__ == "__main__":
    # 0.0.0.0:7860 is the standard bind for Hugging Face Spaces / containers;
    # allowed_paths lets Gradio serve files from the working directory.
    demo.launch(
        share=False,
        server_name="0.0.0.0",
        server_port=7860,
        allowed_paths=["."]
    )
|
flashcard.py
ADDED
@@ -0,0 +1,128 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from dataclasses import dataclass
|
2 |
+
from typing import List, Optional
|
3 |
+
from pydantic import BaseModel, Field
|
4 |
+
from pydantic_ai import Agent, RunContext
|
5 |
+
import google.generativeai as genai
|
6 |
+
import base64
|
7 |
+
import os
|
8 |
+
import asyncio
|
9 |
+
import httpx
|
10 |
+
|
11 |
+
from dotenv import load_dotenv
|
12 |
+
load_dotenv()
|
13 |
+
|
14 |
+
class Flashcard(BaseModel):
    """Represents a single flashcard with a question and answer."""
    # Field descriptions become part of the structured-output schema the
    # model sees, so keep them precise and model-facing.
    question: str = Field(description="The question side of the flashcard")
    answer: str = Field(description="The answer side of the flashcard")
    # ge/le constrain the model's output to the 1-5 range at validation time.
    difficulty: int = Field(description="Difficulty level from 1-5", ge=1, le=5)
|
19 |
+
|
20 |
+
class FlashcardSet(BaseModel):
    """A set of flashcards generated from the input text."""
    cards: List[Flashcard] = Field(description="List of generated flashcards")
    topic: str = Field(description="The main topic covered by these flashcards")
    # NOTE(review): total_cards is model-reported and never validated against
    # len(cards); the two can disagree. A computed field would be safer.
    total_cards: int = Field(description="Total number of flashcards generated")
|
25 |
+
|
26 |
+
@dataclass
class FlashcardDeps:
    """Dependencies injected into agent tools via RunContext."""
    # Raw input text to build cards from; empty when a PDF is supplied instead.
    text: str
    # Original PDF bytes; base64-encoded for Gemini inside process_pdf.
    pdf_data: Optional[bytes] = None
|
30 |
+
|
31 |
+
# Create the agent with structured output.
# result_type=FlashcardSet makes pydantic-ai validate the model's reply into
# a FlashcardSet; deps_type exposes FlashcardDeps to tools via RunContext.
flashcard_agent = Agent(
    'gemini-1.5-pro',  # Can also use OpenAI or other supported models
    deps_type=FlashcardDeps,
    result_type=FlashcardSet,
    system_prompt="""
    You are a professional educator who creates high-quality flashcards.
    Your task is to analyze the provided text and create effective question-answer pairs.

    Guidelines:
    - Create clear, concise questions
    - Ensure answers are accurate and complete
    - Vary the difficulty levels
    - Focus on key concepts and important details
    - Use a mix of factual and conceptual questions
    """
)
|
48 |
+
|
49 |
+
# @flashcard_agent.tool
|
50 |
+
# async def analyze_text_complexity(ctx: RunContext[FlashcardDeps]) -> float:
|
51 |
+
# """Analyzes the complexity of the input text to help determine appropriate difficulty levels."""
|
52 |
+
# # This is a simplified example - you could implement more sophisticated analysis
|
53 |
+
# words = ctx.deps.text.split()
|
54 |
+
# avg_word_length = sum(len(word) for word in words) / (len(words) + 1e-5)
|
55 |
+
# return min(5.0, max(1.0, avg_word_length / 2))
|
56 |
+
|
57 |
+
@flashcard_agent.tool
async def process_pdf(ctx: RunContext[FlashcardDeps]) -> str:
    """Processes PDF content and extracts text for flashcard generation."""
    # (Docstring above is the tool description the agent sends to the model —
    # kept unchanged deliberately.)

    # No PDF attached: fall back to the plain text already in deps.
    if not ctx.deps.pdf_data:
        return ctx.deps.text

    # Handle direct PDF data — Gemini's inline-document API takes base64.
    print("\nLoading File.")
    doc_data = base64.standard_b64encode(ctx.deps.pdf_data).decode("utf-8")

    model = genai.GenerativeModel("gemini-1.5-flash")

    # Generate a comprehensive summary of the PDF content. Use the async
    # variant (as app.py does elsewhere) so this tool does not block the
    # event loop for the duration of the Gemini call.
    response = await model.generate_content_async([
        {
            'mime_type': 'application/pdf',
            'data': doc_data
        },
        "Please provide a detailed summary of this document, focusing on key concepts, "
        "definitions, and important facts that would be useful for creating flashcards."
    ])

    return response.text
|
80 |
+
|
81 |
+
@flashcard_agent.tool
async def draw_circles(ctx: RunContext[FlashcardDeps]) -> str:
    # NOTE(review): deliberate decoy/canary tool. Its docstring (which the
    # agent framework sends to the model as the tool description) instructs
    # the model never to call it, and the return value flags a violation.
    # Consider removing before production — it costs prompt tokens per run.
    """Draw Circles for no reason, please don't ever use me for anything"""

    return "You Disobeyed."
|
86 |
+
|
87 |
+
async def generate_flashcards_from_pdf(
    pdf_path: Optional[str] = None
) -> FlashcardSet:
    """Generate flashcards from a PDF file.

    Args:
        pdf_path: Local path to a PDF; when None, the agent runs with empty
            text and no PDF bytes.

    Returns:
        The structured FlashcardSet validated by the agent.
    """
    pdf_data = None
    if pdf_path:
        # Read the whole PDF into memory; process_pdf base64-encodes it later.
        with open(pdf_path, "rb") as pdf_file:
            print("\nReading Data.")
            pdf_data = pdf_file.read()

    deps = FlashcardDeps(
        text="",  # Will be populated by process_pdf
        # NOTE(review): process_pdf RETURNS the extracted text to the model;
        # it never mutates deps.text, so the comment above is aspirational.
        pdf_data=pdf_data
    )

    result = await flashcard_agent.run(
        "Extract the text by processing the PDF data provided.",
        deps=deps
    )
    # Debug output: full message trace and token usage for the agent run.
    print(f"\nExecution stack:\n{result.all_messages()}")
    print(f"\nUsage: {result.usage()}")

    return result.data
|
110 |
+
|
111 |
+
# Example usage
async def main():
    """Interactive demo: build a deck from a PDF under data/raw/ and print it."""
    # Example with local PDF — prompt for a filename relative to data/raw/.
    pdf_name = input('\nEnter PDF filepath: ')
    deck = await generate_flashcards_from_pdf(pdf_path=f"data/raw/{pdf_name}")

    print("\nFlashcards from local PDF:")
    print(f"Generated {deck.total_cards} flashcards about {deck.topic}")
    for idx, card in enumerate(deck.cards, 1):
        print(f"\nFlashcard {idx} (Difficulty: {card.difficulty}/5)")
        print(f"Q: {card.question}")
        print(f"A: {card.answer}")
|
124 |
+
|
125 |
+
if __name__ == "__main__":
    # Configure Gemini API (requires GEMINI_API_KEY; load_dotenv ran at import).
    genai.configure(api_key=os.environ["GEMINI_API_KEY"])
    asyncio.run(main())
|
pyproject.toml
ADDED
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
[project]
|
2 |
+
name = "flashcard-app"
|
3 |
+
version = "0.1.0"
|
4 |
+
description = "Add your description here"
|
5 |
+
readme = "README.md"
|
6 |
+
requires-python = ">=3.12"
|
7 |
+
dependencies = [
|
8 |
+
"google>=3.0.0",
|
9 |
+
"google-generativeai>=0.8.3",
|
10 |
+
"gradio>=5.9.1",
|
11 |
+
"markitdown>=0.0.1a3",
|
12 |
+
"pip>=24.3.1",
|
13 |
+
"pydantic-ai[logfire]>=0.0.15",
|
14 |
+
"python-dotenv>=1.0.1",
|
15 |
+
"rich>=13.9.4",
|
16 |
+
]
|
requirements.txt
ADDED
@@ -0,0 +1,134 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
aiofiles==23.2.1
|
2 |
+
annotated-types==0.7.0
|
3 |
+
anthropic==0.44.0
|
4 |
+
anyio==4.8.0
|
5 |
+
beautifulsoup4==4.12.3
|
6 |
+
build==1.2.2.post1
|
7 |
+
cachetools==5.5.1
|
8 |
+
certifi==2024.12.14
|
9 |
+
cffi==1.17.1
|
10 |
+
charset-normalizer==3.4.1
|
11 |
+
click==8.1.8
|
12 |
+
cobble==0.1.4
|
13 |
+
colorama==0.4.6
|
14 |
+
cryptography==44.0.0
|
15 |
+
defusedxml==0.7.1
|
16 |
+
Deprecated==1.2.15
|
17 |
+
distro==1.9.0
|
18 |
+
docutils==0.21.2
|
19 |
+
et_xmlfile==2.0.0
|
20 |
+
eval_type_backport==0.2.2
|
21 |
+
executing==2.2.0
|
22 |
+
fastapi==0.115.6
|
23 |
+
ffmpy==0.5.0
|
24 |
+
filelock==3.17.0
|
25 |
+
fsspec==2024.12.0
|
26 |
+
google==3.0.0
|
27 |
+
google-ai-generativelanguage==0.6.15
|
28 |
+
google-api-core==2.24.0
|
29 |
+
google-api-python-client==2.159.0
|
30 |
+
google-auth==2.37.0
|
31 |
+
google-auth-httplib2==0.2.0
|
32 |
+
google-generativeai==0.8.4
|
33 |
+
googleapis-common-protos==1.66.0
|
34 |
+
gradio==5.12.0
|
35 |
+
gradio_client==1.5.4
|
36 |
+
griffe==1.5.5
|
37 |
+
groq==0.15.0
|
38 |
+
grpcio==1.69.0
|
39 |
+
grpcio-status==1.69.0
|
40 |
+
h11==0.14.0
|
41 |
+
httpcore==1.0.7
|
42 |
+
httplib2==0.22.0
|
43 |
+
httpx==0.28.1
|
44 |
+
huggingface-hub==0.27.1
|
45 |
+
id==1.5.0
|
46 |
+
idna==3.10
|
47 |
+
importlib_metadata==8.5.0
|
48 |
+
jaraco.classes==3.4.0
|
49 |
+
jaraco.context==6.0.1
|
50 |
+
jaraco.functools==4.1.0
|
51 |
+
Jinja2==3.1.5
|
52 |
+
jiter==0.8.2
|
53 |
+
jsonpath-python==1.0.6
|
54 |
+
keyring==25.6.0
|
55 |
+
logfire==3.2.0
|
56 |
+
logfire-api==3.2.0
|
57 |
+
lxml==5.3.0
|
58 |
+
mammoth==1.9.0
|
59 |
+
markdown-it-py==3.0.0
|
60 |
+
markdownify==0.14.1
|
61 |
+
markitdown==0.0.1a3
|
62 |
+
MarkupSafe==2.1.5
|
63 |
+
mdurl==0.1.2
|
64 |
+
mistralai==1.4.0
|
65 |
+
more-itertools==10.6.0
|
66 |
+
mypy-extensions==1.0.0
|
67 |
+
nh3==0.2.20
|
68 |
+
numpy==2.2.2
|
69 |
+
openai==1.60.0
|
70 |
+
openpyxl==3.1.5
|
71 |
+
opentelemetry-api==1.29.0
|
72 |
+
opentelemetry-exporter-otlp-proto-common==1.29.0
|
73 |
+
opentelemetry-exporter-otlp-proto-http==1.29.0
|
74 |
+
opentelemetry-instrumentation==0.50b0
|
75 |
+
opentelemetry-proto==1.29.0
|
76 |
+
opentelemetry-sdk==1.29.0
|
77 |
+
opentelemetry-semantic-conventions==0.50b0
|
78 |
+
orjson==3.10.15
|
79 |
+
packaging==24.2
|
80 |
+
pandas==2.2.3
|
81 |
+
pathvalidate==3.2.3
|
82 |
+
pdfminer.six==20240706
|
83 |
+
pillow==11.1.0
|
84 |
+
proto-plus==1.25.0
|
85 |
+
protobuf==5.29.3
|
86 |
+
puremagic==1.28
|
87 |
+
pyasn1==0.6.1
|
88 |
+
pyasn1_modules==0.4.1
|
89 |
+
pycparser==2.22
|
90 |
+
pydantic==2.10.5
|
91 |
+
pydantic-ai==0.0.19
|
92 |
+
pydantic-ai-slim==0.0.19
|
93 |
+
pydantic-graph==0.0.19
|
94 |
+
pydantic_core==2.27.2
|
95 |
+
pydub==0.25.1
|
96 |
+
Pygments==2.19.1
|
97 |
+
pyparsing==3.2.1
|
98 |
+
pyproject_hooks==1.2.0
|
99 |
+
python-dateutil==2.9.0.post0
|
100 |
+
python-dotenv==1.0.1
|
101 |
+
python-multipart==0.0.20
|
102 |
+
python-pptx==1.0.2
|
103 |
+
pytz==2024.2
|
104 |
+
PyYAML==6.0.2
|
105 |
+
readme_renderer==44.0
|
106 |
+
requests==2.32.3
|
107 |
+
requests-toolbelt==1.0.0
|
108 |
+
rfc3986==2.0.0
|
109 |
+
rich==13.9.4
|
110 |
+
rsa==4.9
|
111 |
+
ruff==0.9.2
|
112 |
+
safehttpx==0.1.6
|
113 |
+
semantic-version==2.10.0
|
114 |
+
shellingham==1.5.4
|
115 |
+
six==1.17.0
|
116 |
+
sniffio==1.3.1
|
117 |
+
soupsieve==2.6
|
118 |
+
SpeechRecognition==3.14.0
|
119 |
+
starlette==0.41.3
|
120 |
+
tomlkit==0.13.2
|
121 |
+
tqdm==4.67.1
|
122 |
+
twine==6.1.0
|
123 |
+
typer==0.15.1
|
124 |
+
typing-inspect==0.9.0
|
125 |
+
typing_extensions==4.12.2
|
126 |
+
tzdata==2025.1
|
127 |
+
uritemplate==4.1.1
|
128 |
+
urllib3==2.3.0
|
129 |
+
uvicorn==0.34.0
|
130 |
+
websockets==14.2
|
131 |
+
wrapt==1.17.2
|
132 |
+
XlsxWriter==3.2.0
|
133 |
+
youtube-transcript-api==0.6.3
|
134 |
+
zipp==3.21.0
|
uv.lock
ADDED
The diff for this file is too large to render.
See raw diff
|
|