Update app.py
app.py CHANGED
@@ -1,36 +1,11 @@
 import gradio as gr
 import openai
-import os
 from PyPDF2 import PdfReader
 from markdownify import markdownify
 
-
-
-
-<Identity>
-<Description>
-You are Loss Dog, a state-of-the-art AI-powered career advisor, resume analyzer, and builder. Your primary role is to:
-- Carefully read and analyze the user’s resume.
-- Use the resume as a knowledge context for all interactions.
-- Proactively engage with the user by answering questions, identifying areas of improvement, and offering suggestions.
-- Help the user build a stronger, more tailored resume by focusing on content clarity, relevance, and impact.
-</Description>
-</Identity>
-<CoreDirectives>
-<Mission>
-Your mission is to collaborate with users to improve their resumes and provide actionable advice. To achieve this, you must:
-- Use the uploaded resume as a knowledge base.
-- Answer user questions or provide feedback based on the resume content.
-- Offer actionable suggestions and best practices.
-</Mission>
-</CoreDirectives>
-</LossDogFramework>
-"""
-
-def extract_text_from_file(file_path: str, file_name: str) -> str:
-    """
-    Extract text from a file. Handles PDF and plain text files.
-    """
+LOSSDOG_PROMPT = "SYSTEM PROMPT: You are Loss Dog, a resume analyzer..."
+
+def extract_text_from_file(file_path, file_name):
     if file_name.endswith(".pdf"):
         try:
             pdf_reader = PdfReader(file_path)
@@ -47,136 +22,111 @@ def extract_text_from_file(file_path: str, file_name: str) -> str:
     else:
         return "Unsupported file format. Please upload a PDF or TXT file."
 
-def convert_to_markdown(text
-    """
-    Convert plain text to Markdown format for better readability.
-    """
+def convert_to_markdown(text):
     return markdownify(text, heading_style="ATX")
 
-def interact_with_lossdog(user_message
-
-
-
+def interact_with_lossdog(user_message, markdown_text, api_key, history):
+    # Ensure openai.api_key is set
+    openai.api_key = api_key
+
+    # Validate existing history to keep only well-formed messages
+    validated_history = []
+    for msg in history:
+        if isinstance(msg, dict) and "role" in msg and "content" in msg:
+            validated_history.append({"role": msg["role"], "content": msg["content"]})
+
+    # Build the system + resume context
+    messages = [
+        {"role": "system", "content": LOSSDOG_PROMPT},
+        {"role": "system", "content": f"Resume Content:\n{markdown_text}"}
+    ] + validated_history
+
+    # Add the new user message
+    messages.append({"role": "user", "content": user_message})
+
     try:
-        openai.api_key = api_key
-
-        # Validate and format the history
-        formatted_history = [
-            {"role": entry["role"], "content": entry["content"]}
-            for entry in history
-            if isinstance(entry, dict) and "role" in entry and "content" in entry
-        ]
-
-        # Build the conversation messages
-        messages = [
-            {"role": "system", "content": LOSSDOG_PROMPT},
-            {"role": "system", "content": f"Resume Content:\n{markdown_text}"}
-        ] + formatted_history + [{"role": "user", "content": user_message}]
-
-        # Generate a response
         response = openai.ChatCompletion.create(
-            model="gpt-
+            model="gpt-4o-mini",
             messages=messages,
-            max_tokens=500
+            max_tokens=500
         )
-
-
-
-
-        history.append({"role": "assistant", "content": assistant_response})
-        return history
+        assistant_reply = response.choices[0].message.content
+        # Update local history
+        validated_history.append({"role": "user", "content": user_message})
+        validated_history.append({"role": "assistant", "content": assistant_reply})
     except Exception as e:
-
-
+        validated_history.append({"role": "assistant", "content": f"Error: {e}"})
+
+    return validated_history
 
 def create_demo():
-    """
-    Create the Gradio Blocks interface for the Loss Dog resume analyzer.
-    """
     with gr.Blocks(css="#resume-preview { height: 300px; overflow-y: auto; border: 1px solid #ccc; padding: 10px; }") as demo:
-        gr.Markdown(""
-
-
-        Upload your resume (PDF or TXT), and Loss Dog will:
-        - Display it in a scrollable box on the top-right.
-        - Use it as persistent context for all interactions.
-        - Engage in back-and-forth chat to help improve your resume and answer questions.
-        """)
-
-        # API Key Input
+        gr.Markdown("# 🐕 LOSS Dog: AI-Powered Resume Advisor")
+
         api_key = gr.Textbox(
             label="OpenAI API Key",
            placeholder="Enter your OpenAI API key...",
            type="password"
        )
 
-        # Layout: Chatbot on the left, resume preview on the right
         with gr.Row():
             with gr.Column(scale=3):
                 chatbot = gr.Chatbot(label="Chat with LOSS Dog", type="messages")
             with gr.Column(scale=1):
-                markdown_preview = gr.Markdown(
-                    label="Resume Preview (Scrollable)",
-                    elem_id="resume-preview"
-                )
+                markdown_preview = gr.Markdown(label="Resume Preview", elem_id="resume-preview")
 
-        # User input and send button
         with gr.Row():
-            user_input = gr.Textbox(
-                placeholder="Ask LOSS Dog about your resume...",
-                label="Your Message",
-                lines=1
-            )
+            user_input = gr.Textbox(label="Your Message", lines=1)
             send_button = gr.Button("Send 🐾")
 
-        # File upload
         with gr.Row():
             upload = gr.File(label="Upload Your Resume (PDF or TXT)")
 
-        #
+        # States
         history_state = gr.State([])
         markdown_state = gr.State("")
 
+        # 1) File Upload Handler
         def handle_upload(file, api_key):
             """
-
+            Convert file to Markdown & reset/return history as empty or minimal.
             """
             if not file:
+                # If no file uploaded, show error in the preview & return empty chat
                 return "No file uploaded.", []
-
-
-
-
-
-
-        #
-
-
-
+            text = extract_text_from_file(file.name, file.name)
+            if text.startswith("Error"):
+                # Show error in preview & return empty chat
+                return text, []
+            # Convert to Markdown for display
+            resume_md = convert_to_markdown(text)
+            # On upload, we might start fresh or keep existing chat? Let's reset to empty
+            return resume_md, []
+
+        # 2) Chat Handler
         def handle_message(user_message, api_key, markdown_text, history):
             """
-
+            Perform actual chat with stored resume context & return updated history.
             """
-
+            updated_history = interact_with_lossdog(user_message, markdown_text, api_key, history)
+            # Return two outputs: one for chatbot, one for saving state
+            return updated_history, updated_history
 
-        #
+        # Connect events
         upload.change(
-            handle_upload,
+            fn=handle_upload,
             inputs=[upload, api_key],
             outputs=[markdown_preview, history_state]
         )
 
-        # Chat message event
         send_button.click(
-            handle_message,
+            fn=handle_message,
             inputs=[user_input, api_key, markdown_state, history_state],
             outputs=[chatbot, history_state]
        )
 
     return demo
 
-
-# Main execution
 if __name__ == "__main__":
     demo = create_demo()
     demo.launch()
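One caveat worth flagging: the updated interact_with_lossdog still calls the legacy openai.ChatCompletion.create interface, which only exists in openai releases before 1.0, so this commit needs an openai<1.0 pin in the Space's requirements to run. Below is a minimal sketch of the same call against the openai>=1.0 client; the helper name interact_with_lossdog_v1 is illustrative only, and the sketch returns just the reply text rather than the full chat history.

from openai import OpenAI

def interact_with_lossdog_v1(user_message, markdown_text, api_key, history):
    # Sketch: same message construction as the committed function, but using
    # the openai>=1.0 client object instead of the module-level ChatCompletion API.
    client = OpenAI(api_key=api_key)
    messages = [
        {"role": "system", "content": LOSSDOG_PROMPT},
        {"role": "system", "content": f"Resume Content:\n{markdown_text}"},
    ] + history + [{"role": "user", "content": user_message}]
    response = client.chat.completions.create(
        model="gpt-4o-mini",
        messages=messages,
        max_tokens=500,
    )
    return response.choices[0].message.content

Separately, handle_upload only writes to markdown_preview and history_state, so markdown_state is never populated and handle_message always receives an empty markdown_text. If the resume is meant to reach the chat context, one possible adjustment is to return the Markdown a second time and wire it into state, e.g. handle_upload returning resume_md, resume_md, [] with outputs=[markdown_preview, markdown_state, history_state].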