Update app.py
app.py CHANGED
@@ -27,10 +27,18 @@ LOSSDOG_PROMPT = """
 """
 
 def extract_text_from_file(file_path: str, file_name: str) -> str:
+    """
+    Extract text from a file. Handles PDF and plain text files.
+    Args:
+        file_path (str): The path to the file.
+        file_name (str): The name of the file.
+    Returns:
+        str: Extracted text content from the file.
+    """
     if file_name.endswith(".pdf"):
         try:
             pdf_reader = PdfReader(file_path)
-            text = "".join(
+            text = "".join(page.extract_text() for page in pdf_reader.pages)
             return text
         except Exception as e:
             return f"Error extracting text from PDF: {str(e)}"
@@ -44,22 +52,38 @@ def extract_text_from_file(file_path: str, file_name: str) -> str:
     return "Unsupported file format. Please upload a PDF or TXT file."
 
 def convert_to_markdown(text: str) -> str:
+    """
+    Convert plain text to Markdown format for better readability.
+    Args:
+        text (str): The plain text to convert.
+    Returns:
+        str: The Markdown-formatted text.
+    """
     return markdownify(text, heading_style="ATX")
 
 def analyze_resume_with_lossdog(
     markdown_text: str, api_key: str, history: list
 ) -> list:
+    """
+    Analyze the resume using OpenAI GPT and generate conversational responses.
+    Args:
+        markdown_text (str): Markdown content of the resume.
+        api_key (str): OpenAI API key.
+        history (list): Chat history between the user and the assistant.
+    Returns:
+        list: Updated chat history with AI responses.
+    """
     try:
         openai.api_key = api_key
         messages = [
-            {
+            {"role": "system", "content": LOSSDOG_PROMPT},
             {"role": "system", "content": f"Resume Content:\n{markdown_text}"},
         ] + history
 
         response = openai.ChatCompletion.create(
             model="gpt-4o-mini",
             messages=messages,
-            max_tokens=
+            max_tokens=50000,
         )
         assistant_response = response.choices[0].message.content
         history.append({"role": "assistant", "content": assistant_response})
@@ -69,6 +93,13 @@ def analyze_resume_with_lossdog(
         return history
 
 def generate_json_output(history: list) -> tuple:
+    """
+    Generate a JSON summary of the chat history and save it to a file.
+    Args:
+        history (list): Chat history between the user and the assistant.
+    Returns:
+        tuple: File name and content of the generated JSON file.
+    """
     timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
     filename = f"resume_analysis_{timestamp}.json"
     try:
@@ -80,6 +111,9 @@ def generate_json_output(history: list) -> tuple:
         return None, f"Error generating JSON: {str(e)}"
 
 def create_demo():
+    """
+    Create the Gradio Blocks interface for the Loss Dog resume analyzer.
+    """
     with gr.Blocks(theme=gr.themes.Soft()) as demo:
         gr.Markdown("""
         # 🐕 LOSS Dog: AI-Powered Resume Analyzer
@@ -112,11 +146,9 @@ def create_demo():
 
         with gr.Row():
             json_view = gr.JSON(label="Final JSON Summary")
-            download_link = gr.Textbox(label="Download Link (Copy & Paste)")
 
         history_state = gr.State([])
         markdown_state = gr.State("")
-        file_path_state = gr.State("")
 
         def handle_upload(file, api_key):
             if not file:
@@ -144,10 +176,7 @@ def create_demo():
 
         def handle_generate_json(history):
            filename, json_content = generate_json_output(history)
-
-            return json_content, ""
-            download_url = f"/download/{filename}"
-            return json_content, download_url
+            return json_content
 
         upload.change(
             handle_upload,
@@ -161,14 +190,10 @@ def create_demo():
             outputs=[chatbot]
         )
 
-        send_button.click(
-            handle_generate_json,
-            inputs=[history_state],
-            outputs=[json_view, download_link]
-        )
-
     return demo
 
+
+# Main execution
 if __name__ == "__main__":
     demo = create_demo()
     demo.launch()
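
Note on the new PDF join in extract_text_from_file: the added one-liner assumes every page yields text. The import of PdfReader sits outside this diff, so the exact package is an assumption; with pypdf, image-only pages can contribute empty output, and older PyPDF2 releases may return None from extract_text(). A defensive variant of the same idea, offered only as a sketch and not part of this commit:

from pypdf import PdfReader  # assumption: PdfReader comes from pypdf in this app

def extract_pdf_text(file_path: str) -> str:
    """Hypothetical helper: join page text, tolerating pages with no extractable text."""
    reader = PdfReader(file_path)
    # `or ""` guards against extractors that return None for image-only pages.
    return "".join(page.extract_text() or "" for page in reader.pages)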
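The commit also removes the send_button.click(...) binding and the download_link textbox while trimming handle_generate_json to a single return value, so within these hunks the handler is no longer attached to any event. A minimal sketch of how the single-output handler could be rewired inside the same gr.Blocks context, assuming a hypothetical generate_button component that does not appear in this diff:

# Hypothetical wiring, not part of this commit; place inside the gr.Blocks block.
generate_button = gr.Button("Generate JSON Summary")  # assumed component name
generate_button.click(
    handle_generate_json,   # now returns only the JSON content
    inputs=[history_state],
    outputs=[json_view],    # download_link no longer exists
)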