jzou19950715 committed on
Commit
41059d3
·
verified ·
1 Parent(s): 22aecfa

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +158 -425
app.py CHANGED
@@ -1,457 +1,190 @@
1
- """
2
- Module: blockchain_analyzer_app
3
- Description: A Gradio-based web application for blockchain wallet analysis
4
- using Etherscan's API and OpenAI for conversational insights.
5
- """
6
-
7
- import os
8
- import re
9
- import aiohttp
10
- import asyncio
11
- import openai
12
- import plotly.graph_objects as go
13
- from datetime import datetime
14
- from typing import List, Dict, Tuple, Any, Optional
15
- from decimal import Decimal
16
- import time
17
-
18
- import gradio as gr
19
-
20
- ###############################################################################
21
- # CONSTANTS #
22
- ###############################################################################
23
-
24
- SYSTEM_PROMPT: str = """
25
- You are LOSS DOG 🐕 (Learning & Observing Smart Systems Digital Output Generator), an adorable blockchain-sniffing puppy!
26
- Your personality:
27
- - Friendly and enthusiastic
28
- - Explain findings in fun, simple ways
29
  """
30
 
31
ETHERSCAN_BASE_URL: str = "https://api.etherscan.io/api"
ETHEREUM_ADDRESS_REGEX: str = r"0x[a-fA-F0-9]{40}"

###############################################################################
#                              ETHERSCAN CLIENT                               #
###############################################################################

class EtherscanClient:
    """Async client for the Etherscan REST API with simple client-side rate limiting.

    All network methods return plain dict/list payloads; errors are reported as
    ``{"error": "..."}`` dicts (or empty/zero fallbacks) rather than raised.
    """

    def __init__(self, api_key: str):
        self.api_key = api_key
        self.base_url = ETHERSCAN_BASE_URL
        self.last_request_time = 0
        # Free tier allows at most 5 requests per second.
        self.rate_limit_delay = 0.2

    async def _rate_limit(self) -> None:
        """Sleep just long enough to keep requests at least `rate_limit_delay` apart."""
        current_time = time.time()
        time_passed = current_time - self.last_request_time
        if time_passed < self.rate_limit_delay:
            await asyncio.sleep(self.rate_limit_delay - time_passed)
        self.last_request_time = time.time()

    async def fetch_data(self, params: Dict[str, str], max_retries: int = 3) -> Dict[str, Any]:
        """Fetch JSON from the Etherscan API.

        Args:
            params: Query parameters (``apikey`` is added automatically).
            max_retries: Bounded number of retries on "Max rate limit reached"
                responses (the previous implementation recursed without a cap,
                risking unbounded recursion under sustained throttling).

        Returns:
            The decoded JSON payload, or ``{"error": "..."}`` on failure.
        """
        params["apikey"] = self.api_key

        for attempt in range(max_retries + 1):
            await self._rate_limit()
            try:
                async with aiohttp.ClientSession() as session:
                    async with session.get(self.base_url, params=params) as response:
                        if response.status != 200:
                            return {"error": f"API request failed: {response.status}"}
                        data = await response.json()
            except Exception as e:
                return {"error": f"Error fetching data: {str(e)}"}

            # Use .get(): some endpoints (e.g. the proxy module) omit "status",
            # which previously raised KeyError and surfaced as a generic error.
            if data.get("status") == "0":
                error_msg = data.get('message', 'Unknown error')
                if "Max rate limit reached" in error_msg and attempt < max_retries:
                    await asyncio.sleep(1)
                    continue
                return {"error": f"API error: {error_msg}"}
            return data

        return {"error": "API error: Max rate limit reached"}

    async def get_eth_balance(self, address: str) -> Decimal:
        """Return the ETH balance of `address` in ether (0 on any error)."""
        params = {
            "module": "account",
            "action": "balance",
            "address": address,
            "tag": "latest"
        }

        data = await self.fetch_data(params)
        if "error" not in data and "result" in data:
            # Etherscan reports balances in Wei (1 ETH = 1e18 Wei).
            return Decimal(data["result"]) / Decimal("1000000000000000000")
        return Decimal(0)

    async def get_token_transfers(self, address: str) -> List[Dict[str, Any]]:
        """Return ERC20 transfer records for `address`, newest first ([] on error)."""
        params = {
            "module": "account",
            "action": "tokentx",
            "address": address,
            "sort": "desc"
        }

        data = await self.fetch_data(params)
        return data.get("result", []) if "error" not in data else []

    async def get_nfts(self, address: str) -> List[Dict[str, Any]]:
        """Return NFT (ERC721/ERC1155) transfer records for `address` ([] on error)."""
        params = {
            "module": "account",
            "action": "tokennfttx",
            "address": address,
            "sort": "desc"
        }

        data = await self.fetch_data(params)
        return data.get("result", []) if "error" not in data else []

    def format_nfts(self, nft_transfers: List[Dict[str, Any]], wallet_address: str) -> List[Dict[str, Any]]:
        """Replay NFT transfers into the wallet's current holdings.

        An inbound transfer records the NFT; a later outbound transfer of the
        same (contract, tokenID) removes it. Address comparison is
        case-insensitive since Etherscan mixes checksummed and lowercase forms.
        """
        nft_holdings = {}

        for transfer in nft_transfers:
            token_id = transfer["tokenID"]
            contract = transfer["contractAddress"]
            key = f"{contract}_{token_id}"

            if transfer["to"].lower() == wallet_address.lower():
                nft_holdings[key] = {
                    "token_id": token_id,
                    "contract": contract,
                    "name": transfer.get("tokenName", "Unknown NFT"),
                    "symbol": transfer.get("tokenSymbol", ""),
                    "from": transfer["from"],
                    "timestamp": transfer["timeStamp"]
                }
            elif transfer["from"].lower() == wallet_address.lower():
                nft_holdings.pop(key, None)

        return list(nft_holdings.values())

    async def get_portfolio(self, address: str) -> Dict[str, Any]:
        """Return the wallet's full portfolio: ETH balance, token balances, NFTs.

        The three Etherscan calls are issued concurrently; token balances are
        reconstructed by summing signed transfer amounts per contract.
        """
        eth_balance, token_transfers, nft_transfers = await asyncio.gather(
            self.get_eth_balance(address),
            self.get_token_transfers(address),
            self.get_nfts(address)
        )

        token_holdings = {}
        for transfer in token_transfers:
            contract = transfer["contractAddress"]
            if contract not in token_holdings:
                token_holdings[contract] = {
                    "name": transfer["tokenName"],
                    "symbol": transfer["tokenSymbol"],
                    "decimals": int(transfer["tokenDecimal"]),
                    "contract": contract,
                    "balance": Decimal(0)
                }

            amount = Decimal(transfer["value"]) / Decimal(10 ** int(transfer["tokenDecimal"]))
            if transfer["to"].lower() == address.lower():
                token_holdings[contract]["balance"] += amount
            elif transfer["from"].lower() == address.lower():
                token_holdings[contract]["balance"] -= amount

        # Drop fully-spent tokens so the UI only shows live positions.
        token_holdings = {k: v for k, v in token_holdings.items() if v["balance"] > 0}

        return {
            "eth_balance": eth_balance,
            "tokens": list(token_holdings.values()),
            "nfts": self.format_nfts(nft_transfers, address)
        }
173
-
174
- ###############################################################################
175
- # HELPER FUNCTIONS #
176
- ###############################################################################
177
-
178
def validate_openai_key(api_key: str) -> Tuple[bool, str]:
    """Check that an OpenAI API key works by issuing a one-token request.

    Returns (is_valid, human-readable status message).
    """
    if not api_key:
        return False, "OpenAI API key is required!"
    try:
        # Cheapest possible round-trip: a single-token completion.
        openai.OpenAI(api_key=api_key).chat.completions.create(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": "test"}],
            max_tokens=1,
        )
    except Exception as e:
        return False, f"OpenAI API key validation failed: {str(e)}"
    return True, "OpenAI API key is valid! 🎉"
193
-
194
def validate_etherscan_key(api_key: str) -> Tuple[bool, str]:
    """Verify an Etherscan API key by requesting the total ETH supply.

    Returns (is_valid, human-readable status message).
    """
    if not api_key:
        return False, "Etherscan API key is required!"

    async def _probe() -> bool:
        # ethsupply is a cheap authenticated endpoint needing no address.
        response = await EtherscanClient(api_key).fetch_data(
            {"module": "stats", "action": "ethsupply"}
        )
        return "error" not in response

    try:
        if asyncio.run(_probe()):
            return True, "Etherscan API key is valid! 🎉"
        return False, "Invalid Etherscan API key"
    except Exception as e:
        return False, f"Etherscan API key validation failed: {str(e)}"
213
 
214
def create_visualizations(portfolio: Dict[str, Any]) -> Dict[str, go.Figure]:
    """Build Plotly figures for a wallet portfolio.

    Produces up to two figures: a donut chart of asset balances under the
    "portfolio" key, and a bar chart of NFT collection counts under "nfts".
    Figures are omitted when there is nothing to plot; any rendering error is
    logged and an incomplete dict is returned instead of raising.
    """
    figures = {}

    try:
        labels = []
        amounts = []

        if portfolio["eth_balance"] > 0:
            labels.append("Ethereum")
            amounts.append(float(portfolio["eth_balance"]))

        for token in portfolio["tokens"]:
            labels.append(f"{token['name']} ({token['symbol']})")
            amounts.append(float(token["balance"]))

        if labels and amounts:
            pie = go.Figure(data=[go.Pie(
                labels=labels,
                values=amounts,
                hole=0.3,
                textinfo='label+percent'
            )])
            pie.update_layout(title="Portfolio Distribution", showlegend=True)
            figures["portfolio"] = pie

        nfts = portfolio["nfts"]
        if nfts:
            counts = {}
            for nft in nfts:
                counts[nft["name"]] = counts.get(nft["name"], 0) + 1

            if counts:
                bar = go.Figure(data=[go.Bar(
                    x=list(counts.keys()),
                    y=list(counts.values()),
                    text=list(counts.values()),
                    textposition='auto',
                )])
                bar.update_layout(
                    title="NFT Collections",
                    xaxis_title="Collection",
                    yaxis_title="Count",
                    showlegend=False
                )
                figures["nfts"] = bar

    except Exception as e:
        print(f"Error generating visualizations: {str(e)}")

    return figures
273
-
274
async def process_message(
    message: str,
    openai_key: str,
    etherscan_key: str,
    history: List[Dict[str, str]]
) -> Tuple[List[Dict[str, str]], Optional[go.Figure], Optional[go.Figure], str]:
    """Route one chat message.

    If the message contains an Ethereum address, analyze that wallet and return
    portfolio/NFT figures; otherwise answer with the OpenAI chat model. Always
    returns (updated history, portfolio figure or None, nft figure or None, ""),
    where the final "" clears the input textbox.
    """
    if not message.strip():
        return history, None, None, ""

    address_match = re.search(ETHEREUM_ADDRESS_REGEX, message)
    if address_match:
        wallet = address_match.group(0)
        etherscan = EtherscanClient(etherscan_key)

        try:
            portfolio = await etherscan.get_portfolio(wallet)
            visuals = create_visualizations(portfolio)

            bot_message = "\n".join([
                f"📊 Portfolio Summary for {wallet[:8]}...{wallet[-6:]}",
                f"💎 ETH Balance: {portfolio['eth_balance']:.4f} ETH",
                f"🪙 Tokens: {len(portfolio['tokens'])} different tokens",
                f"🎨 NFTs: {len(portfolio['nfts'])} NFTs in collection",
            ])

            history.append({"role": "user", "content": message})
            history.append({"role": "assistant", "content": bot_message})
            return history, visuals.get("portfolio"), visuals.get("nfts"), ""

        except Exception as e:
            history.append({"role": "user", "content": message})
            history.append({"role": "assistant", "content": f"Error analyzing wallet: {str(e)}"})
            return history, None, None, ""

    # No wallet address found: fall back to a plain LLM conversation.
    try:
        client = openai.OpenAI(api_key=openai_key)
        response = client.chat.completions.create(
            model="gpt-4o-mini",
            messages=[
                {"role": "system", "content": SYSTEM_PROMPT},
                {"role": "user", "content": message}
            ],
            temperature=0.7,
            max_tokens=150
        )
        history.append({"role": "user", "content": message})
        history.append({"role": "assistant", "content": response.choices[0].message.content})
    except Exception as e:
        history.append({"role": "user", "content": message})
        history.append({"role": "assistant", "content": f"Error generating response: {str(e)}"})
    return history, None, None, ""
334
-
335
- ###############################################################################
336
- # GRADIO UI #
337
- ###############################################################################
338
-
339
def create_interface() -> gr.Blocks:
    """Assemble the Gradio Blocks UI for the wallet analyzer.

    Layout: intro markdown, two API-key fields, a validation row, the chat
    area, two plot panes, and a message row. Chat inputs start disabled and
    are enabled only after both API keys validate.
    """
    with gr.Blocks(theme=gr.themes.Soft()) as demo:
        gr.Markdown("""
        # 🐕 LOSS DOG: Blockchain Wallet Analyzer

        Welcome to LOSS DOG - Your friendly blockchain analysis companion!
        - Enter your API keys below to get started
        - Input an Ethereum wallet address or ask questions about crypto
        - View portfolio distribution and NFT collection visualizations
        """)

        # API key inputs, side by side.
        with gr.Row():
            with gr.Column():
                openai_key = gr.Textbox(
                    label="OpenAI API Key",
                    type="password",
                    placeholder="Enter your OpenAI API key...",
                    show_label=True
                )
            with gr.Column():
                etherscan_key = gr.Textbox(
                    label="Etherscan API Key",
                    type="password",
                    placeholder="Enter your Etherscan API key...",
                    show_label=True
                )

        validation_status = gr.Textbox(
            label="Validation Status",
            interactive=False,
            show_label=True
        )
        validate_btn = gr.Button("Validate API Keys", variant="primary")

        chatbot = gr.Chatbot(
            label="Chat History",
            show_label=True,
            height=400,
            type="messages"
        )

        with gr.Row():
            portfolio_plot = gr.Plot(label="Portfolio Distribution")
            nft_plot = gr.Plot(label="NFT Collections")

        with gr.Row():
            msg_input = gr.Textbox(
                label="Message",
                placeholder="Enter Ethereum wallet address or ask about crypto...",
                show_label=True
            )
            send_btn = gr.Button("Send", variant="primary")

        # Chat stays disabled until both keys validate.
        msg_input.interactive = False
        send_btn.interactive = False

        def validate_keys(openai_k: str, etherscan_k: str):
            """Validate both keys; enable the chat inputs only when both pass."""
            ok_openai, openai_msg = validate_openai_key(openai_k)
            ok_etherscan, etherscan_msg = validate_etherscan_key(etherscan_k)

            if ok_openai and ok_etherscan:
                return (
                    "✅ Both API keys are valid! You can start chatting now.",
                    gr.update(interactive=True),
                    gr.update(interactive=True)
                )
            return (
                f"❌ Validation failed:\n{openai_msg}\n{etherscan_msg}",
                gr.update(interactive=False),
                gr.update(interactive=False)
            )

        validate_btn.click(
            fn=validate_keys,
            inputs=[openai_key, etherscan_key],
            outputs=[validation_status, msg_input, send_btn]
        )

        async def handle_message(message, openai_k, etherscan_k, chat_history):
            """Async wrapper for process_message."""
            return await process_message(message, openai_k, etherscan_k, chat_history)

        # Wire both the Send button and the textbox Enter key to the same handler.
        for trigger in (send_btn.click, msg_input.submit):
            trigger(
                fn=handle_message,
                inputs=[msg_input, openai_key, etherscan_key, chatbot],
                outputs=[chatbot, portfolio_plot, nft_plot, msg_input]
            )

    return demo
445
 
446
- ###############################################################################
447
- # MAIN EXECUTION #
448
- ###############################################################################
449
-
450
if __name__ == "__main__":
    app = create_interface()
    # Queuing is required so the async chat handlers run correctly.
    app.queue()
    app.launch(server_name="0.0.0.0", server_port=7860, share=True)
 
1
+ from PyPDF2 import PdfReader
2
+ from markdownify import markdownify
3
+
4
+ # Persistent System Prompt
5
+ LOSSDOG_PROMPT = """
6
+ <LossDogFramework version="3.0">
7
+ <Identity>
8
+ <Description>
9
+ You are Loss Dog, a cutting-edge AI career advisor, resume analyzer, and builder. Your primary role is to:
10
+ - Read and analyze the user's resume thoroughly.
11
+ - Use the resume as a knowledge context for all interactions.
12
+ - Engage with the user by answering questions, identifying areas of improvement, and offering suggestions.
13
+ </Description>
14
+ </Identity>
15
+ <CoreDirectives>
16
+ <Mission>
17
+ Your mission is to provide actionable resume advice. Always leverage the uploaded resume to give feedback,
18
+ highlight strengths, and identify weaknesses.
19
+ </Mission>
20
+ </CoreDirectives>
21
+ </LossDogFramework>
 
 
 
 
 
 
 
22
  """
23
 
24
def extract_text_from_file(file_path: str, file_name: str) -> str:
    """Extract plain text from an uploaded PDF or TXT file.

    Args:
        file_path: Path of the file on disk.
        file_name: Original file name; its extension selects the parser.

    Returns:
        The extracted text, or a string starting with "Error"/"Unsupported"
        describing the failure (callers check ``startswith("Error")``).

    Fixes the previous version, which returned ``f.read()`` with ``f``
    undefined in the PDF branch and had no TXT branch at all.
    """
    if file_name.endswith(".pdf"):
        try:
            pdf_reader = PdfReader(file_path)
            # extract_text() may return None for image-only pages.
            return "\n".join(page.extract_text() or "" for page in pdf_reader.pages)
        except Exception as e:
            return f"Error reading PDF file: {str(e)}"
    elif file_name.endswith(".txt"):
        try:
            with open(file_path, "r", encoding="utf-8") as f:
                return f.read()
        except Exception as e:
            return f"Error reading text file: {str(e)}"
    return "Unsupported file format. Please upload a PDF or TXT file."
33
+
34
+
35
def convert_to_markdown(text: str) -> str:
    """Render extracted resume text as Markdown for the preview pane."""
    # ATX heading style (# Heading) keeps the preview compact.
    rendered = markdownify(text, heading_style="ATX")
    return rendered
38
+
39
def interact_with_lossdog(
    user_message: str,
    markdown_text: str,
    api_key: str,
    history: list
) -> list:
    """Run one chat turn against the model, with the resume as standing context.

    The system prompt and the resume Markdown are prepended to every request,
    followed by the prior conversation and the new user message. On success the
    updated history (user turn + assistant reply appended) is returned; on any
    failure the error is appended to the original history as an assistant
    message so it is visible in the chat.
    """
    try:
        # NOTE(review): this uses the legacy openai.ChatCompletion API, which
        # assumes openai < 1.0 — confirm the pinned package version.
        openai.api_key = api_key

        # Keep only well-formed {"role", "content"} entries from prior turns.
        clean_history = [
            {"role": m["role"], "content": m["content"]}
            for m in history
            if isinstance(m, dict) and "role" in m and "content" in m
        ]

        messages = [
            {"role": "system", "content": LOSSDOG_PROMPT},
            {"role": "system", "content": f"Resume Content:\n{markdown_text}"},
            *clean_history,
            {"role": "user", "content": user_message},
        ]

        response = openai.ChatCompletion.create(
            model="gpt-4o-mini",
            messages=messages,
            max_tokens=4000  # You can adjust this as needed
        )
        assistant_reply = response.choices[0].message.content

        clean_history.append({"role": "user", "content": user_message})
        clean_history.append({"role": "assistant", "content": assistant_reply})
        return clean_history
    except Exception as e:
        # Surface the failure in the chat instead of crashing the UI.
        history.append({"role": "assistant", "content": f"Error: {str(e)}"})
        return history
95
+
96
def create_demo():
    """Build the Gradio app: chat pane, resume preview pane, upload, and wiring."""
    with gr.Blocks(css="#resume-preview {height:300px; overflow-y:auto; border:1px solid #ccc; padding:10px;}") as demo:
        gr.Markdown("""
        # 🐕 LOSS Dog: AI-Powered Resume Advisor

        **Steps**:
        1. Upload your resume (PDF/TXT). It will appear in a scrollable box on the right.
        2. Ask any questions or request feedback. LOSS Dog always references the uploaded resume.
        3. Enjoy a back-and-forth conversation to refine your resume!
        """)

        api_key = gr.Textbox(
            label="OpenAI API Key",
            placeholder="Enter your OpenAI API key...",
            type="password"
        )

        # Chat on the left, resume preview on the right.
        with gr.Row():
            with gr.Column(scale=3):
                chatbot = gr.Chatbot(label="Chat with LOSS Dog", type="messages")
            with gr.Column(scale=1):
                markdown_preview = gr.Markdown(label="Resume Preview", elem_id="resume-preview")

        with gr.Row():
            user_input = gr.Textbox(label="Your Message", lines=1)
            send_button = gr.Button("Send 🐾")

        with gr.Row():
            upload = gr.File(label="Upload Your Resume (PDF or TXT)")

        history_state = gr.State([])   # running chat history
        markdown_state = gr.State("")  # resume text stored as Markdown

        def handle_upload(file, api_key):
            """Extract the resume, convert to Markdown, and fill the preview pane."""
            if not file:
                return "No file uploaded.", gr.update(value=[])

            raw_text = extract_text_from_file(file.name, file.name)
            if raw_text.startswith("Error"):
                # Show the extraction error directly in the preview.
                return raw_text, gr.update(value=[])

            return convert_to_markdown(raw_text), gr.update(value=[])

        def handle_message(user_message, api_key, markdown_text, history):
            """Forward one chat turn to the model along with the stored resume."""
            updated = interact_with_lossdog(user_message, markdown_text, api_key, history)
            return updated, updated

        upload.change(
            handle_upload,
            inputs=[upload, api_key],
            outputs=[markdown_preview, history_state]
        )

        send_button.click(
            handle_message,
            inputs=[user_input, api_key, markdown_state, history_state],
            outputs=[chatbot, history_state]
        )

        # Mirror the preview into markdown_state so later turns can read it.
        markdown_preview.change(
            lambda markdown_content: markdown_content,
            inputs=[markdown_preview],
            outputs=[markdown_state]
        )

    return demo
189
 
190
+ if __name__ == "__main__":