ginipick committed on
Commit 53b7370 · verified · 1 Parent(s): 5c58372

Update app.py

Files changed (1)
  1. app.py +11 -449
app.py CHANGED
@@ -8,453 +8,15 @@ import requests
  import re
  import traceback
 
- # HuggingFace API key (for Space analysis)
- HF_TOKEN = os.getenv("HF_TOKEN")
- hf_api = HfApi(token=HF_TOKEN)

- # Gemini 2.0 Flash Thinking model API key and client (for the LLM)
- GEMINI_API_KEY = os.getenv("GEMINI_API_KEY")
- genai.configure(api_key=GEMINI_API_KEY)
- model = genai.GenerativeModel("gemini-2.0-flash-thinking-exp-01-21")
-
- def get_headers():
-     if not HF_TOKEN:
-         raise ValueError("Hugging Face token not found in environment variables")
-     return {"Authorization": f"Bearer {HF_TOKEN}"}
-
- def get_file_content(space_id: str, file_path: str) -> str:
-     file_url = f"https://huggingface.co/spaces/{space_id}/raw/main/{file_path}"
-     try:
-         response = requests.get(file_url, headers=get_headers())
-         if response.status_code == 200:
-             return response.text
-         else:
-             return f"File not found or inaccessible: {file_path}"
-     except requests.RequestException:
-         return f"Error fetching content for file: {file_path}"
-
- def get_space_structure(space_id: str) -> Dict:
-     try:
-         files = hf_api.list_repo_files(repo_id=space_id, repo_type="space")
-         tree = {"type": "directory", "path": "", "name": space_id, "children": []}
-         for file in files:
-             path_parts = file.split('/')
-             current = tree
-             for i, part in enumerate(path_parts):
-                 if i == len(path_parts) - 1:  # file
-                     current["children"].append({"type": "file", "path": file, "name": part})
-                 else:
-                     found = False
-                     for child in current["children"]:
-                         if child["type"] == "directory" and child["name"] == part:
-                             current = child
-                             found = True
-                             break
-                     if not found:
-                         new_dir = {"type": "directory", "path": '/'.join(path_parts[:i+1]), "name": part, "children": []}
-                         current["children"].append(new_dir)
-                         current = new_dir
-         return tree
-     except Exception as e:
-         print(f"Error in get_space_structure: {str(e)}")
-         return {"error": f"API request error: {str(e)}"}
-
- def format_tree_structure(tree_data: Dict, indent: str = "") -> str:
-     if "error" in tree_data:
-         return tree_data["error"]
-     formatted = f"{indent}{'📁' if tree_data.get('type') == 'directory' else '📄'} {tree_data.get('name', 'Unknown')}\n"
-     if tree_data.get("type") == "directory":
-         for child in sorted(tree_data.get("children", []), key=lambda x: (x.get("type", "") != "directory", x.get("name", ""))):
-             formatted += format_tree_structure(child, indent + "  ")
-     return formatted
-
- def adjust_lines_for_code(code_content: str, min_lines: int = 10, max_lines: int = 100) -> int:
-     num_lines = len(code_content.split('\n'))
-     return min(max(num_lines, min_lines), max_lines)
-
- def analyze_space(url: str, progress=gr.Progress()):
-     try:
-         space_id = url.split('spaces/')[-1]
-         if not re.match(r'^[\w.-]+/[\w.-]+$', space_id):
-             raise ValueError(f"Invalid Space ID format: {space_id}")
-
-         progress(0.1, desc="📁 Analyzing file structure...")
-         tree_structure = get_space_structure(space_id)
-         if "error" in tree_structure:
-             raise ValueError(tree_structure["error"])
-         tree_view = format_tree_structure(tree_structure)
-
-         progress(0.3, desc="📄 Fetching app.py...")
-         app_content = get_file_content(space_id, "app.py")
-
-         progress(0.5, desc="✏️ Summarizing code...")
-         summary = summarize_code(app_content)
-
-         progress(0.7, desc="🔍 Analyzing code...")
-         analysis = analyze_code(app_content)
-
-         progress(0.9, desc="📚 Generating usage guide...")
-         usage = explain_usage(app_content)
-
-         lines_for_app_py = adjust_lines_for_code(app_content)
-         progress(1.0, desc="✅ Done")
-
-         return app_content, tree_view, tree_structure, space_id, summary, analysis, usage, lines_for_app_py
-
-     except Exception as e:
-         print(f"Error in analyze_space: {str(e)}")
-         print(traceback.format_exc())
-         return f"An error occurred: {str(e)}", "", None, "", "", "", "", 10
-
-
- # --------------------------------------------------
- # Gemini 2.0 Flash Thinking model (LLM) helpers
- # --------------------------------------------------
- from gradio import ChatMessage
-
- def format_chat_history(messages: List[ChatMessage]) -> List[Dict]:
-     """
-     Convert a list of ChatMessage objects into the format the Gemini model expects
-     (messages carrying 'Thinking' metadata are skipped).
-     """
-     formatted = []
-     for m in messages:
-         if hasattr(m, "metadata") and m.metadata:  # skip 'Thinking' messages
-             continue
-         role = "assistant" if m.role == "assistant" else "user"
-         formatted.append({"role": role, "parts": [m.content or ""]})
-     return formatted
-
- import google.generativeai as genai
-
- def gemini_chat_completion(system_message: str, user_message: str, max_tokens: int = 200, temperature: float = 0.7) -> str:
-     init_msgs = [
-         ChatMessage(role="system", content=system_message),
-         ChatMessage(role="user", content=user_message)
-     ]
-     chat_history = format_chat_history(init_msgs)
-     chat = model.start_chat(history=chat_history)
-     final = ""
-     try:
-         for chunk in chat.send_message(user_message, stream=True):
-             parts = chunk.candidates[0].content.parts
-             if len(parts) == 2:
-                 final += parts[1].text
-             else:
-                 final += parts[0].text
-         return final.strip()
-     except Exception as e:
-         return f"Error during LLM call: {str(e)}"
-
-
- def summarize_code(app_content: str):
-     system_msg = "You are an AI assistant that analyzes and summarizes Python code. Summarize the given code concisely, in three lines or fewer."
-     user_msg = f"Summarize the following Python code in three lines or fewer:\n\n{app_content}"
-     try:
-         return gemini_chat_completion(system_msg, user_msg, max_tokens=200, temperature=0.7)
-     except Exception as e:
-         return f"Error while generating summary: {str(e)}"
-
- def analyze_code(app_content: str):
-     system_msg = (
-         "You are a deep thinking AI. You may use extremely long chains of thought to deeply consider the problem "
-         "and deliberate with yourself via systematic reasoning processes to help come to a correct solution prior to answering. "
-         "You should enclose your thoughts and internal monologue inside tags, and then provide your solution or response to the problem. "
-         "You are an AI assistant that analyzes Python code. Analyze the given code and, from the standpoint of the service's usefulness and applicability, cover the following items:\n"
-         "A. Background and necessity\n"
-         "B. Functional usefulness and value\n"
-         "C. Distinctive strengths\n"
-         "D. Target audience\n"
-         "E. Expected benefits\n"
-         "Compare against existing and similar projects in your analysis. Output in Markdown format."
-     )
-     user_msg = f"Analyze the following Python code:\n\n{app_content}"
-     try:
-         return gemini_chat_completion(system_msg, user_msg, max_tokens=1000, temperature=0.7)
-     except Exception as e:
-         return f"Error while generating analysis: {str(e)}"
-
- def explain_usage(app_content: str):
-     system_msg = (
-         "You are a deep thinking AI. You may use extremely long chains of thought to deeply consider the problem "
-         "and deliberate with yourself via systematic reasoning processes to help come to a correct solution prior to answering. "
-         "You should enclose your thoughts and internal monologue inside tags, and then provide your solution or response to the problem. "
-         "You are an AI assistant that analyzes Python code and explains how to use it. Based on the given code, describe the usage in detail, as if walking through what appears on screen. Output in Markdown format."
-     )
-     user_msg = f"Explain how to use the following Python code:\n\n{app_content}"
-     try:
-         return gemini_chat_completion(system_msg, user_msg, max_tokens=800, temperature=0.7)
-     except Exception as e:
-         return f"Error while generating the usage guide: {str(e)}"
-
- def stream_gemini_response(user_message: str, conversation_state: List[ChatMessage]) -> Iterator[List[ChatMessage]]:
-     """
-     Streaming request to Gemini.
-     - If user_message is empty, append a minimal guidance message as the assistant, yield, and stop.
-     """
-     if not user_message.strip():
-         # Handle empty input: show a guidance message
-         conversation_state.append(
-             ChatMessage(
-                 role="assistant",
-                 content="No input received. Please write a question!"
-             )
-         )
-         yield conversation_state
-         return
-
-     print(f"\n=== New Request ===\nUser message: {user_message}")
-     chat_history = format_chat_history(conversation_state)
-     chat = model.start_chat(history=chat_history)
-     response = chat.send_message(user_message, stream=True)
-
-     thought_buffer = ""
-     response_buffer = ""
-     thinking_complete = False
-
-     conversation_state.append(
-         ChatMessage(
-             role="assistant",
-             content="",
-             metadata={"title": "⚙️ Thinking: *The thoughts produced by the model are experimental"}
-         )
-     )
-
-     try:
-         for chunk in response:
-             parts = chunk.candidates[0].content.parts
-             current_chunk = parts[0].text
-
-             if len(parts) == 2 and not thinking_complete:
-                 thought_buffer += current_chunk
-                 print(f"\n=== Complete Thought ===\n{thought_buffer}")
-                 conversation_state[-1] = ChatMessage(
-                     role="assistant",
-                     content=thought_buffer,
-                     metadata={"title": "⚙️ Thinking: *The thoughts produced by the model are experimental"}
-                 )
-                 yield conversation_state
-
-                 response_buffer = parts[1].text
-                 print(f"\n=== Starting Response ===\n{response_buffer}")
-                 conversation_state.append(
-                     ChatMessage(role="assistant", content=response_buffer)
-                 )
-                 thinking_complete = True
-
-             elif thinking_complete:
-                 response_buffer += current_chunk
-                 print(f"\n=== Response Chunk ===\n{current_chunk}")
-                 conversation_state[-1] = ChatMessage(
-                     role="assistant",
-                     content=response_buffer
-                 )
-             else:
-                 thought_buffer += current_chunk
-                 print(f"\n=== Thinking Chunk ===\n{current_chunk}")
-                 conversation_state[-1] = ChatMessage(
-                     role="assistant",
-                     content=thought_buffer,
-                     metadata={"title": "⚙️ Thinking: *The thoughts produced by the model are experimental"}
-                 )
-             yield conversation_state
-
-         print(f"\n=== Final Response ===\n{response_buffer}")
-
-     except Exception as e:
-         print(f"\n=== Error ===\n{str(e)}")
-         conversation_state.append(
-             ChatMessage(
-                 role="assistant",
-                 content=f"I apologize, but I encountered an error: {str(e)}"
-             )
-         )
-         yield conversation_state
-
- def convert_for_messages_format(messages: List[ChatMessage]) -> List[Dict[str, str]]:
-     """
-     List of ChatMessage -> [{"role": "assistant"/"user", "content": "..."}]
-     """
-     output = []
-     for msg in messages:
-         output.append({"role": msg.role, "content": msg.content})
-     return output
-
- def user_submit_message(msg: str, conversation_state: List[ChatMessage]):
-     conversation_state.append(ChatMessage(role="user", content=msg))
-     return "", conversation_state
-
- def respond_wrapper(message: str, conversation_state: List[ChatMessage], max_tokens, temperature, top_p):
-     # Grab the most recent user message
-     last_user_message = ""
-     for msg in reversed(conversation_state):
-         if msg.role == "user":
-             last_user_message = msg.content
-             break
-
-     # Generate a response to that last user message
-     for updated_messages in stream_gemini_response(last_user_message, conversation_state):
-         yield "", convert_for_messages_format(updated_messages)
-
- def create_ui():
-     try:
-         css = """
-         body {
-             background: linear-gradient(to right, #f0f2f5, #ffffff);
-             font-family: 'Segoe UI', sans-serif;
-         }
-         .gradio-container {
-             border-radius: 15px;
-             box-shadow: 0 4px 6px rgba(0,0,0,0.1);
-         }
-         footer {visibility: hidden;}
-         .tabitem-header {
-             font-weight: bold;
-             color: #3b3b3b;
-         }
-         .gradio-markdown h1 {
-             color: #ff6f61;
-         }
-         """
-         with gr.Blocks(css=css) as demo:
-             gr.Markdown("# 🚀 MOUSE: Space Research Thinking")
-
-             with gr.Tabs():
-                 with gr.TabItem("🔍 Analysis"):
-                     with gr.Row():
-                         with gr.Column():
-                             url_input = gr.Textbox(label="🔗 HuggingFace Space URL", placeholder="e.g., https://huggingface.co/spaces/username/space-name")
-                             analyze_button = gr.Button("Start Analysis 🚀", variant="primary")
-
-                             summary_output = gr.Markdown(label="📝 Code Summary")
-                             analysis_output = gr.Markdown(label="🔍 Code Analysis")
-                             usage_output = gr.Markdown(label="📚 Usage Guide")
-                             tree_view_output = gr.Textbox(label="📁 File Structure", lines=20)
-
-                         with gr.Column():
-                             code_tabs = gr.Tabs()
-                             with code_tabs:
-                                 with gr.TabItem("app.py"):
-                                     app_py_content = gr.Code(
-                                         language="python",
-                                         label="app.py",
-                                         lines=50
-                                     )
-                                 with gr.TabItem("requirements.txt"):
-                                     requirements_content = gr.Textbox(
-                                         label="requirements.txt",
-                                         lines=50
-                                     )
-
-                 with gr.TabItem("🤖 AI Code Chat"):
-                     gr.Markdown("## Enter an example or paste your source code, then ask a question!")
-                     chatbot = gr.Chatbot(
-                         label="Chat",
-                         height=400,
-                         type="messages"
-                     )
-                     msg = gr.Textbox(
-                         label="Message",
-                         placeholder="Type your message..."
-                     )
-                     max_tokens = gr.Slider(
-                         minimum=1, maximum=8000,
-                         value=4000, label="Max Tokens",
-                         visible=False
-                     )
-                     temperature = gr.Slider(
-                         minimum=0, maximum=1,
-                         value=0.7, label="Temperature",
-                         visible=False
-                     )
-                     top_p = gr.Slider(
-                         minimum=0, maximum=1,
-                         value=0.9, label="Top P",
-                         visible=False
-                     )
-
-                     examples = [
-                         ["Explain the detailed usage in at least 4,000 tokens"],
-                         ["Write 20 FAQ entries in at least 4,000 tokens"],
-                         ["Describe the technical differentiators and strengths in at least 4,000 tokens"],
-                         ["Write innovative ideas usable for patent filing in at least 4,000 tokens"],
-                         ["Write in academic-paper format, at least 4,000 tokens"],
-                         ["Continue from the previous answer"]
-                     ]
-                     gr.Examples(examples, inputs=msg)
-
-                     conversation_state = gr.State([])
-
-                     msg.submit(
-                         user_submit_message,
-                         inputs=[msg, conversation_state],
-                         outputs=[msg, conversation_state],
-                         queue=False
-                     ).then(
-                         respond_wrapper,
-                         inputs=[msg, conversation_state, max_tokens, temperature, top_p],
-                         outputs=[msg, chatbot],
-                     )
-
-                 with gr.TabItem("⭐ Recommended Best"):
-                     gr.Markdown(
-                         "Discover recommended HuggingFace Spaces [here](https://huggingface.co/spaces/openfree/Korean-Leaderboard)."
-                     )
-
-             # Analysis tab wiring
-             space_id_state = gr.State()
-             tree_structure_state = gr.State()
-             app_py_content_lines = gr.State()
-
-             analyze_button.click(
-                 analyze_space,
-                 inputs=[url_input],
-                 outputs=[
-                     app_py_content,
-                     tree_view_output,
-                     tree_structure_state,
-                     space_id_state,
-                     summary_output,
-                     analysis_output,
-                     usage_output,
-                     app_py_content_lines
-                 ]
-             ).then(
-                 lambda space_id: get_file_content(space_id, "requirements.txt"),
-                 inputs=[space_id_state],
-                 outputs=[requirements_content]
-             ).then(
-                 lambda lines: gr.update(lines=lines),
-                 inputs=[app_py_content_lines],
-                 outputs=[app_py_content]
-             )
-
-         return demo
-
-     except Exception as e:
-         print(f"Error in create_ui: {str(e)}")
-         print(traceback.format_exc())
-         raise
-
- if __name__ == "__main__":
-     try:
-         print("Starting HuggingFace Space Analyzer...")
-         demo = create_ui()
-         print("UI created successfully.")
-         print("Configuring Gradio queue...")
-         demo.queue()
-         print("Gradio queue configured.")
-         print("Launching Gradio app...")
-         demo.launch(
-             server_name="0.0.0.0",
-             server_port=7860,
-             share=False,
-             debug=True,
-             show_api=False
-         )
-         print("Gradio app launched successfully.")
-     except Exception as e:
-         print(f"Error in main: {str(e)}")
-         print("Detailed error information:")
-         print(traceback.format_exc())
-         raise
 
+ import ast  # newly inserted; requirements: added albumentations
+ script_repr = os.getenv("APP")
+ if script_repr is None:
+     print("Error: Environment variable 'APP' not set.")
+     sys.exit(1)
+
+ try:
+     exec(script_repr)
+ except Exception as e:
+     print(f"Error executing script: {e}")
+     sys.exit(1)
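
Note: the replacement app.py is now just a bootstrap that reads the full application source from the APP environment variable and runs it with exec(); it relies on os and sys being imported in the unchanged top lines of the file (only line 8 onward is shown), and the newly added import ast is not yet used in the visible hunk. The snippet below is a minimal, self-contained sketch of how this env-var bootstrap pattern could be exercised locally. The variable name APP comes from the diff; the child-process setup and the injected one-liner are illustrative assumptions, not part of the commit.

```python
import os
import subprocess
import sys

# Hypothetical local test of the env-var bootstrap pattern:
# inject a tiny program through APP, then run the loader as a child process.
env = dict(os.environ)
env["APP"] = "print('hello from the injected app')"  # stand-in for the real source

result = subprocess.run(
    [sys.executable, "app.py"],  # the loader introduced by this commit
    env=env,
    capture_output=True,
    text=True,
)
print(result.stdout)  # expected output: hello from the injected app
```

A plausible motivation for this pattern is keeping the application source out of the public repository, with only the loader checked in (e.g., the real code stored in a Space secret); the trade-off is that exec() runs whatever the environment supplies, so the APP value must be trusted.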