openfree committed on
Commit
705c5b5
Β·
verified Β·
1 Parent(s): f2c0975

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +84 -191
app.py CHANGED
@@ -1,222 +1,115 @@
 
 
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
  import os
 
4
  from typing import List, Tuple
5
 
6
- # Hugging Face 토큰 μ„€μ •
7
- HF_TOKEN = os.getenv("HF_TOKEN")
8
-
9
- # Available LLM models
10
- LLM_MODELS = {
11
- "Mistral": "mistralai/Mistral-7B-Instruct-v0.2",
12
- "Zephyr": "HuggingFaceH4/zephyr-7b-beta",
13
- "OpenChat": "openchat/openchat-3.5",
14
- "Llama2": "meta-llama/Llama-2-7b-chat-hf",
15
- "Phi": "microsoft/phi-2",
16
- "Neural": "nvidia/neural-chat-7b-v3-1",
17
- "Starling": "HuggingFaceH4/starling-lm-7b-alpha"
18
- }
19
-
20
- # Default selected models
21
- DEFAULT_MODELS = [
22
- "mistralai/Mistral-7B-Instruct-v0.2",
23
- "HuggingFaceH4/zephyr-7b-beta",
24
- "openchat/openchat-3.5"
25
- ]
26
 
27
- # Initialize clients with token
28
- clients = {
29
- model: InferenceClient(model, token=HF_TOKEN)
30
- for model in LLM_MODELS.values()
31
- }
32
-
33
- def process_file(file) -> str:
34
  if file is None:
35
  return ""
36
- if file.name.endswith(('.txt', '.md')):
37
- return file.read().decode('utf-8')
38
- return f"Uploaded file: {file.name}"
 
 
 
 
 
 
 
 
39
 
40
- def respond_single(
41
- client,
42
- message: str,
43
  history: List[Tuple[str, str]],
44
- system_message: str,
45
- max_tokens: int,
46
- temperature: float,
47
- top_p: float,
 
 
 
 
48
  ):
49
- system_prefix = """λ°˜λ“œμ‹œ ν•œκΈ€λ‘œ 닡변할것. λ„ˆλŠ” 주어진 λ‚΄μš©μ„ 기반으둜 μƒμ„Έν•œ μ„€λͺ…κ³Ό Q&Aλ₯Ό μ œκ³΅ν•˜λŠ” 역할이닀.
50
- μ•„μ£Ό μΉœμ ˆν•˜κ³  μžμ„Έν•˜κ²Œ μ„€λͺ…ν•˜λΌ."""
 
 
 
 
51
 
52
- messages = [{"role": "system", "content": f"{system_prefix} {system_message}"}]
 
 
 
53
 
54
- for user, assistant in history:
55
- if user:
56
- messages.append({"role": "user", "content": user})
57
- if assistant:
58
- messages.append({"role": "assistant", "content": assistant})
59
 
 
 
 
 
 
 
 
 
 
 
 
60
  messages.append({"role": "user", "content": message})
61
-
62
  response = ""
63
  try:
64
- for msg in client.chat_completion(
65
  messages,
66
  max_tokens=max_tokens,
67
  stream=True,
68
  temperature=temperature,
69
  top_p=top_p,
70
  ):
71
- if hasattr(msg.choices[0].delta, 'content'):
72
- token = msg.choices[0].delta.content
73
- if token is not None:
74
- response += token
75
- yield response
76
  except Exception as e:
77
- yield f"Error: {str(e)}"
78
-
79
- def respond_all(
80
- message: str,
81
- file,
82
- history1: List[Tuple[str, str]],
83
- history2: List[Tuple[str, str]],
84
- history3: List[Tuple[str, str]],
85
- selected_models: List[str],
86
- system_message: str,
87
- max_tokens: int,
88
- temperature: float,
89
- top_p: float,
90
- ):
91
- if file:
92
- file_content = process_file(file)
93
- message = f"{message}\n\nFile content:\n{file_content}"
94
-
95
- while len(selected_models) < 3:
96
- selected_models.append(selected_models[-1])
97
-
98
- def generate(client, history):
99
- return respond_single(
100
- client,
101
- message,
102
- history,
103
- system_message,
104
- max_tokens,
105
- temperature,
106
- top_p,
107
- )
108
-
109
- return (
110
- generate(clients[selected_models[0]], history1),
111
- generate(clients[selected_models[1]], history2),
112
- generate(clients[selected_models[2]], history3),
113
- )
114
-
115
- css = """
116
- footer {visibility: hidden}
117
- """
118
-
119
- with gr.Blocks(theme="Nymbo/Nymbo_Theme", css=css) as demo:
120
- with gr.Row():
121
- model_choices = gr.Checkboxgroup(
122
- choices=list(LLM_MODELS.values()),
123
- value=DEFAULT_MODELS,
124
- label="Select Models (Choose up to 3)",
125
- interactive=True
126
- )
127
-
128
- with gr.Row():
129
- with gr.Column():
130
- chat1 = gr.ChatInterface(
131
- lambda message, history: None,
132
- chatbot=gr.Chatbot(height=400, label="Chat 1"),
133
- textbox=False,
134
- )
135
- with gr.Column():
136
- chat2 = gr.ChatInterface(
137
- lambda message, history: None,
138
- chatbot=gr.Chatbot(height=400, label="Chat 2"),
139
- textbox=False,
140
- )
141
- with gr.Column():
142
- chat3 = gr.ChatInterface(
143
- lambda message, history: None,
144
- chatbot=gr.Chatbot(height=400, label="Chat 3"),
145
- textbox=False,
146
- )
147
-
148
- with gr.Row():
149
- with gr.Column():
150
- system_message = gr.Textbox(
151
- value="당신은 μΉœμ ˆν•œ AI μ–΄μ‹œμŠ€ν„΄νŠΈμž…λ‹ˆλ‹€.",
152
- label="System message"
153
- )
154
- max_tokens = gr.Slider(
155
- minimum=1,
156
- maximum=8000,
157
- value=4000,
158
- step=1,
159
- label="Max new tokens"
160
- )
161
- temperature = gr.Slider(
162
- minimum=0,
163
- maximum=1,
164
- value=0.7,
165
- step=0.1,
166
- label="Temperature"
167
- )
168
- top_p = gr.Slider(
169
- minimum=0,
170
- maximum=1,
171
- value=0.9,
172
- step=0.05,
173
- label="Top-p"
174
- )
175
-
176
- with gr.Row():
177
- file_input = gr.File(label="Upload File (optional)")
178
- msg_input = gr.Textbox(
179
- show_label=False,
180
- placeholder="Enter text and press enter",
181
- container=False
182
- )
183
-
184
- examples = [
185
  ["μƒμ„Έν•œ μ‚¬μš© 방법을 마치 화면을 λ³΄λ©΄μ„œ μ„€λͺ…ν•˜λ“―이 4000 토큰 이상 μžμ„Ένžˆ μ„€λͺ…ν•˜λΌ"],
186
  ["FAQ 20건을 μƒμ„Έν•˜κ²Œ μž‘μ„±ν•˜λΌ. 4000토큰 이상 μ‚¬μš©ν•˜λΌ."],
187
  ["μ‚¬μš© 방법과 차별점, νŠΉμ§•, 강점을 μ€‘μ‹¬μœΌλ‘œ 4000 토큰 이상 유튜브 μ˜μƒ 슀크립트 ν˜•νƒœλ‘œ μž‘μ„±ν•˜λΌ"],
188
  ["λ³Έ μ„œλΉ„μŠ€λ₯Ό SEO μ΅œμ ν™”ν•˜μ—¬ λΈ”λ‘œκ·Έ 포슀트둜 4000 토큰 이상 μž‘μ„±ν•˜λΌ"],
 
189
  ["계속 μ΄μ–΄μ„œ λ‹΅λ³€ν•˜λΌ"],
190
- ]
191
-
192
- gr.Examples(
193
- examples=examples,
194
- inputs=msg_input,
195
- cache_examples=False
196
- )
197
-
198
- def submit_message(message, file):
199
- return respond_all(
200
- message,
201
- file,
202
- chat1.chatbot.value,
203
- chat2.chatbot.value,
204
- chat3.chatbot.value,
205
- model_choices.value,
206
- system_message.value,
207
- max_tokens.value,
208
- temperature.value,
209
- top_p.value,
210
- )
211
-
212
- msg_input.submit(
213
- submit_message,
214
- [msg_input, file_input],
215
- [chat1.chatbot, chat2.chatbot, chat3.chatbot],
216
- api_name="submit"
217
- )
218
 
219
  if __name__ == "__main__":
220
- if not HF_TOKEN:
221
- print("Warning: HF_TOKEN environment variable is not set")
222
- demo.launch()
 
1
+
2
+
3
  import gradio as gr
4
  from huggingface_hub import InferenceClient
5
  import os
6
+ import pandas as pd
7
  from typing import List, Tuple
8
 
9
+ # μΆ”λ‘  API ν΄λΌμ΄μ–ΈνŠΈ μ„€μ •
10
+ hf_client = InferenceClient("CohereForAI/c4ai-command-r-plus-08-2024", token=os.getenv("HF_TOKEN"))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
11
 
12
+ def read_uploaded_file(file):
 
 
 
 
 
 
13
  if file is None:
14
  return ""
15
+ try:
16
+ if file.name.endswith('.parquet'):
17
+ df = pd.read_parquet(file.name, engine='pyarrow')
18
+ return df.head(10).to_markdown(index=False)
19
+ else:
20
+ content = file.read()
21
+ if isinstance(content, bytes):
22
+ return content.decode('utf-8')
23
+ return content
24
+ except Exception as e:
25
+ return f"νŒŒμΌμ„ μ½λŠ” 쀑 였λ₯˜κ°€ λ°œμƒν–ˆμŠ΅λ‹ˆλ‹€: {str(e)}"
26
 
27
+ def respond(
28
+ message,
 
29
  history: List[Tuple[str, str]],
30
+ fashion_file, # 파일 μ—…λ‘œλ“œ μž…λ ₯
31
+ uhd_file, # 파일 μ—…λ‘œλ“œ μž…λ ₯
32
+ mixgen_file, # 파일 μ—…λ‘œλ“œ μž…λ ₯
33
+ parquet_file, # 파일 μ—…λ‘œλ“œ μž…λ ₯
34
+ system_message="",
35
+ max_tokens=1024,
36
+ temperature=0.7,
37
+ top_p=0.9,
38
  ):
39
+ system_prefix = """λ°˜λ“œμ‹œ ν•œκΈ€λ‘œ 닡변할것. λ„ˆλŠ” 주어진 μ†ŒμŠ€μ½”λ“œλ₯Ό 기반으둜 "μ„œλΉ„μŠ€ μ‚¬μš© μ„€λͺ… 및 μ•ˆλ‚΄, Q&Aλ₯Ό ν•˜λŠ” 역할이닀". μ•„μ£Ό μΉœμ ˆν•˜κ³  μžμ„Έν•˜κ²Œ 4000토큰 이상 Markdown ν˜•μ‹μœΌλ‘œ μž‘μ„±ν•˜λΌ. λ„ˆλŠ” μ½”λ“œλ₯Ό 기반으둜 μ‚¬μš© μ„€λͺ… 및 질의 응닡을 μ§„ν–‰ν•˜λ©°, μ΄μš©μžμ—κ²Œ 도움을 μ£Όμ–΄μ•Ό ν•œλ‹€. μ΄μš©μžκ°€ κΆκΈˆν•΄ ν•  λ§Œν•œ λ‚΄μš©μ— μΉœμ ˆν•˜κ²Œ μ•Œλ €μ£Όλ„λ‘ ν•˜λΌ. μ½”λ“œ 전체 λ‚΄μš©μ— λŒ€ν•΄μ„œλŠ” λ³΄μ•ˆμ„ μœ μ§€ν•˜κ³ , ν‚€ κ°’ 및 μ—”λ“œν¬μΈνŠΈμ™€ ꡬ체적인 λͺ¨λΈμ€ κ³΅κ°œν•˜μ§€ 마라."""
40
+
41
+ if message.lower() == "νŒ¨μ…˜ μ½”λ“œ μ‹€ν–‰" and fashion_file is not None:
42
+ fashion_content = read_uploaded_file(fashion_file)
43
+ system_message += f"\n\nνŒ¨μ…˜ μ½”λ“œ λ‚΄μš©:\n```python\n{fashion_content}\n```"
44
+ message = "νŒ¨μ…˜ κ°€μƒν”ΌνŒ…μ— λŒ€ν•œ λ‚΄μš©μ„ ν•™μŠ΅ν•˜μ˜€κ³ , μ„€λͺ…ν•  μ€€λΉ„κ°€ λ˜μ–΄μžˆλ‹€κ³  μ•Œλ¦¬κ³  μ„œλΉ„μŠ€ URL(https://aiqcamp-fash.hf.space)을 톡해 ν…ŒμŠ€νŠΈ 해보라고 좜λ ₯ν•˜λΌ."
45
 
46
+ elif message.lower() == "uhd 이미지 μ½”λ“œ μ‹€ν–‰" and uhd_file is not None:
47
+ uhd_content = read_uploaded_file(uhd_file)
48
+ system_message += f"\n\nUHD 이미지 μ½”λ“œ λ‚΄μš©:\n```python\n{uhd_content}\n```"
49
+ message = "UHD 이미지 생성에 λŒ€ν•œ λ‚΄μš©μ„ ν•™μŠ΅ν•˜μ˜€κ³ , μ„€λͺ…ν•  μ€€λΉ„κ°€ λ˜μ–΄μžˆλ‹€κ³  μ•Œλ¦¬κ³  μ„œλΉ„μŠ€ URL(https://openfree-ultpixgen.hf.space)을 톡해 ν…ŒμŠ€νŠΈ 해보라고 좜λ ₯ν•˜λΌ."
50
 
51
+ elif message.lower() == "mixgen μ½”λ“œ μ‹€ν–‰" and mixgen_file is not None:
52
+ mixgen_content = read_uploaded_file(mixgen_file)
53
+ system_message += f"\n\nMixGEN μ½”λ“œ λ‚΄μš©:\n```python\n{mixgen_content}\n```"
54
+ message = "MixGEN3 이미지 생성에 λŒ€ν•œ λ‚΄μš©μ„ ν•™μŠ΅ν•˜μ˜€κ³ , μ„€λͺ…ν•  μ€€λΉ„κ°€ λ˜μ–΄μžˆλ‹€κ³  μ•Œλ¦¬κ³  μ„œλΉ„μŠ€ URL(https://openfree-mixgen3.hf.space)을 톡해 ν…ŒμŠ€νŠΈ 해보라고 좜λ ₯ν•˜λΌ."
 
55
 
56
+ elif message.lower() == "test.parquet μ‹€ν–‰" and parquet_file is not None:
57
+ parquet_content = read_uploaded_file(parquet_file)
58
+ system_message += f"\n\ntest.parquet 파일 λ‚΄μš©:\n```markdown\n{parquet_content}\n```"
59
+ message = "test.parquet νŒŒμΌμ— λŒ€ν•œ λ‚΄μš©μ„ ν•™μŠ΅ν•˜μ˜€κ³ , κ΄€λ ¨ μ„€λͺ… 및 Q&Aλ₯Ό 진행할 μ€€λΉ„κ°€ λ˜μ–΄μžˆλ‹€. κΆκΈˆν•œ 점이 있으면 물어보라."
60
+
61
+ messages = [{"role": "system", "content": f"{system_prefix} {system_message}"}]
62
+ for val in history:
63
+ if val[0]:
64
+ messages.append({"role": "user", "content": val[0]})
65
+ if val[1]:
66
+ messages.append({"role": "assistant", "content": val[1]})
67
  messages.append({"role": "user", "content": message})
68
+
69
  response = ""
70
  try:
71
+ for message in hf_client.chat_completion(
72
  messages,
73
  max_tokens=max_tokens,
74
  stream=True,
75
  temperature=temperature,
76
  top_p=top_p,
77
  ):
78
+ token = message.choices[0].delta.get('content', None)
79
+ if token:
80
+ response += token
81
+ yield response
 
82
  except Exception as e:
83
+ yield f"μΆ”λ‘  쀑 였λ₯˜κ°€ λ°œμƒν–ˆμŠ΅λ‹ˆλ‹€: {str(e)}"
84
+
85
+ # Gradio μΈν„°νŽ˜μ΄μŠ€ μ„€μ •
86
+ demo = gr.ChatInterface(
87
+ respond,
88
+ additional_inputs=[
89
+ gr.File(label="Fashion Code File", file_types=[".cod", ".txt", ".py"]),
90
+ gr.File(label="UHD Image Code File", file_types=[".cod", ".txt", ".py"]),
91
+ gr.File(label="MixGEN Code File", file_types=[".cod", ".txt", ".py"]),
92
+ gr.File(label="Parquet File", file_types=[".parquet"]),
93
+ gr.Textbox(label="System Message", value=""),
94
+ gr.Slider(minimum=1, maximum=8000, value=4000, label="Max Tokens"),
95
+ gr.Slider(minimum=0, maximum=1, value=0.7, label="Temperature"),
96
+ gr.Slider(minimum=0, maximum=1, value=0.9, label="Top P"),
97
+ ],
98
+ examples=[
99
+ ["νŒ¨μ…˜ μ½”λ“œ μ‹€ν–‰"],
100
+ ["UHD 이미지 μ½”λ“œ μ‹€ν–‰"],
101
+ ["MixGEN μ½”λ“œ μ‹€ν–‰"],
102
+ ["test.parquet μ‹€ν–‰"],
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
103
  ["μƒμ„Έν•œ μ‚¬μš© 방법을 마치 화면을 λ³΄λ©΄μ„œ μ„€λͺ…ν•˜λ“―이 4000 토큰 이상 μžμ„Ένžˆ μ„€λͺ…ν•˜λΌ"],
104
  ["FAQ 20건을 μƒμ„Έν•˜κ²Œ μž‘μ„±ν•˜λΌ. 4000토큰 이상 μ‚¬μš©ν•˜λΌ."],
105
  ["μ‚¬μš© 방법과 차별점, νŠΉμ§•, 강점을 μ€‘μ‹¬μœΌλ‘œ 4000 토큰 이상 유튜브 μ˜μƒ 슀크립트 ν˜•νƒœλ‘œ μž‘μ„±ν•˜λΌ"],
106
  ["λ³Έ μ„œλΉ„μŠ€λ₯Ό SEO μ΅œμ ν™”ν•˜μ—¬ λΈ”λ‘œκ·Έ 포슀트둜 4000 토큰 이상 μž‘μ„±ν•˜λΌ"],
107
+ ["νŠΉν—ˆ μΆœμ›μ— ν™œμš©ν•  기술 및 λΉ„μ¦ˆλ‹ˆμŠ€λͺ¨λΈ 츑면을 ν¬ν•¨ν•˜μ—¬ νŠΉν—ˆ μΆœμ›μ„œ ꡬ성에 맞게 μž‘μ„±ν•˜λΌ"],
108
  ["계속 μ΄μ–΄μ„œ λ‹΅λ³€ν•˜λΌ"],
109
+ ],
110
+ theme="Nymbo/Nymbo_Theme",
111
+ cache_examples=False,
112
+ )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
113
 
114
  if __name__ == "__main__":
115
+ demo.launch()