Update app.py
app.py
CHANGED
@@ -21,6 +21,7 @@ API_URL_DEV = "https://api-inference.huggingface.co/models/black-forest-labs/FLU
 API_URL = "https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-schnell"
 
 chatter="K00B404/transcript_image_generator"
+max_tokens_enhance_bot=128
 # Initialize the API client for the chatbot
 chatbot_client = Client(chatter)
 
@@ -79,7 +80,7 @@ def enhance_prompt_v2(prompt, model="mistralai/Mistral-Nemo-Instruct-2407", styl
     result = client.predict(
         system_prompt=system_prompt,
         user_message=user_message,
-        max_tokens=
+        max_tokens=max_tokens_enhance_bot,
         model_id=model,
         api_name="/predict"
     )
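The new max_tokens keyword in this hunk is forwarded to the chatbot Space through gradio_client. A minimal standalone sketch of that call pattern, assuming the K00B404/transcript_image_generator Space exposes a /predict endpoint with exactly these input names (the enhance() wrapper itself is hypothetical):

# Sketch only: keyword arguments are forwarded to the Space's /predict endpoint.
from gradio_client import Client

max_tokens_enhance_bot = 128  # cap introduced by this commit

chatbot_client = Client("K00B404/transcript_image_generator")

def enhance(system_prompt: str, user_message: str,
            model: str = "mistralai/Mistral-Nemo-Instruct-2407") -> str:
    # Assumed endpoint signature: system_prompt, user_message, max_tokens, model_id.
    return chatbot_client.predict(
        system_prompt=system_prompt,
        user_message=user_message,
        max_tokens=max_tokens_enhance_bot,  # bounds the length of the enhanced prompt
        model_id=model,
        api_name="/predict",
    )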
@@ -101,7 +102,7 @@ def mistral_nemo_call(prompt, API_TOKEN, model="mistralai/Mistral-Nemo-Instruct-
         messages=[{"role": "system", "content": system_prompt},
                   {"role": "user", "content": prompt}
                   ],
-        max_tokens=
+        max_tokens=max_tokens_enhance_bot,
         stream=True,
     ):
         response += message.choices[0].delta.content
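mistral_nemo_call streams the completion and concatenates the per-chunk deltas; the hunk above caps that stream at max_tokens_enhance_bot tokens. A self-contained sketch of the same pattern, assuming huggingface_hub's InferenceClient (the helper name and defaults here are illustrative):

# Sketch only: stream a chat completion and accumulate the delta pieces.
from huggingface_hub import InferenceClient

def stream_chat(prompt: str, api_token: str, system_prompt: str = "",
                model: str = "mistralai/Mistral-Nemo-Instruct-2407",
                max_tokens: int = 128) -> str:
    client = InferenceClient(model=model, token=api_token)
    response = ""
    for message in client.chat_completion(
        messages=[{"role": "system", "content": system_prompt},
                  {"role": "user", "content": prompt}],
        max_tokens=max_tokens,  # same cap the commit wires in
        stream=True,
    ):
        # Each streamed chunk carries an incremental piece of the reply.
        response += message.choices[0].delta.content or ""
    return response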
@@ -166,7 +167,7 @@ def query(prompt, is_negative=False, steps=30, cfg_scale=7, sampler="DPM++ 2M Ka
     You must respond only with the enhanced version of the users input prompt
     Remember, image generation models can be stimulated by refering to camera 'effect' in the prompt like :4k ,award winning, super details, 35mm lens, hd
     """
-    prompt = chat_with_persona(message=prompt, history=[], system_message=system_prompt, max_tokens=
+    prompt = chat_with_persona(message=prompt, history=[], system_message=system_prompt, max_tokens=max_tokens_enhance_bot, temperature=0.1, top_p=0.97)
     #prompt = enhance_prompt_v2(prompt, style=enhance_prompt_style)
     print(f'\033[1mGeneration {key} enhanced prompt:\033[0m {prompt}')
 
@@ -177,7 +178,7 @@ def query(prompt, is_negative=False, steps=30, cfg_scale=7, sampler="DPM++ 2M Ka
     You must respond only with the enhanced version of the users input prompt
     Remember, image generation models can be stimulated by refering to camera 'effect' in the prompt like :4k ,award winning, super details, 35mm lens, hd
     """
-    prompt = chat_with_persona(message=prompt, history=[], system_message=system_prompt, max_tokens=
+    prompt = chat_with_persona(message=prompt, history=[], system_message=system_prompt, max_tokens=max_tokens_enhance_bot, temperature=0.1, top_p=0.97)
     #prompt = mistral_nemo_call(prompt, API_TOKEN=API_TOKEN, style=nemo_enhance_prompt_style)
     print(f'\033[1mGeneration {key} Mistral-Nemo prompt:\033[0m {prompt}')
 
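Both call sites in query() now pass the shared cap plus conservative sampling settings to chat_with_persona. An illustrative, runnable sketch of that step; chat_with_persona is stubbed here because its real implementation lives elsewhere in app.py:

# Placeholder stub: the real chat_with_persona calls the chatbot Space / inference API.
def chat_with_persona(message, history, system_message, max_tokens, temperature, top_p):
    return message

max_tokens_enhance_bot = 128
system_prompt = "You must respond only with the enhanced version of the users input prompt"

prompt = "a cat on a rooftop at dusk"  # example user prompt
prompt = chat_with_persona(
    message=prompt,
    history=[],                          # one-shot rewrite, no chat history
    system_message=system_prompt,
    max_tokens=max_tokens_enhance_bot,   # keep the rewritten prompt short
    temperature=0.1,                     # low temperature for a faithful rewrite
    top_p=0.97,
)
print(prompt)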
@@ -230,7 +231,7 @@ title_html="""
 css = """
 .gradio-container {
     background: url(https://huggingface.co/spaces/K00B404/FLUX.1-Dev-Serverless-darn-enhanced-prompt/resolve/main/edge.png);
-    background-size: 900px
+    background-size: 900px 1880px;
     background-repeat: no-repeat;
     background-position: center;
     background-attachment: fixed;