import gradio as gr
import requests
import io
import random
import os
from PIL import Image
import json
# Get API token from environment variable
HF_TOKEN = os.getenv("HF_TOKEN")
if not HF_TOKEN:
raise ValueError("HF_TOKEN environment variable is not set")
def query(prompt, model, custom_lora, is_negative=False, steps=35, cfg_scale=7, sampler="DPM++ 2M Karras", seed=-1, strength=0.7, width=1024, height=1024):
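    """Call the Hugging Face Inference API for the chosen model and return a PIL image.

    A non-empty `custom_lora` repo id overrides the model selection. Despite its name,
    `is_negative` carries the negative-prompt text. Retries up to three times on timeouts.
    """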
print("Starting query function...")
if not prompt:
raise gr.Error("Prompt cannot be empty")
# Set headers with API token
headers = {"Authorization": f"Bearer {HF_TOKEN}"}
    # Random key used to tag log messages for this request
key = random.randint(0, 999)
# Enhance prompt
prompt = f"{prompt} | ultra detail, ultra elaboration, ultra quality, perfect."
print(f'Generation {key}: {prompt}')
try:
# Set API URL based on model selection
if custom_lora.strip():
API_URL = f"https://api-inference.huggingface.co/models/{custom_lora.strip()}"
else:
if model == 'Stable Diffusion XL':
API_URL = "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-xl-base-1.0"
elif model == 'FLUX.1 [Dev]':
API_URL = "https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-dev"
elif model == 'FLUX.1 [Schnell]':
API_URL = "https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-schnell"
elif model == 'Flux Logo Design':
API_URL = "https://api-inference.huggingface.co/models/Shakker-Labs/FLUX.1-dev-LoRA-Logo-Design"
prompt = f"wablogo, logo, Minimalist, {prompt}"
elif model == 'Flux Uncensored':
API_URL = "https://api-inference.huggingface.co/models/enhanceaiteam/Flux-uncensored"
elif model == 'Flux Uncensored V2':
API_URL = "https://api-inference.huggingface.co/models/enhanceaiteam/Flux-Uncensored-V2"
elif model == 'Flux Tarot Cards':
API_URL = "https://api-inference.huggingface.co/models/prithivMLmods/Ton618-Tarot-Cards-Flux-LoRA"
prompt = f"Tarot card, {prompt}"
elif model == 'Pixel Art Sprites':
API_URL = "https://api-inference.huggingface.co/models/sWizad/pokemon-trainer-sprites-pixelart-flux"
prompt = f"a pixel image, {prompt}"
elif model == '3D Sketchfab':
API_URL = "https://api-inference.huggingface.co/models/prithivMLmods/Castor-3D-Sketchfab-Flux-LoRA"
prompt = f"3D Sketchfab, {prompt}"
elif model == 'Retro Comic Flux':
API_URL = "https://api-inference.huggingface.co/models/renderartist/retrocomicflux"
prompt = f"c0m1c, comic book panel, {prompt}"
elif model == 'Caricature':
API_URL = "https://api-inference.huggingface.co/models/TheAwakenOne/caricature"
prompt = f"CCTUR3, {prompt}"
elif model == 'Huggieverse':
API_URL = "https://api-inference.huggingface.co/models/Chunte/flux-lora-Huggieverse"
prompt = f"HGGRE, {prompt}"
elif model == 'Propaganda Poster':
API_URL = "https://api-inference.huggingface.co/models/AlekseyCalvin/Propaganda_Poster_Schnell_by_doctor_diffusion"
prompt = f"propaganda poster, {prompt}"
elif model == 'Flux Game Assets V2':
API_URL = "https://api-inference.huggingface.co/models/gokaygokay/Flux-Game-Assets-LoRA-v2"
prompt = f"wbgmsst, white background, {prompt}"
elif model == 'SoftPasty Flux':
API_URL = "https://api-inference.huggingface.co/models/alvdansen/softpasty-flux-dev"
prompt = f"araminta_illus illustration style, {prompt}"
elif model == 'Flux Stickers':
API_URL = "https://api-inference.huggingface.co/models/diabolic6045/Flux_Sticker_Lora"
prompt = f"5t1cker 5ty1e, {prompt}"
elif model == 'Flux Animex V2':
API_URL = "https://api-inference.huggingface.co/models/strangerzonehf/Flux-Animex-v2-LoRA"
prompt = f"Animex, {prompt}"
elif model == 'Flux Animeo V1':
API_URL = "https://api-inference.huggingface.co/models/strangerzonehf/Flux-Animeo-v1-LoRA"
prompt = f"Animeo, {prompt}"
elif model == 'Movie Board':
API_URL = "https://api-inference.huggingface.co/models/prithivMLmods/Flux.1-Dev-Movie-Boards-LoRA"
prompt = f"movieboard, {prompt}"
elif model == 'Purple Dreamy':
API_URL = "https://api-inference.huggingface.co/models/prithivMLmods/Purple-Dreamy-Flux-LoRA"
prompt = f"Purple Dreamy, {prompt}"
elif model == 'PS1 Style Flux':
API_URL = "https://api-inference.huggingface.co/models/veryVANYA/ps1-style-flux"
prompt = f"ps1 game screenshot, {prompt}"
elif model == 'Softserve Anime':
API_URL = "https://api-inference.huggingface.co/models/alvdansen/softserve_anime"
prompt = f"sftsrv style illustration, {prompt}"
elif model == 'Flux Tarot v1':
API_URL = "https://api-inference.huggingface.co/models/multimodalart/flux-tarot-v1"
prompt = f"in the style of TOK a trtcrd tarot style, {prompt}"
elif model == 'Half Illustration':
API_URL = "https://api-inference.huggingface.co/models/davisbro/half_illustration"
prompt = f"in the style of TOK, {prompt}"
elif model == 'OpenDalle v1.1':
API_URL = "https://api-inference.huggingface.co/models/dataautogpt3/OpenDalleV1.1"
elif model == 'Flux Ghibsky Illustration':
API_URL = "https://api-inference.huggingface.co/models/aleksa-codes/flux-ghibsky-illustration"
prompt = f"GHIBSKY style, {prompt}"
elif model == 'Flux Koda':
API_URL = "https://api-inference.huggingface.co/models/alvdansen/flux-koda"
prompt = f"flmft style, {prompt}"
elif model == 'Soviet Diffusion XL':
API_URL = "https://api-inference.huggingface.co/models/openskyml/soviet-diffusion-xl"
prompt = f"soviet poster, {prompt}"
elif model == 'Flux Realism LoRA':
API_URL = "https://api-inference.huggingface.co/models/XLabs-AI/flux-RealismLora"
elif model == 'Frosting Lane Flux':
API_URL = "https://api-inference.huggingface.co/models/alvdansen/frosting_lane_flux"
prompt = f"frstingln illustration, {prompt}"
elif model == 'Phantasma Anime':
API_URL = "https://api-inference.huggingface.co/models/alvdansen/phantasma-anime"
elif model == 'Boreal':
API_URL = "https://api-inference.huggingface.co/models/kudzueye/Boreal"
prompt = f"photo, {prompt}"
elif model == 'How2Draw':
API_URL = "https://api-inference.huggingface.co/models/glif/how2draw"
prompt = f"How2Draw, {prompt}"
elif model == 'Flux AestheticAnime':
API_URL = "https://api-inference.huggingface.co/models/dataautogpt3/FLUX-AestheticAnime"
elif model == 'Fashion Hut Modeling LoRA':
API_URL = "https://api-inference.huggingface.co/models/prithivMLmods/Fashion-Hut-Modeling-LoRA"
prompt = f"Modeling of, {prompt}"
elif model == 'Flux SyntheticAnime':
API_URL = "https://api-inference.huggingface.co/models/dataautogpt3/FLUX-SyntheticAnime"
prompt = f"1980s anime screengrab, VHS quality, syntheticanime, {prompt}"
elif model == 'Flux Midjourney Anime':
API_URL = "https://api-inference.huggingface.co/models/brushpenbob/flux-midjourney-anime"
prompt = f"egmid, {prompt}"
elif model == 'Coloring Book Generator':
API_URL = "https://api-inference.huggingface.co/models/robert123231/coloringbookgenerator"
elif model == 'Collage Flux':
API_URL = "https://api-inference.huggingface.co/models/prithivMLmods/Castor-Collage-Dim-Flux-LoRA"
prompt = f"collage, {prompt}"
elif model == 'Flux Product Ad Backdrop':
API_URL = "https://api-inference.huggingface.co/models/prithivMLmods/Flux-Product-Ad-Backdrop"
prompt = f"Product Ad, {prompt}"
elif model == 'Product Design':
API_URL = "https://api-inference.huggingface.co/models/multimodalart/product-design"
prompt = f"product designed by prdsgn, {prompt}"
elif model == '90s Anime Art':
API_URL = "https://api-inference.huggingface.co/models/glif/90s-anime-art"
elif model == 'Brain Melt Acid Art':
API_URL = "https://api-inference.huggingface.co/models/glif/Brain-Melt-Acid-Art"
prompt = f"maximalism, in an acid surrealism style, {prompt}"
elif model == 'Lustly Flux Uncensored v1':
API_URL = "https://api-inference.huggingface.co/models/lustlyai/Flux_Lustly.ai_Uncensored_nsfw_v1"
elif model == 'NSFW Master Flux':
API_URL = "https://api-inference.huggingface.co/models/Keltezaa/NSFW_MASTER_FLUX"
prompt = f"NSFW, {prompt}"
elif model == 'Flux Outfit Generator':
API_URL = "https://api-inference.huggingface.co/models/tryonlabs/FLUX.1-dev-LoRA-Outfit-Generator"
elif model == 'Midjourney':
API_URL = "https://api-inference.huggingface.co/models/Jovie/Midjourney"
elif model == 'DreamPhotoGASM':
API_URL = "https://api-inference.huggingface.co/models/Yntec/DreamPhotoGASM"
elif model == 'Flux Super Realism LoRA':
API_URL = "https://api-inference.huggingface.co/models/strangerzonehf/Flux-Super-Realism-LoRA"
elif model == 'Stable Diffusion 2-1':
API_URL = "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-2-1-base"
elif model == 'Stable Diffusion 3.5 Large':
API_URL = "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-3.5-large"
elif model == 'Stable Diffusion 3.5 Large Turbo':
API_URL = "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-3.5-large-turbo"
elif model == 'Stable Diffusion 3 Medium':
API_URL = "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-3-medium-diffusers"
prompt = f"A, {prompt}"
elif model == 'Duchaiten Real3D NSFW XL':
API_URL = "https://api-inference.huggingface.co/models/stablediffusionapi/duchaiten-real3d-nsfw-xl"
elif model == 'Pixel Art XL':
API_URL = "https://api-inference.huggingface.co/models/nerijs/pixel-art-xl"
prompt = f"pixel art, {prompt}"
elif model == 'Character Design':
API_URL = "https://api-inference.huggingface.co/models/KappaNeuro/character-design"
prompt = f"Character Design, {prompt}"
elif model == 'Sketched Out Manga':
API_URL = "https://api-inference.huggingface.co/models/alvdansen/sketchedoutmanga"
prompt = f"daiton, {prompt}"
elif model == 'Archfey Anime':
API_URL = "https://api-inference.huggingface.co/models/alvdansen/archfey_anime"
elif model == 'Lofi Cuties':
API_URL = "https://api-inference.huggingface.co/models/alvdansen/lofi-cuties"
elif model == 'YiffyMix':
API_URL = "https://api-inference.huggingface.co/models/Yntec/YiffyMix"
elif model == 'Analog Madness Realistic v7':
API_URL = "https://api-inference.huggingface.co/models/digiplay/AnalogMadness-realistic-model-v7"
elif model == 'Selfie Photography':
API_URL = "https://api-inference.huggingface.co/models/artificialguybr/selfiephotographyredmond-selfie-photography-lora-for-sdxl"
prompt = f"instagram model, discord profile picture, {prompt}"
elif model == 'Filmgrain':
API_URL = "https://api-inference.huggingface.co/models/artificialguybr/filmgrain-redmond-filmgrain-lora-for-sdxl"
prompt = f"Film Grain, FilmGrainAF, {prompt}"
elif model == 'Leonardo AI Style Illustration':
API_URL = "https://api-inference.huggingface.co/models/goofyai/Leonardo_Ai_Style_Illustration"
prompt = f"leonardo style, illustration, vector art, {prompt}"
elif model == 'Cyborg Style XL':
API_URL = "https://api-inference.huggingface.co/models/goofyai/cyborg_style_xl"
prompt = f"cyborg style, {prompt}"
elif model == 'Little Tinies':
API_URL = "https://api-inference.huggingface.co/models/alvdansen/littletinies"
elif model == 'NSFW XL':
API_URL = "https://api-inference.huggingface.co/models/Dremmar/nsfw-xl"
elif model == 'Analog Redmond':
API_URL = "https://api-inference.huggingface.co/models/artificialguybr/analogredmond"
prompt = f"timeless style, {prompt}"
elif model == 'Pixel Art Redmond':
API_URL = "https://api-inference.huggingface.co/models/artificialguybr/PixelArtRedmond"
prompt = f"Pixel Art, {prompt}"
elif model == 'Ascii Art':
API_URL = "https://api-inference.huggingface.co/models/CiroN2022/ascii-art"
prompt = f"ascii art, {prompt}"
elif model == 'Analog':
API_URL = "https://api-inference.huggingface.co/models/Yntec/Analog"
elif model == 'Maple Syrup':
API_URL = "https://api-inference.huggingface.co/models/Yntec/MapleSyrup"
elif model == 'Perfect Lewd Fantasy':
API_URL = "https://api-inference.huggingface.co/models/digiplay/perfectLewdFantasy_v1.01"
elif model == 'AbsoluteReality 1.8.1':
API_URL = "https://api-inference.huggingface.co/models/digiplay/AbsoluteReality_v1.8.1"
elif model == 'Disney':
API_URL = "https://api-inference.huggingface.co/models/goofyai/disney_style_xl"
prompt = f"Disney style, {prompt}"
elif model == 'Redmond SDXL':
API_URL = "https://api-inference.huggingface.co/models/artificialguybr/LogoRedmond-LogoLoraForSDXL-V2"
elif model == 'epiCPhotoGasm':
API_URL = "https://api-inference.huggingface.co/models/Yntec/epiCPhotoGasm"
else:
API_URL = "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-xl-base-1.0"
        # Prepare payload; the Inference API reads generation options from "parameters".
        # ("strength" and "sampler" are accepted by the UI but are not standard
        # text-to-image parameters, so they are not forwarded.)
        payload = {
            "inputs": prompt,
            "parameters": {
                "negative_prompt": is_negative,  # despite the name, this holds the negative-prompt text
                "num_inference_steps": steps,
                "guidance_scale": cfg_scale,
                "seed": seed if seed != -1 else random.randint(1, 1000000000),
                "width": width,
                "height": height
            }
        }
        # Use a generous timeout and retry the request if it fails
max_retries = 3
current_retry = 0
while current_retry < max_retries:
try:
                response = requests.post(API_URL, headers=headers, json=payload, timeout=180)  # 180-second timeout
response.raise_for_status()
image = Image.open(io.BytesIO(response.content))
print(f'Generation {key} completed successfully')
return image
except requests.exceptions.Timeout:
current_retry += 1
if current_retry < max_retries:
print(f"Timeout occurred. Retrying... (Attempt {current_retry + 1}/{max_retries})")
continue
else:
raise gr.Error(f"Request timed out after {max_retries} attempts. The model might be busy, please try again later.")
            except requests.exceptions.RequestException:
                # Re-raise so the outer handler can turn status codes into friendly messages
                raise
except requests.exceptions.RequestException as e:
error_message = f"Request failed: {str(e)}"
if hasattr(e, 'response') and e.response is not None:
if e.response.status_code == 401:
error_message = "Invalid API token. Please check your Hugging Face API token."
elif e.response.status_code == 403:
error_message = "Access denied. Please check your API token permissions."
elif e.response.status_code == 503:
error_message = "Model is currently loading. Please try again in a few moments."
raise gr.Error(error_message)
    except gr.Error:
        # Let Gradio error messages (e.g. the timeout message above) pass through unchanged
        raise
    except Exception as e:
        raise gr.Error(f"Unexpected error: {str(e)}")
def generate_grid(prompt, selected_models, custom_lora, negative_prompt, steps, cfg_scale, seed, strength, width, height, progress=gr.Progress()):
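    """Generate up to four images, one per selected model, and stream them to the gallery.

    Implemented as a generator so the gallery refreshes as each image finishes; models
    that fail are skipped and empty slots are back-filled with the last successful image.
    """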
if len(selected_models) > 4:
raise gr.Error("Please select up to 4 models")
if len(selected_models) == 0:
raise gr.Error("Please select at least 1 model")
    # Pre-allocate four image slots (one per grid cell)
images = [None] * 4
total_models = len(selected_models[:4])
def update_gallery():
        # Update the gallery with only the images generated so far
return [img for img in images if img is not None]
    # Generate one image per selected model
for idx, model_name in enumerate(selected_models[:4]):
try:
progress((idx + 1) / total_models, f"Generating image for {model_name}...")
            # Pass keyword arguments so the unused `sampler` parameter is not filled by `seed`
            img = query(
                prompt, model_name, custom_lora,
                is_negative=negative_prompt, steps=steps, cfg_scale=cfg_scale,
                seed=seed, strength=strength, width=width, height=height
            )
images[idx] = img
            # Refresh the gallery as soon as each image is ready
yield update_gallery()
except Exception as e:
print(f"Error generating image for {model_name}: {str(e)}")
continue
    # Fill any remaining slots with the last successfully generated image
last_valid_image = next((img for img in reversed(images) if img is not None), None)
if last_valid_image:
for i in range(len(images)):
if images[i] is None:
images[i] = last_valid_image
progress(1.0, "Generation complete!")
yield update_gallery()
css = """
footer {
visibility: hidden;
}
"""
with gr.Blocks(theme="Yntec/HaleyCH_Theme_Orange", css=css) as dalle:
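    # Layout: prompt and LoRA inputs on the left; image size and sampling settings on the right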
gr.Markdown("# ZeroWeight Studio")
with gr.Row():
with gr.Column(scale=2):
text_prompt = gr.Textbox(
label="Prompt",
placeholder="Describe what you want to create...",
lines=3
)
negative_prompt = gr.Textbox(
label="Negative Prompt",
placeholder="What should not be in the image",
value="(deformed, distorted, disfigured), poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, (mutated hands and fingers), disconnected limbs, mutation, mutated, ugly, disgusting, blurry, amputation",
lines=2
)
custom_lora = gr.Textbox(
label="Custom LoRA Path (Optional)",
placeholder="e.g., multimodalart/vintage-ads-flux",
lines=1
)
with gr.Column(scale=1):
with gr.Group():
gr.Markdown("### Image Settings")
width = gr.Slider(label="Width", value=1024, minimum=512, maximum=1216, step=64)
height = gr.Slider(label="Height", value=1024, minimum=512, maximum=1216, step=64)
with gr.Group():
gr.Markdown("### Generation Parameters")
steps = gr.Slider(label="Steps", value=35, minimum=1, maximum=100, step=1)
cfg = gr.Slider(label="CFG Scale", value=7, minimum=1, maximum=20, step=0.5)
strength = gr.Slider(label="Strength", value=0.7, minimum=0, maximum=1, step=0.1)
seed = gr.Slider(label="Seed (-1 for random)", value=-1, minimum=-1, maximum=1000000000, step=1)
with gr.Accordion("Model Selection", open=False):
model_search = gr.Textbox(
label="Search Models",
placeholder="Type to filter models...",
lines=1
)
        # Top four models, enabled by default
default_models = [
"FLUX.1 [Schnell]", # 모델 이름 통일
"Stable Diffusion 3.5 Large",
"Stable Diffusion 3.5 Large Turbo",
"Midjourney"
]
        # Full list of selectable models
models_list = [
"FLUX.1 [Schnell]", # 모델 이름 통일
"Stable Diffusion 3.5 Large",
"Stable Diffusion 3.5 Large Turbo",
"Stable Diffusion XL",
"FLUX.1 [Dev]",
"Midjourney",
"DreamPhotoGASM",
"Disney",
"Leonardo AI Style Illustration",
"AbsoluteReality 1.8.1",
"Analog Redmond",
"Stable Diffusion 3 Medium",
"Flux Super Realism LoRA",
"Flux Realism LoRA",
"Selfie Photography",
"Character Design",
"Pixel Art XL",
"3D Sketchfab",
"Flux Animex V2",
"Flux Animeo V1",
"Flux AestheticAnime",
"90s Anime Art",
"Softserve Anime",
"Brain Melt Acid Art",
"Retro Comic Flux",
"Purple Dreamy",
"SoftPasty Flux",
"Flux Logo Design",
"Product Design",
"Propaganda Poster",
"Movie Board",
"Collage Flux"
]
        model = gr.CheckboxGroup(
label="Select Models (Choose up to 4)",
choices=models_list,
value=default_models,
interactive=True
)
with gr.Row():
generate_btn = gr.Button("Generate 2x2 Grid", variant="primary", size="lg")
with gr.Row():
gallery = gr.Gallery(
label="Generated Images",
show_label=True,
elem_id="gallery",
columns=2,
rows=2,
height="auto",
            preview=True,  # enable live preview so results appear as they stream in
)
    # Event handler: stream the 2x2 grid results into the gallery
generate_btn.click(
fn=generate_grid,
inputs=[
text_prompt,
model,
custom_lora,
negative_prompt,
steps,
cfg,
seed,
strength,
width,
height
],
outputs=gallery,
show_progress=True
)
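    # Filter the model checklist as the user types in the search box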
def filter_models(search_term):
filtered_models = [m for m in models_list if search_term.lower() in m.lower()]
return gr.update(choices=filtered_models, value=[])
model_search.change(filter_models, inputs=model_search, outputs=model)
if __name__ == "__main__":
dalle.launch(show_api=False, share=False)