fantaxy committed
Commit c225692
1 Parent(s): ee0920f

Create back.py

Files changed (1)
  1. back.py +476 -0
back.py ADDED
import gradio as gr
import requests
import io
import random
import os
from PIL import Image
import json

# Get API token from environment variable
HF_TOKEN = os.getenv("HF_TOKEN")
if not HF_TOKEN:
    raise ValueError("HF_TOKEN environment variable is not set")

def query(prompt, model, custom_lora, is_negative=False, steps=35, cfg_scale=7, sampler="DPM++ 2M Karras", seed=-1, strength=0.7, width=1024, height=1024):
    print("Starting query function...")

    if not prompt:
        raise gr.Error("Prompt cannot be empty")

    # Set headers with API token
    headers = {"Authorization": f"Bearer {HF_TOKEN}"}

    # Generate a unique key for tracking
    key = random.randint(0, 999)

    # Enhance prompt
    prompt = f"{prompt} | ultra detail, ultra elaboration, ultra quality, perfect."
    print(f'Generation {key}: {prompt}')

    try:
        # Set API URL based on model selection
        if custom_lora.strip():
            API_URL = f"https://api-inference.huggingface.co/models/{custom_lora.strip()}"
        else:
            if model == 'Stable Diffusion XL':
                API_URL = "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-xl-base-1.0"
            elif model == 'FLUX.1 [Dev]':
                API_URL = "https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-dev"
            elif model == 'FLUX.1 [Schnell]':
                API_URL = "https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-schnell"
            elif model == 'Flux Logo Design':
                API_URL = "https://api-inference.huggingface.co/models/Shakker-Labs/FLUX.1-dev-LoRA-Logo-Design"
                prompt = f"wablogo, logo, Minimalist, {prompt}"
            elif model == 'Flux Uncensored':
                API_URL = "https://api-inference.huggingface.co/models/enhanceaiteam/Flux-uncensored"
            elif model == 'Flux Uncensored V2':
                API_URL = "https://api-inference.huggingface.co/models/enhanceaiteam/Flux-Uncensored-V2"
            elif model == 'Flux Tarot Cards':
                API_URL = "https://api-inference.huggingface.co/models/prithivMLmods/Ton618-Tarot-Cards-Flux-LoRA"
                prompt = f"Tarot card, {prompt}"
            elif model == 'Pixel Art Sprites':
                API_URL = "https://api-inference.huggingface.co/models/sWizad/pokemon-trainer-sprites-pixelart-flux"
                prompt = f"a pixel image, {prompt}"
            elif model == '3D Sketchfab':
                API_URL = "https://api-inference.huggingface.co/models/prithivMLmods/Castor-3D-Sketchfab-Flux-LoRA"
                prompt = f"3D Sketchfab, {prompt}"
            elif model == 'Retro Comic Flux':
                API_URL = "https://api-inference.huggingface.co/models/renderartist/retrocomicflux"
                prompt = f"c0m1c, comic book panel, {prompt}"
            elif model == 'Caricature':
                API_URL = "https://api-inference.huggingface.co/models/TheAwakenOne/caricature"
                prompt = f"CCTUR3, {prompt}"
            elif model == 'Huggieverse':
                API_URL = "https://api-inference.huggingface.co/models/Chunte/flux-lora-Huggieverse"
                prompt = f"HGGRE, {prompt}"
            elif model == 'Propaganda Poster':
                API_URL = "https://api-inference.huggingface.co/models/AlekseyCalvin/Propaganda_Poster_Schnell_by_doctor_diffusion"
                prompt = f"propaganda poster, {prompt}"
            elif model == 'Flux Game Assets V2':
                API_URL = "https://api-inference.huggingface.co/models/gokaygokay/Flux-Game-Assets-LoRA-v2"
                prompt = f"wbgmsst, white background, {prompt}"
            elif model == 'SoftPasty Flux':
                API_URL = "https://api-inference.huggingface.co/models/alvdansen/softpasty-flux-dev"
                prompt = f"araminta_illus illustration style, {prompt}"
            elif model == 'Flux Stickers':
                API_URL = "https://api-inference.huggingface.co/models/diabolic6045/Flux_Sticker_Lora"
                prompt = f"5t1cker 5ty1e, {prompt}"
            elif model == 'Flux Animex V2':
                API_URL = "https://api-inference.huggingface.co/models/strangerzonehf/Flux-Animex-v2-LoRA"
                prompt = f"Animex, {prompt}"
            elif model == 'Flux Animeo V1':
                API_URL = "https://api-inference.huggingface.co/models/strangerzonehf/Flux-Animeo-v1-LoRA"
                prompt = f"Animeo, {prompt}"
            elif model == 'Movie Board':
                API_URL = "https://api-inference.huggingface.co/models/prithivMLmods/Flux.1-Dev-Movie-Boards-LoRA"
                prompt = f"movieboard, {prompt}"
            elif model == 'Purple Dreamy':
                API_URL = "https://api-inference.huggingface.co/models/prithivMLmods/Purple-Dreamy-Flux-LoRA"
                prompt = f"Purple Dreamy, {prompt}"
            elif model == 'PS1 Style Flux':
                API_URL = "https://api-inference.huggingface.co/models/veryVANYA/ps1-style-flux"
                prompt = f"ps1 game screenshot, {prompt}"
            elif model == 'Softserve Anime':
                API_URL = "https://api-inference.huggingface.co/models/alvdansen/softserve_anime"
                prompt = f"sftsrv style illustration, {prompt}"
            elif model == 'Flux Tarot v1':
                API_URL = "https://api-inference.huggingface.co/models/multimodalart/flux-tarot-v1"
                prompt = f"in the style of TOK a trtcrd tarot style, {prompt}"
            elif model == 'Half Illustration':
                API_URL = "https://api-inference.huggingface.co/models/davisbro/half_illustration"
                prompt = f"in the style of TOK, {prompt}"
            elif model == 'OpenDalle v1.1':
                API_URL = "https://api-inference.huggingface.co/models/dataautogpt3/OpenDalleV1.1"
            elif model == 'Flux Ghibsky Illustration':
                API_URL = "https://api-inference.huggingface.co/models/aleksa-codes/flux-ghibsky-illustration"
                prompt = f"GHIBSKY style, {prompt}"
            elif model == 'Flux Koda':
                API_URL = "https://api-inference.huggingface.co/models/alvdansen/flux-koda"
                prompt = f"flmft style, {prompt}"
            elif model == 'Soviet Diffusion XL':
                API_URL = "https://api-inference.huggingface.co/models/openskyml/soviet-diffusion-xl"
                prompt = f"soviet poster, {prompt}"
            elif model == 'Flux Realism LoRA':
                API_URL = "https://api-inference.huggingface.co/models/XLabs-AI/flux-RealismLora"
            elif model == 'Frosting Lane Flux':
                API_URL = "https://api-inference.huggingface.co/models/alvdansen/frosting_lane_flux"
                prompt = f"frstingln illustration, {prompt}"
            elif model == 'Phantasma Anime':
                API_URL = "https://api-inference.huggingface.co/models/alvdansen/phantasma-anime"
            elif model == 'Boreal':
                API_URL = "https://api-inference.huggingface.co/models/kudzueye/Boreal"
                prompt = f"photo, {prompt}"
            elif model == 'How2Draw':
                API_URL = "https://api-inference.huggingface.co/models/glif/how2draw"
                prompt = f"How2Draw, {prompt}"
            elif model == 'Flux AestheticAnime':
                API_URL = "https://api-inference.huggingface.co/models/dataautogpt3/FLUX-AestheticAnime"
            elif model == 'Fashion Hut Modeling LoRA':
                API_URL = "https://api-inference.huggingface.co/models/prithivMLmods/Fashion-Hut-Modeling-LoRA"
                prompt = f"Modeling of, {prompt}"
            elif model == 'Flux SyntheticAnime':
                API_URL = "https://api-inference.huggingface.co/models/dataautogpt3/FLUX-SyntheticAnime"
                prompt = f"1980s anime screengrab, VHS quality, syntheticanime, {prompt}"
            elif model == 'Flux Midjourney Anime':
                API_URL = "https://api-inference.huggingface.co/models/brushpenbob/flux-midjourney-anime"
                prompt = f"egmid, {prompt}"
            elif model == 'Coloring Book Generator':
                API_URL = "https://api-inference.huggingface.co/models/robert123231/coloringbookgenerator"
            elif model == 'Collage Flux':
                API_URL = "https://api-inference.huggingface.co/models/prithivMLmods/Castor-Collage-Dim-Flux-LoRA"
                prompt = f"collage, {prompt}"
            elif model == 'Flux Product Ad Backdrop':
                API_URL = "https://api-inference.huggingface.co/models/prithivMLmods/Flux-Product-Ad-Backdrop"
                prompt = f"Product Ad, {prompt}"
            elif model == 'Product Design':
                API_URL = "https://api-inference.huggingface.co/models/multimodalart/product-design"
                prompt = f"product designed by prdsgn, {prompt}"
            elif model == '90s Anime Art':
                API_URL = "https://api-inference.huggingface.co/models/glif/90s-anime-art"
            elif model == 'Brain Melt Acid Art':
                API_URL = "https://api-inference.huggingface.co/models/glif/Brain-Melt-Acid-Art"
                prompt = f"maximalism, in an acid surrealism style, {prompt}"
            elif model == 'Lustly Flux Uncensored v1':
                API_URL = "https://api-inference.huggingface.co/models/lustlyai/Flux_Lustly.ai_Uncensored_nsfw_v1"
            elif model == 'NSFW Master Flux':
                API_URL = "https://api-inference.huggingface.co/models/Keltezaa/NSFW_MASTER_FLUX"
                prompt = f"NSFW, {prompt}"
            elif model == 'Flux Outfit Generator':
                API_URL = "https://api-inference.huggingface.co/models/tryonlabs/FLUX.1-dev-LoRA-Outfit-Generator"
            elif model == 'Midjourney':
                API_URL = "https://api-inference.huggingface.co/models/Jovie/Midjourney"
            elif model == 'DreamPhotoGASM':
                API_URL = "https://api-inference.huggingface.co/models/Yntec/DreamPhotoGASM"
            elif model == 'Flux Super Realism LoRA':
                API_URL = "https://api-inference.huggingface.co/models/strangerzonehf/Flux-Super-Realism-LoRA"
            elif model == 'Stable Diffusion 2-1':
                API_URL = "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-2-1-base"
            elif model == 'Stable Diffusion 3.5 Large':
                API_URL = "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-3.5-large"
            elif model == 'Stable Diffusion 3.5 Large Turbo':
                API_URL = "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-3.5-large-turbo"
            elif model == 'Stable Diffusion 3 Medium':
                API_URL = "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-3-medium-diffusers"
                prompt = f"A, {prompt}"
            elif model == 'Duchaiten Real3D NSFW XL':
                API_URL = "https://api-inference.huggingface.co/models/stablediffusionapi/duchaiten-real3d-nsfw-xl"
            elif model == 'Pixel Art XL':
                API_URL = "https://api-inference.huggingface.co/models/nerijs/pixel-art-xl"
                prompt = f"pixel art, {prompt}"
            elif model == 'Character Design':
                API_URL = "https://api-inference.huggingface.co/models/KappaNeuro/character-design"
                prompt = f"Character Design, {prompt}"
            elif model == 'Sketched Out Manga':
                API_URL = "https://api-inference.huggingface.co/models/alvdansen/sketchedoutmanga"
                prompt = f"daiton, {prompt}"
            elif model == 'Archfey Anime':
                API_URL = "https://api-inference.huggingface.co/models/alvdansen/archfey_anime"
            elif model == 'Lofi Cuties':
                API_URL = "https://api-inference.huggingface.co/models/alvdansen/lofi-cuties"
            elif model == 'YiffyMix':
                API_URL = "https://api-inference.huggingface.co/models/Yntec/YiffyMix"
            elif model == 'Analog Madness Realistic v7':
                API_URL = "https://api-inference.huggingface.co/models/digiplay/AnalogMadness-realistic-model-v7"
            elif model == 'Selfie Photography':
                API_URL = "https://api-inference.huggingface.co/models/artificialguybr/selfiephotographyredmond-selfie-photography-lora-for-sdxl"
                prompt = f"instagram model, discord profile picture, {prompt}"
            elif model == 'Filmgrain':
                API_URL = "https://api-inference.huggingface.co/models/artificialguybr/filmgrain-redmond-filmgrain-lora-for-sdxl"
                prompt = f"Film Grain, FilmGrainAF, {prompt}"
            elif model == 'Leonardo AI Style Illustration':
                API_URL = "https://api-inference.huggingface.co/models/goofyai/Leonardo_Ai_Style_Illustration"
                prompt = f"leonardo style, illustration, vector art, {prompt}"
            elif model == 'Cyborg Style XL':
                API_URL = "https://api-inference.huggingface.co/models/goofyai/cyborg_style_xl"
                prompt = f"cyborg style, {prompt}"
            elif model == 'Little Tinies':
                API_URL = "https://api-inference.huggingface.co/models/alvdansen/littletinies"
            elif model == 'NSFW XL':
                API_URL = "https://api-inference.huggingface.co/models/Dremmar/nsfw-xl"
            elif model == 'Analog Redmond':
                API_URL = "https://api-inference.huggingface.co/models/artificialguybr/analogredmond"
                prompt = f"timeless style, {prompt}"
            elif model == 'Pixel Art Redmond':
                API_URL = "https://api-inference.huggingface.co/models/artificialguybr/PixelArtRedmond"
                prompt = f"Pixel Art, {prompt}"
            elif model == 'Ascii Art':
                API_URL = "https://api-inference.huggingface.co/models/CiroN2022/ascii-art"
                prompt = f"ascii art, {prompt}"
            elif model == 'Analog':
                API_URL = "https://api-inference.huggingface.co/models/Yntec/Analog"
            elif model == 'Maple Syrup':
                API_URL = "https://api-inference.huggingface.co/models/Yntec/MapleSyrup"
            elif model == 'Perfect Lewd Fantasy':
                API_URL = "https://api-inference.huggingface.co/models/digiplay/perfectLewdFantasy_v1.01"
            elif model == 'AbsoluteReality 1.8.1':
                API_URL = "https://api-inference.huggingface.co/models/digiplay/AbsoluteReality_v1.8.1"
            elif model == 'Disney':
                API_URL = "https://api-inference.huggingface.co/models/goofyai/disney_style_xl"
                prompt = f"Disney style, {prompt}"
            elif model == 'Redmond SDXL':
                API_URL = "https://api-inference.huggingface.co/models/artificialguybr/LogoRedmond-LogoLoraForSDXL-V2"
            elif model == 'epiCPhotoGasm':
                API_URL = "https://api-inference.huggingface.co/models/Yntec/epiCPhotoGasm"
            else:
                API_URL = "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-xl-base-1.0"

        # Prepare payload
        payload = {
            "inputs": prompt,
            "is_negative": is_negative,
            "steps": steps,
            "cfg_scale": cfg_scale,
            "seed": seed if seed != -1 else random.randint(1, 1000000000),
            "strength": strength,
            "parameters": {
                "width": width,
                "height": height
            }
        }
        # Use a longer timeout and retry when the request times out
        max_retries = 3
        current_retry = 0

        while current_retry < max_retries:
            try:
                response = requests.post(API_URL, headers=headers, json=payload, timeout=180)  # timeout raised to 180 seconds
                response.raise_for_status()

                image = Image.open(io.BytesIO(response.content))
                print(f'Generation {key} completed successfully')
                return image

            except requests.exceptions.Timeout:
                current_retry += 1
                if current_retry < max_retries:
                    print(f"Timeout occurred. Retrying... (Attempt {current_retry + 1}/{max_retries})")
                    continue
                else:
                    raise gr.Error(f"Request timed out after {max_retries} attempts. The model might be busy, please try again later.")
            # Other request errors fall through to the handler below, which maps
            # status codes to friendlier messages.

    except gr.Error:
        # Let Gradio errors raised above propagate unchanged
        raise
    except requests.exceptions.RequestException as e:
        error_message = f"Request failed: {str(e)}"
        if hasattr(e, 'response') and e.response is not None:
            if e.response.status_code == 401:
                error_message = "Invalid API token. Please check your Hugging Face API token."
            elif e.response.status_code == 403:
                error_message = "Access denied. Please check your API token permissions."
            elif e.response.status_code == 503:
                error_message = "Model is currently loading. Please try again in a few moments."
        raise gr.Error(error_message)
    except Exception as e:
        raise gr.Error(f"Unexpected error: {str(e)}")


def generate_grid(prompt, selected_models, custom_lora, negative_prompt, steps, cfg_scale, seed, strength, width, height, progress=gr.Progress()):
    if len(selected_models) > 4:
        raise gr.Error("Please select up to 4 models")
    if len(selected_models) == 0:
        raise gr.Error("Please select at least 1 model")

    # Collect the generated images
    images = []
    total_models = len(selected_models[:4])

    # Generate one image per selected model
    for idx, model_name in enumerate(selected_models[:4]):
        try:
            progress((idx + 1) / total_models, f"Generating image for {model_name}...")
            # Keyword arguments keep the values aligned with query()'s signature
            # (the unused `sampler` parameter would otherwise shift them).
            img = query(prompt, model_name, custom_lora, is_negative=negative_prompt,
                        steps=steps, cfg_scale=cfg_scale, seed=seed, strength=strength,
                        width=width, height=height)
            if img:
                images.append(img)
        except Exception as e:
            print(f"Error generating image for {model_name}: {str(e)}")
            continue

    # Make sure at least one image was generated
    if not images:
        raise gr.Error("Failed to generate any images. Please try again.")

    # Fill all four grid slots
    while len(images) < 4:
        images.append(images[-1])

    # Verify that every image is usable
    valid_images = []
    for img in images:
        try:
            # Work on a copy of the image
            img_copy = img.copy()
            valid_images.append(img_copy)
        except Exception as e:
            print(f"Error processing image: {str(e)}")
            # On failure, fall back to the last valid image
            if valid_images:
                valid_images.append(valid_images[-1].copy())
            else:
                raise gr.Error("Failed to process images. Please try again.")

    progress(1.0, "Generation complete!")
    return valid_images
css = """
footer {
    visibility: hidden;
}
"""

with gr.Blocks(theme="Yntec/HaleyCH_Theme_Orange", css=css) as dalle:
    gr.Markdown("# ZeroWeight Studio")

    with gr.Row():
        with gr.Column(scale=2):
            text_prompt = gr.Textbox(
                label="Prompt",
                placeholder="Describe what you want to create...",
                lines=3
            )

            negative_prompt = gr.Textbox(
                label="Negative Prompt",
                placeholder="What should not be in the image",
                value="(deformed, distorted, disfigured), poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, (mutated hands and fingers), disconnected limbs, mutation, mutated, ugly, disgusting, blurry, amputation",
                lines=2
            )

            custom_lora = gr.Textbox(
                label="Custom LoRA Path (Optional)",
                placeholder="e.g., multimodalart/vintage-ads-flux",
                lines=1
            )

        with gr.Column(scale=1):
            with gr.Group():
                gr.Markdown("### Image Settings")
                width = gr.Slider(label="Width", value=1024, minimum=512, maximum=1216, step=64)
                height = gr.Slider(label="Height", value=1024, minimum=512, maximum=1216, step=64)

            with gr.Group():
                gr.Markdown("### Generation Parameters")
                steps = gr.Slider(label="Steps", value=35, minimum=1, maximum=100, step=1)
                cfg = gr.Slider(label="CFG Scale", value=7, minimum=1, maximum=20, step=0.5)
                strength = gr.Slider(label="Strength", value=0.7, minimum=0, maximum=1, step=0.1)
                seed = gr.Slider(label="Seed (-1 for random)", value=-1, minimum=-1, maximum=1000000000, step=1)

    with gr.Accordion("Model Selection", open=False):
        model_search = gr.Textbox(
            label="Search Models",
            placeholder="Type to filter models...",
            lines=1
        )
        # The top four models are selected by default (names must match the branches in query())
        default_models = [
            "FLUX.1 [Schnell]",
            "Stable Diffusion 3.5 Large",
            "Stable Diffusion 3.5 Large Turbo",
            "Midjourney"
        ]

        # Full model list
        models_list = [
            "FLUX.1 [Schnell]",
            "Stable Diffusion 3.5 Large",
            "Stable Diffusion 3.5 Large Turbo",
            "Stable Diffusion XL",
            "FLUX.1 [Dev]",
            "Midjourney",
            "DreamPhotoGASM",
            "Disney",
            "Leonardo AI Style Illustration",
            "AbsoluteReality 1.8.1",
            "Analog Redmond",
            "Stable Diffusion 3 Medium",
            "Flux Super Realism LoRA",
            "Flux Realism LoRA",
            "Selfie Photography",
            "Character Design",
            "Pixel Art XL",
            "3D Sketchfab",
            "Flux Animex V2",
            "Flux Animeo V1",
            "Flux AestheticAnime",
            "90s Anime Art",
            "Softserve Anime",
            "Brain Melt Acid Art",
            "Retro Comic Flux",
            "Purple Dreamy",
            "SoftPasty Flux",
            "Flux Logo Design",
            "Product Design",
            "Propaganda Poster",
            "Movie Board",
            "Collage Flux"
        ]

        model = gr.CheckboxGroup(
            label="Select Models (Choose up to 4)",
            choices=models_list,
            value=default_models,
            interactive=True
        )
    with gr.Row():
        generate_btn = gr.Button("Generate 2x2 Grid", variant="primary", size="lg")

    with gr.Row():
        gallery = gr.Gallery(
            label="Generated Images",
            show_label=True,
            elem_id="gallery",
            columns=2,
            rows=2,
            height="auto"
        )

    generate_btn.click(
        fn=generate_grid,
        inputs=[
            text_prompt,
            model,
            custom_lora,
            negative_prompt,
            steps,
            cfg,
            seed,
            strength,
            width,
            height
        ],
        outputs=gallery
    )
    def filter_models(search_term):
        filtered_models = [m for m in models_list if search_term.lower() in m.lower()]
        return gr.update(choices=filtered_models, value=[])

    model_search.change(filter_models, inputs=model_search, outputs=model)

if __name__ == "__main__":
    dalle.launch(show_api=False, share=False)
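
For reference, the query() helper above can also be exercised outside the Gradio UI. The sketch below is illustrative only and not part of the committed file; it assumes HF_TOKEN is exported in the environment, back.py is importable from the working directory, and the file name smoke_test.py, the prompt, and the parameter values are all hypothetical.

# smoke_test.py (hypothetical helper, not part of back.py)
# Assumes HF_TOKEN is already set in the environment; importing back.py builds the
# Blocks UI but does not launch it, since launch() is guarded by __main__.
from back import query

# Request a single image from one preset; the model name must match a branch in query().
image = query(
    prompt="a lighthouse on a cliff at sunset",
    model="FLUX.1 [Schnell]",
    custom_lora="",      # empty string -> use the preset above instead of a custom LoRA repo
    steps=20,
    cfg_scale=7,
    seed=42,
    width=768,
    height=768,
)
image.save("smoke_test.png")
print("Saved smoke_test.png")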