Kingrane committed on
Commit 8e294e8 · verified · 1 Parent(s): deb6de0

Update app.py

Files changed (1)
  1. app.py +98 -136
app.py CHANGED
@@ -1,59 +1,56 @@
  import gradio as gr
- from diffusers import StableDiffusionPipeline, DPMSolverMultistepScheduler
  import torch
  import gc
  import time
  import os

- # --- Optimized configuration ---
- MODEL_ID = "playgroundai/playground-v2.5-1024px-aesthetic"  # Modern model from 2024
- DEVICE = "cpu"  # Force CPU for the free tier
- DTYPE = torch.float32  # Use float32 on CPU

- print(f"INFO: Space starting. Device: {DEVICE}, Model: {MODEL_ID}")
- print(f"INFO: Available RAM: {os.sysconf('SC_PAGE_SIZE') * os.sysconf('SC_PHYS_PAGES') / (1024**3):.2f} GB")

- # --- Helper to clear cache and memory ---
- def clear_memory():
-     if torch.cuda.is_available():
-         torch.cuda.empty_cache()
-     gc.collect()
-
- # --- Load the model with optimizations ---
  pipe = None
  try:
-     print(f"INFO: Loading model {MODEL_ID}...")
-     load_start_time = time.time()

-     # Create an optimized scheduler
-     scheduler = DPMSolverMultistepScheduler.from_pretrained(
-         MODEL_ID,
-         subfolder="scheduler",
-         solver_order=2,
-         prediction_type="epsilon",
-         thresholding=False,
-         algorithm_type="sde-dpmsolver++",
-         solver_type="midpoint"
      )

-     # Load the model with optimizations
-     pipe = StableDiffusionPipeline.from_pretrained(
-         MODEL_ID,
-         scheduler=scheduler,
          torch_dtype=DTYPE,
          use_safetensors=True,
-         low_cpu_mem_usage=True,
-         safety_checker=None  # Disabled for speed
      )

-     # Apply optimizations
-     pipe.enable_attention_slicing(1)

-     load_end_time = time.time()
-     print(f"INFO: Model loaded successfully in {load_end_time - load_start_time:.2f} seconds.")

  except Exception as e:
-     print(f"ERROR: Critical error loading model: {e}")

  # --- Prompt generation function ---
  def create_prompt_and_negative(user_description, style_choice):
@@ -77,77 +74,64 @@ def create_prompt_and_negative(user_description, style_choice):
      final_prompt = f"{base_description}, {style_keywords}, high quality, centered composition"
      return final_prompt, negative_prompt

- # --- Optimized generation function ---
- def generate_emoji(description, style, num_steps, guidance_scale_val, seed_val, size_choice):
      if pipe is None:
-         raise gr.Error("ERROR: Model is not loaded. The Space might be restarting or encountered an issue. Please check logs or try again later.")
      if not description.strip():
-         raise gr.Error("Please enter a description for your emoji!")

-     # Determine the image size
      if size_choice == "Small (faster)":
-         height, width = 384, 384
      elif size_choice == "Medium":
-         height, width = 512, 512
-     else:  # Large
-         height, width = 640, 640

-     # Seed handling
-     if seed_val is None or seed_val == -1 or seed_val == 0:
          seed = torch.randint(0, 2**32 - 1, (1,)).item()
      else:
          seed = int(seed_val)
-
      generator = torch.Generator(device='cpu').manual_seed(seed)

      prompt, negative_prompt = create_prompt_and_negative(description, style)
-     print(f"INFO: Generating for: '{description}' | Style: '{style}' | Size: {width}x{height}")
-     print(f"INFO: Prompt: {prompt}")
-     print(f"INFO: Negative Prompt: {negative_prompt}")
-     print(f"INFO: Steps: {num_steps}, Guidance: {guidance_scale_val}, Seed: {seed}")
-
-     # Cap the step count for CPU
-     effective_steps = min(int(num_steps), 20)
-     if int(num_steps) > 20:
-         print(f"WARN: Steps capped at 20 for CPU. Original: {num_steps}")
-
-     generation_start_time = time.time()
      try:
-         # Clear memory before generation
-         clear_memory()
-
-         # Use torch.no_grad() to save memory
          with torch.no_grad():
              image = pipe(
                  prompt,
                  negative_prompt=negative_prompt,
-                 num_inference_steps=effective_steps,
                  guidance_scale=float(guidance_scale_val),
                  generator=generator,
                  height=height,
                  width=width
              ).images[0]
-
-         generation_end_time = time.time()
-         generation_time = generation_end_time - generation_start_time
-         print(f"INFO: Image generated successfully in {generation_time:.2f} seconds.")
-
-         # Clear memory after generation
-         clear_memory()

          return image, seed, f"Generated in {generation_time:.1f} seconds"
-
      except Exception as e:
-         generation_end_time = time.time()
-         print(f"ERROR: Exception during generation ({generation_end_time - generation_start_time:.2f}s): {e}")
-         clear_memory()
-
-         if "out of memory" in str(e).lower():
-             raise gr.Error(f"Generation failed: Out of memory. Try a smaller size or fewer steps. (Error: {e})")
-         else:
-             raise gr.Error(f"Generation failed. Please check your input or try again. (Error: {e})")

- # --- Modern Gradio interface ---
  css = """
  #title_custom {
      text-align: center;
@@ -169,15 +153,6 @@
      transform: translateY(-2px);
      box-shadow: 0 5px 15px rgba(255, 140, 0, 0.3) !important;
  }
- .gr-form {
-     border-radius: 12px !important;
-     box-shadow: 0 4px 20px rgba(0,0,0,0.08) !important;
- }
- .gr-success {
-     background-color: #E6FFFA !important;
-     color: #1A7F47 !important;
-     border-color: #A1E0C0 !important;
- }
  .emoji-preview {
      border-radius: 12px;
      overflow: hidden;
@@ -186,94 +161,81 @@
  footer {visibility: hidden}
  """

- with gr.Blocks(css=css, theme=gr.themes.Soft(primary_hue=gr.themes.colors.orange, secondary_hue=gr.themes.colors.blue)) as demo:
      gr.Markdown("<h1 id='title_custom'>✨ Dreamoji AI Studio 2025 ✨</h1>", elem_id="title_custom")
      gr.Markdown(
          "<div class='container'>"
-         "Turn your wildest emoji ideas into reality with Playground v2.5, a modern AI model from late 2024! "
-         "Describe your emoji, pick a style, and watch the magic happen. "
-         "<b>Note:</b> Generation on free CPU hardware takes 1-3 minutes per emoji."
          "</div>"
      )

      with gr.Row():
          with gr.Column(scale=2):
              description_input = gr.Textbox(
-                 label="1. Describe your Dream Emoji:",
-                 placeholder="e.g., 'a sleepy sloth drinking coffee', 'a confused robot with a lightbulb', 'a joyful avocado dancing'"
              )
              style_input = gr.Radio(
                  ["Apple (iOS) Style", "Google (Noto) Style", "Hand-Drawn Style", "Anime Style"],
-                 label="2. Choose Emoji Style:",
                  value="Apple (iOS) Style"
              )

-             with gr.Row():
-                 size_choice = gr.Radio(
-                     ["Small (faster)", "Medium", "Large (slower)"],
-                     label="3. Choose Size:",
-                     value="Small (faster)",
-                     info="Smaller = faster generation"
-                 )

              with gr.Accordion("⚙️ Advanced Settings", open=False):
-                 with gr.Row():
-                     num_steps_slider = gr.Slider(
-                         minimum=10,
-                         maximum=25,
-                         value=15,
-                         step=1,
-                         label="Inference Steps",
-                         info="More steps = more detail but slower (max 20 on CPU)"
-                     )
-                     guidance_scale_slider = gr.Slider(
-                         minimum=1.0,
-                         maximum=12.0,
-                         value=7.0,
-                         step=0.5,
-                         label="Guidance Scale",
-                         info="How strictly to follow prompt"
-                     )
                  seed_input = gr.Number(
-                     label="Seed (use -1 for random, or a number to reproduce results)",
                      value=-1
                  )

-             generate_button = gr.Button("✨ Generate My Dreamoji! ✨", variant="primary", elem_id="generate_button_custom")

          with gr.Column(scale=1):
              output_image = gr.Image(
-                 label="Your Generated Dreamoji:",
                  type="pil",
                  elem_classes="emoji-preview"
              )
-             used_seed_output = gr.Textbox(label="Seed Used:", interactive=False)
-             generation_time = gr.Textbox(label="Generation Time:", interactive=False)

      generate_button.click(
          generate_emoji,
-         inputs=[description_input, style_input, num_steps_slider, guidance_scale_slider, seed_input, size_choice],
          outputs=[output_image, used_seed_output, generation_time],
      )

      gr.Markdown("---")

-     with gr.Accordion("🤔 Tips for Awesome Dreamojis", open=True):
          gr.Markdown(
-             "- **Keep it simple:** 'A happy cat' works better than complex descriptions\n"
-             "- **Try different styles:** Each style has its own unique look\n"
-             "- **Use the seed:** Found a good result? Save the seed and tweak the prompt\n"
-             "- **Size matters:** Smaller sizes generate much faster on CPU\n"
-             "- **Fewer steps:** For quick tests, try 10-15 steps first"
          )

      gr.HTML("""
          <div style="text-align: center; margin-top: 20px; font-size: 0.9em; color: #777;">
-             Created with Gradio & Hugging Face Spaces. Model: Playground v2.5 (2024).
              <br>
              If you enjoy this, consider supporting by giving the Space a ❤️!
          </div>
      """)

- # Launch with a queue to handle multiple requests
- demo.queue(max_size=10).launch()

@@ -1,59 +1,56 @@
  import gradio as gr
+ from diffusers import StableDiffusionXLPipeline, UNet2DConditionModel
  import torch
  import gc
  import time
  import os

+ # --- SDXL-Lightning configuration ---
+ BASE_MODEL_ID = "stabilityai/stable-diffusion-xl-base-1.0"
+ LIGHTNING_MODEL_ID = "ByteDance/SDXL-Lightning"
+ LIGHTNING_CHECKPOINT = "sdxl_lightning_4step_unet.safetensors"
+ DEVICE = "cpu"
+ DTYPE = torch.float32

+ print(f"INFO: Starting with SDXL-Lightning on {DEVICE}")

+ # --- Model loading ---
  pipe = None
  try:
+     print("INFO: Loading SDXL-Lightning model...")
+     load_start = time.time()

+     # Load the base SDXL model
+     pipe = StableDiffusionXLPipeline.from_pretrained(
+         BASE_MODEL_ID,
+         torch_dtype=DTYPE,
+         use_safetensors=True,
+         variant="fp16",  # Use the fp16 variant to save memory
+         low_cpu_mem_usage=True,
+         safety_checker=None
      )

+     # Load the optimized UNet from SDXL-Lightning
+     lightning_unet = UNet2DConditionModel.from_pretrained(
+         LIGHTNING_MODEL_ID,
+         subfolder="unet",
          torch_dtype=DTYPE,
          use_safetensors=True,
+         variant=LIGHTNING_CHECKPOINT,
+         low_cpu_mem_usage=True
      )

+     # Swap the pipeline's UNet for the Lightning version
+     pipe.unet = lightning_unet
+
+     # CPU optimizations
+     pipe.enable_attention_slicing()

+     load_end = time.time()
+     print(f"INFO: Model loaded successfully in {load_end - load_start:.2f} seconds")

  except Exception as e:
+     print(f"ERROR: Failed to load model: {e}")

  # --- Prompt generation function ---
  def create_prompt_and_negative(user_description, style_choice):

@@ -77,77 +74,64 @@ def create_prompt_and_negative(user_description, style_choice):
      final_prompt = f"{base_description}, {style_keywords}, high quality, centered composition"
      return final_prompt, negative_prompt

+ # --- Generation function ---
+ def generate_emoji(description, style, guidance_scale_val, seed_val, size_choice):
      if pipe is None:
+         raise gr.Error("Model not loaded. Please check logs.")
      if not description.strip():
+         raise gr.Error("Please enter a description!")

+     # Image size
      if size_choice == "Small (faster)":
+         height, width = 512, 512  # SDXL works best with sizes that are multiples of 512
      elif size_choice == "Medium":
+         height, width = 768, 768
+     else:
+         height, width = 1024, 1024

+     # Seed
+     if seed_val is None or seed_val == -1:
          seed = torch.randint(0, 2**32 - 1, (1,)).item()
      else:
          seed = int(seed_val)
+
      generator = torch.Generator(device='cpu').manual_seed(seed)

+     # Prompt
      prompt, negative_prompt = create_prompt_and_negative(description, style)
+     print(f"INFO: Generating: '{description}' | Style: '{style}' | Size: {width}x{height}")
+     print(f"INFO: Guidance: {guidance_scale_val}, Seed: {seed}")
+
+     # Generation
+     gc.collect()
+     start_time = time.time()
+
      try:
          with torch.no_grad():
+             # SDXL-Lightning is optimized for 4 steps
              image = pipe(
                  prompt,
                  negative_prompt=negative_prompt,
+                 num_inference_steps=4,  # Lightning is tuned for 4 steps
                  guidance_scale=float(guidance_scale_val),
                  generator=generator,
                  height=height,
                  width=width
              ).images[0]

+         end_time = time.time()
+         generation_time = end_time - start_time
+         print(f"INFO: Generated in {generation_time:.2f} seconds")
+
+         gc.collect()
          return image, seed, f"Generated in {generation_time:.1f} seconds"
+
      except Exception as e:
+         print(f"ERROR: Generation failed: {e}")
+         gc.collect()
+         raise gr.Error(f"Generation failed: {e}")

+ # --- Interface ---
  css = """
  #title_custom {
      text-align: center;

@@ -169,15 +153,6 @@
      transform: translateY(-2px);
      box-shadow: 0 5px 15px rgba(255, 140, 0, 0.3) !important;
  }
  .emoji-preview {
      border-radius: 12px;
      overflow: hidden;

@@ -186,94 +161,81 @@
  footer {visibility: hidden}
  """

+ with gr.Blocks(css=css, theme=gr.themes.Soft(primary_hue=gr.themes.colors.orange)) as demo:
      gr.Markdown("<h1 id='title_custom'>✨ Dreamoji AI Studio 2025 ✨</h1>", elem_id="title_custom")
      gr.Markdown(
          "<div class='container'>"
+         "Turn your ideas into emojis with SDXL-Lightning - ultra-fast generation in just 4 steps! "
+         "Describe your emoji, choose a style, and watch the magic happen."
          "</div>"
      )

      with gr.Row():
          with gr.Column(scale=2):
              description_input = gr.Textbox(
+                 label="1. Describe your Emoji:",
+                 placeholder="e.g., 'a happy cat', 'a robot with a lightbulb', 'an avocado dancing'"
              )
              style_input = gr.Radio(
                  ["Apple (iOS) Style", "Google (Noto) Style", "Hand-Drawn Style", "Anime Style"],
+                 label="2. Choose Style:",
                  value="Apple (iOS) Style"
              )

+             size_choice = gr.Radio(
+                 ["Small (faster)", "Medium", "Large (slower)"],
+                 label="3. Choose Size:",
+                 value="Small (faster)"
+             )

              with gr.Accordion("⚙️ Advanced Settings", open=False):
+                 guidance_scale_slider = gr.Slider(
+                     minimum=1.0,
+                     maximum=10.0,
+                     value=7.0,
+                     step=0.5,
+                     label="Guidance Scale",
+                     info="How strictly to follow prompt"
+                 )
                  seed_input = gr.Number(
+                     label="Seed (-1 for random)",
                      value=-1
                  )

+             generate_button = gr.Button("✨ Generate My Dreamoji! ✨", variant="primary")

          with gr.Column(scale=1):
              output_image = gr.Image(
+                 label="Your Dreamoji:",
                  type="pil",
                  elem_classes="emoji-preview"
              )
+             used_seed_output = gr.Textbox(label="Seed:", interactive=False)
+             generation_time = gr.Textbox(label="Time:", interactive=False)

      generate_button.click(
          generate_emoji,
+         inputs=[description_input, style_input, guidance_scale_slider, seed_input, size_choice],
          outputs=[output_image, used_seed_output, generation_time],
      )

      gr.Markdown("---")

+     with gr.Accordion("🤔 Tips", open=True):
          gr.Markdown(
+             "- **Ultra-Fast Generation:** SDXL-Lightning generates in just 4 steps!\n"
+             "- **Keep it simple:** Short descriptions work best\n"
+             "- **Try different styles:** Each style has a unique look\n"
+             "- **Save the seed:** Use it again to recreate similar results"
          )

      gr.HTML("""
          <div style="text-align: center; margin-top: 20px; font-size: 0.9em; color: #777;">
+             Created with Gradio & Hugging Face Spaces. Model: SDXL-Lightning by ByteDance.
              <br>
              If you enjoy this, consider supporting by giving the Space a ❤️!
          </div>
      """)

+ # Launch with a queue
+ demo.queue(max_size=5).launch()
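
Note on the UNet loading above: the commit fetches the Lightning UNet with UNet2DConditionModel.from_pretrained(LIGHTNING_MODEL_ID, subfolder="unet", variant=LIGHTNING_CHECKPOINT). For comparison, the recipe published on the ByteDance/SDXL-Lightning model card downloads the standalone 4-step UNet checkpoint, loads its state dict directly into a UNet built from the base SDXL config, switches the scheduler to Euler with trailing timestep spacing, and samples with guidance_scale=0. A minimal sketch of that recipe, adapted here to CPU/float32 to match this Space (the device, dtype, and example prompt are assumptions, not part of the commit):

import torch
from diffusers import StableDiffusionXLPipeline, UNet2DConditionModel, EulerDiscreteScheduler
from huggingface_hub import hf_hub_download
from safetensors.torch import load_file

base = "stabilityai/stable-diffusion-xl-base-1.0"
repo = "ByteDance/SDXL-Lightning"
ckpt = "sdxl_lightning_4step_unet.safetensors"  # standalone checkpoint at the repo root

# Build a UNet from the base SDXL config, then load the Lightning weights into it.
unet = UNet2DConditionModel.from_config(base, subfolder="unet").to("cpu", torch.float32)
unet.load_state_dict(load_file(hf_hub_download(repo, ckpt), device="cpu"))

# Attach the Lightning UNet to the base SDXL pipeline.
pipe = StableDiffusionXLPipeline.from_pretrained(
    base, unet=unet, torch_dtype=torch.float32, use_safetensors=True
)

# Lightning checkpoints expect "trailing" timestep spacing.
pipe.scheduler = EulerDiscreteScheduler.from_config(
    pipe.scheduler.config, timestep_spacing="trailing"
)

# The 4-step checkpoint is trained for exactly 4 steps and no classifier-free guidance.
image = pipe(
    "a happy cat emoji, centered composition",  # example prompt (assumption)
    num_inference_steps=4,
    guidance_scale=0.0,
).images[0]
image.save("emoji.png")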