Spaces (status: Runtime error)
Commit: add logs

Files changed:
- app.py (+50 -38)
- requirements.txt (+1 -0)

app.py
CHANGED
@@ -1,6 +1,7 @@
 import os
 import io
 import time
+from datetime import datetime
 import spaces
 import torch
 import gradio as gr
@@ -14,35 +15,45 @@ from fastapi import FastAPI, Response
 import uvicorn
 from pydantic import BaseModel
 
+def print_with_time(*args, **kwargs):
+    tz = pytz.timezone(kwargs.pop('timezone', 'Europe/Moscow'))
+    time_fmt = kwargs.pop('time_format', '%Y-%m-%d %H:%M:%S')
+    separator = kwargs.pop('sep', ' - ')
+    moscow_time = datetime.now(tz).strftime(time_fmt)
+    message = separator.join(str(arg) for arg in args)
+    print(f"[{moscow_time}]{separator}{message}", **kwargs)
+
+print_with_time("Service start")
+
 login(token = os.getenv('HF_TOKEN'))
 
-
-
-
-
+print_with_time("="*50)
+print_with_time(f"PyTorch version: {torch.__version__}")
+print_with_time(f"CUDA available: {torch.cuda.is_available()}")
+print_with_time(f"GPU count: {torch.cuda.device_count()}")
 if torch.cuda.is_available():
-
-
-
+    print_with_time(f"Current device: {torch.cuda.current_device()}")
+    print_with_time(f"Device name: {torch.cuda.get_device_name(0)}")
+print_with_time("="*50)
 
 def clear_cuda():
     if torch.cuda.is_available():
-
-
-
-
+        print_with_time(f"VRAM in use: {torch.cuda.memory_allocated() / 1024 ** 3:.2f} GB")
+        print_with_time(f"VRAM available: {torch.cuda.memory_reserved() / 1024 ** 3:.2f} GB")
+        print_with_time(f"Peak VRAM usage: {torch.cuda.max_memory_allocated() / 1024 ** 3:.2f} GB")
+        print_with_time(f"Clearing CUDA cache...")
         torch.cuda.empty_cache()
         torch.cuda.reset_peak_memory_stats()
-
-
-
-
+        print_with_time(f"CUDA cache cleared.")
+        print_with_time(f"VRAM in use: {torch.cuda.memory_allocated() / 1024 ** 3:.2f} GB")
+        print_with_time(f"VRAM available: {torch.cuda.memory_reserved() / 1024 ** 3:.2f} GB")
+        print_with_time(f"Peak VRAM usage: {torch.cuda.max_memory_allocated() / 1024 ** 3:.2f} GB")
 
 device = "cuda" if torch.cuda.is_available() else "cpu"
 
 start_full_time = time.time()
 start_time = time.time()
-
+print_with_time(f"Loading FLUX.1-dev model")
 pipe = FluxPipeline.from_pretrained(
     # pretrained_model_name_or_path = local_path,
     "black-forest-labs/FLUX.1-dev",
@@ -53,69 +64,71 @@ pipe = FluxPipeline.from_pretrained(
     # variant="fp16",
     use_safetensors=True
 )
-
+print_with_time(f"FLUX.1-dev model loaded in {time.time() - start_time:.2f} seconds")
 
 start_time = time.time()
-
+print_with_time(f"Loading LoRA")
 pipe.load_lora_weights("Shakker-Labs/FLUX.1-dev-LoRA-add-details", weight_name="FLUX-dev-lora-add_details.safetensors")
-
+print_with_time(f"LoRA loaded in {time.time() - start_time:.2f} seconds")
 
 pipe.fuse_lora(lora_scale=0.5)
 pipe.to(device)
 
 start_time = time.time()
-
+print_with_time(f"Loading model fal/AuraSR-v2")
 aura_sr = AuraSR.from_pretrained("fal/AuraSR-v2")
-
+print_with_time(f"Model fal/AuraSR-v2 loaded in {time.time() - start_time:.2f} seconds")
 
 start_time = time.time()
-
+print_with_time(f"Loading model briaai/RMBG-1.4")
 bg_remover = pipeline("image-segmentation", "briaai/RMBG-1.4", trust_remote_code=True )
-
+print_with_time(f"Model briaai/RMBG-1.4 loaded in {time.time() - start_time:.2f} seconds")
 
-
+print_with_time(f"All models loaded in {time.time() - start_full_time:.2f} seconds")
 
 @spaces.GPU()
 def generate_image(object_name, remove_bg=True, upscale=True):
     try:
-
+        print_with_time("Building the prompt")
         object_name = translate_ru_en(object_name)
         prompt = create_template_prompt(object_name)
 
         # Simulated generation (can be replaced with a real ComfyUI API call)
-
-
+        print_with_time(f"Generating icon for object: {object_name}")
+        print_with_time(f"Prompt: {prompt[:100]}...")
         # print(f"Parameters: seed={seed}, steps={steps}, size={width}x{height}")
-
+        print_with_time(f"Options: remove_bg={remove_bg}")
         steps = int(os.getenv('STEPS')) if os.getenv('STEPS') is not None else 10
-
+        print_with_time(f"Steps: {steps}")
 
         clear_cuda()
 
         start_time = time.time()
-
+        print_with_time("Starting image generation...")
         image = generate_image_stable(prompt, steps)
-
+        print_with_time(f"Generation finished in {time.time() - start_time:.2f} seconds")
 
         if upscale :
             clear_cuda()
             start_time = time.time()
-
+            print_with_time(f"Starting the upscaler...")
             image = aura_sr.upscale_4x_overlapped(image)
-
+            print_with_time(f"Upscaling finished in {time.time() - start_time:.2f} seconds")
 
         if remove_bg :
             clear_cuda()
             start_time = time.time()
-
+            print_with_time(f"Starting background removal...")
             image = bg_remover(image)
-
+            print_with_time(f"Background removed in {time.time() - start_time:.2f} seconds")
 
+        print_with_time("Generation completed successfully.")
         clear_cuda()
+        print_with_time("Returning the finished image")
         return image
 
     except Exception as e:
-
+        print_with_time(f"Error while generating the image: {e}")
         traceback.print_exc()
         clear_cuda()
         return None
@@ -147,7 +160,7 @@ def translate_ru_en(text: str):
         return translator.translate(text)
 
     except Exception as e:
-
+        print_with_time(f"Translation error: {e}")
         traceback.print_exc()
         return text
 
@@ -155,7 +168,6 @@ def load_text(file_name):
     with open(file_name, 'r', encoding='utf-8') as f:
         return f.read()
 
-
 fastapi_app = FastAPI()
 
 # Data model for the POST request
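Note on the new helper: `print_with_time` calls `pytz.timezone`, but the hunks above only add `from datetime import datetime`, and pytz itself only appears as a new dependency in requirements.txt below. Assuming it is not already imported in one of the unchanged lines of app.py, the module would also need an `import pytz` near the top. A minimal, self-contained sketch of the helper under that assumption, with an example call:

import pytz  # assumed import; not visible in the diff hunks above
from datetime import datetime

def print_with_time(*args, **kwargs):
    # Pop the helper-specific options before forwarding the rest to print()
    tz = pytz.timezone(kwargs.pop('timezone', 'Europe/Moscow'))
    time_fmt = kwargs.pop('time_format', '%Y-%m-%d %H:%M:%S')
    separator = kwargs.pop('sep', ' - ')
    moscow_time = datetime.now(tz).strftime(time_fmt)
    message = separator.join(str(arg) for arg in args)
    print(f"[{moscow_time}]{separator}{message}", **kwargs)

print_with_time("Loading FLUX.1-dev model")
# prints a line of the form: [2025-01-01 12:00:00] - Loading FLUX.1-dev model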
requirements.txt
CHANGED
@@ -21,3 +21,4 @@ deep_translator
 fastapi
 uvicorn
 pydantic
+pytz
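Since pytz is pulled in only to resolve the Europe/Moscow timezone for the logger, a hedged alternative (not what this commit does) would be the standard-library zoneinfo module, available from Python 3.9, which avoids the extra requirement. The sketch below assumes the Space's image ships Python 3.9+ with system tzdata:

from datetime import datetime
from zoneinfo import ZoneInfo  # stdlib replacement for pytz.timezone on Python 3.9+

def print_with_time(*args, **kwargs):
    # Same behaviour as the helper in app.py, minus the pytz dependency
    tz = ZoneInfo(kwargs.pop('timezone', 'Europe/Moscow'))
    time_fmt = kwargs.pop('time_format', '%Y-%m-%d %H:%M:%S')
    separator = kwargs.pop('sep', ' - ')
    stamp = datetime.now(tz).strftime(time_fmt)
    message = separator.join(str(arg) for arg in args)
    print(f"[{stamp}]{separator}{message}", **kwargs)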